repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class)
---|---|---|---|---|---|---|
gate-teamware | gate-teamware-master/backend/management/commands/build_api_docs.py | import json
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from backend.rpcserver import JSONRPCEndpoint


class Command(BaseCommand):
    help = "Generate a JSON file listing API endpoints"

    def add_arguments(self, parser):
        parser.add_argument('output_dest', type=str)

    def handle(self, *args, **options):
        output_dest = options["output_dest"]
        listing = JSONRPCEndpoint.endpoint_listing()
        for name, props in listing.items():
            listing[name]["all_args"] = ','.join(props["arguments"])
        context = {
            "api_dict": listing
        }
        with open(output_dest, "w") as f:
            f.write(render_to_string("api_docs_template.md", context))
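# Usage sketch (assumes a configured Django environment; the output path
# "api_docs.md" is illustrative, not taken from the source):
#
#     python manage.py build_api_docs api_docs.md
#
# or programmatically:
#
#     from django.core.management import call_command
#     call_command("build_api_docs", "api_docs.md")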
| 788 | 24.451613 | 70 | py |
gate-teamware | gate-teamware-master/backend/management/commands/load_test_fixture.py | from multiprocessing.connection import wait
import sys, os
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from backend.models import AnnotatorProject, Project, Document, Annotation, DocumentType
from django.core.management import call_command

TEST_FIXTURES_REGISTRY = {}


def test_fixture(func):
    TEST_FIXTURES_REGISTRY[func.__name__] = func
    return func
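# The decorator above implements a simple registry pattern: each decorated
# fixture function is stored under its own __name__, so the management command
# at the bottom of this file can look it up by string, e.g.
# TEST_FIXTURES_REGISTRY["create_db_users"]().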
@test_fixture
def create_db_users():
    """Create default db users, admin, manager and annotator with password: testpassword"""
    password = "testpassword"
    admin = get_user_model().objects.create_user(username="admin", password=password, email="[email protected]")
    admin.is_superuser = True
    admin.is_staff = True
    admin.is_account_activated = True
    admin.save()
    manager = get_user_model().objects.create_user(username="manager", password=password, email="[email protected]")
    manager.is_manager = True
    manager.is_account_activated = True
    manager.save()
    annotator = get_user_model().objects.create_user(username="annotator", password=password, email="[email protected]")
    annotator.is_account_activated = True
    annotator.save()
@test_fixture
def create_db_users_with_project():
    """
    Create default db users and also create a project that belongs to an admin.
    Creates 20 documents for the project.
    """
    create_db_users()
    admin_user = get_user_model().objects.get(username="admin")
    manager_user = get_user_model().objects.get(username="manager")
    annotator_user = get_user_model().objects.get(username="annotator")
    users = [admin_user, manager_user, annotator_user]
    project_config = [
        {
            "name": "htmldisplay",
            "type": "html",
            "text": "{{{text}}}"
        },
        {
            "name": "sentiment",
            "type": "radio",
            "title": "Sentiment",
            "description": "Please select a sentiment of the text above.",
            "options": {
                "negative": "Negative",
                "neutral": "Neutral",
                "positive": "Positive"
            }
        }
    ]
    project = Project.objects.create(name="Test project",
                                     owner=admin_user,
                                     configuration=project_config,
                                     has_test_stage=False,
                                     has_training_stage=False,
                                     allow_annotation_change=True,
                                     document_id_field="id")
    for i in range(20):
        doc_data = {
            "id": f"{i+1}",
            "text": f"Document text {i}",
            "preanno": {"sentiment": "positive"},
        }
        document = Document.objects.create(project=project, data=doc_data)
@test_fixture
def create_db_users_with_project_annotator_personal_info_deleted():
    """
    Same as create_db_users_with_project but with the annotator's personal info deleted
    """
    create_db_users_with_project()
    annotator_user = get_user_model().objects.get(username="annotator")
    annotator_user.delete_user_personal_information()


@test_fixture
def create_db_users_with_project_admin_is_annotator():
    create_db_users_with_project()
    admin_user = get_user_model().objects.get(username="admin")
    project = Project.objects.get(name="Test project")
    project.add_annotator(admin_user)
@test_fixture
def create_db_users_with_project_and_annotation():
    """Create default db users and also create a set of annotations."""
    create_db_users_with_project()
    admin_user = get_user_model().objects.get(username="admin")
    manager_user = get_user_model().objects.get(username="manager")
    annotator_user = get_user_model().objects.get(username="annotator")
    users = [admin_user, manager_user, annotator_user]
    documents = Document.objects.all()
    for document in documents:
        for user in users:
            annotation = Annotation.objects.create(document=document, user=user,
                                                   status=Annotation.COMPLETED, status_time=timezone.now())
            annotation.data = {"sentiment": "positive"}
@test_fixture
def project_with_training_and_test():
    """Creates a project with training and test phases and documents"""
    create_db_users_with_project_and_annotation()
    project = Project.objects.first()
    project.has_training_stage = True
    project.has_test_stage = True
    project.save()
    num_documents = 10
    # Create training and test documents
    for i in range(num_documents):
        doc_data = {
            "id": f"{i+1}",
            "text": f"Document text {i}",
            "gold": {
                "sentiment": {
                    "value": "positive",
                    "explanation": "Example explanation"
                }
            }
        }
        document = Document.objects.create(project=project, data=doc_data, doc_type=DocumentType.TRAINING)
        document = Document.objects.create(project=project, data=doc_data, doc_type=DocumentType.TEST)
@test_fixture
def project_with_annotators():
    """
    Fixture for testing the annotator management view.
    Creates default db users and a set of annotations, and adds annotators to the project.
    Users are specified at particular stages:
    - Training
    - Testing (Pass)
    - Testing (Fail)
    - Annotating
    - Completion
    """
    project_with_training_and_test()
    annotation_user = get_user_model().objects.get(username="annotator")
    waiting_user = get_user_model().objects.create(username="waiter")
    training_user = get_user_model().objects.create(username="trainer")
    testing_user = get_user_model().objects.create(username="tester")
    failing_user = get_user_model().objects.create(username="failer")
    completed_user = get_user_model().objects.create(username="completer")
    project = Project.objects.first()
    for annotator in [annotation_user, waiting_user, training_user, testing_user, failing_user, completed_user]:
        project.add_annotator(annotator)
    project.save()
    # Users that complete training
    for user in [annotation_user, waiting_user, completed_user, testing_user, failing_user]:
        for document in project.documents.filter(doc_type=DocumentType.TRAINING):
            annotation = Annotation.objects.create(document=document, user=user,
                                                   status=Annotation.COMPLETED, status_time=timezone.now())
            annotation.data = {"sentiment": "positive"}
            annotation.save()
        project.annotator_completed_training(user)
        project.save()
    # Training user doesn't complete training
    for document in project.documents.filter(doc_type=DocumentType.TRAINING)[:5]:
        annotation = Annotation.objects.create(document=document, user=training_user,
                                               status=Annotation.COMPLETED, status_time=timezone.now())
        annotation.data = {"sentiment": "positive"}
        annotation.save()
    annotator_project = AnnotatorProject.objects.get(project=project, annotator=training_user)
    annotator_project.training_score = project.get_annotator_document_score(training_user, DocumentType.TRAINING)
    annotator_project.save()
    # Users that complete testing
    for user in [annotation_user, waiting_user, completed_user]:
        for document in project.documents.filter(doc_type=DocumentType.TEST):
            annotation = Annotation.objects.create(document=document, user=user,
                                                   status=Annotation.COMPLETED, status_time=timezone.now())
            annotation.data = {"sentiment": "positive"}
            annotation.save()
        project.annotator_completed_test(user)
        project.save()
    # Testing user doesn't complete testing
    for document in project.documents.filter(doc_type=DocumentType.TEST)[:5]:
        annotation = Annotation.objects.create(document=document, user=testing_user,
                                               status=Annotation.COMPLETED, status_time=timezone.now())
        annotation.data = {"sentiment": "positive"}
        annotation.save()
    annotator_project = AnnotatorProject.objects.get(project=project, annotator=testing_user)
    annotator_project.test_score = project.get_annotator_document_score(testing_user, DocumentType.TEST)
    annotator_project.save()
    # Failing user fails testing
    for document in project.documents.filter(doc_type=DocumentType.TEST):
        annotation = Annotation.objects.create(document=document, user=failing_user,
                                               status=Annotation.COMPLETED, status_time=timezone.now())
        annotation.data = {"sentiment": "negative"}
        annotation.save()
    # project.get_annotator_document_score(failing_user, DocumentType.TEST)
    # project.save()
    project.annotator_completed_test(failing_user)
    project.save()
    # Completed user's annotations
    for document in project.documents.filter(doc_type=DocumentType.ANNOTATION):
        annotation = Annotation.objects.create(document=document, user=completed_user,
                                               status=Annotation.COMPLETED, status_time=timezone.now())
        annotation.data = {"sentiment": "positive"}
        annotation.save()
    ann_proj = AnnotatorProject.objects.get(project=project, annotator=completed_user)
    ann_proj.annotations_completed = timezone.now()
    ann_proj.save()
class Command(BaseCommand):
    help = "Flushes the database and loads a test fixture"

    def add_arguments(self, parser):
        parser.add_argument("-n", "--name", type=str, help="Name of the fixture")

    def handle(self, *args, **options):
        if "name" in options and options["name"] and options["name"] in TEST_FIXTURES_REGISTRY:
            # Flush the DB
            print("Flushing database...")
            call_command("flush", "--noinput")
            print("Migrating database...")
            call_command("migrate", "--noinput")
            # Run the fixture function
            print(f"Running command {options['name']}")
            TEST_FIXTURES_REGISTRY[options["name"]]()
        else:
            # List available fixtures
            print("No fixture specified; use the -n/--name option. Available fixtures are:")
            for name, func in TEST_FIXTURES_REGISTRY.items():
                func_help_str = f"- {func.__doc__}" if func.__doc__ else ""
                print(f"{name} {func_help_str}")
| 10,652 | 39.048872 | 121 | py |
gate-teamware | gate-teamware-master/backend/tests/test_views.py | import math
import csv
import io
import json
from io import TextIOWrapper
from zipfile import ZipFile
from django.contrib.auth import get_user_model
from django.test import TestCase
from backend.models import Project, Document, Annotation, DocumentType
from backend.tests.test_rpc_server import TestEndpoint
from backend.views import DownloadAnnotationsView


class TestDownloadAnnotations(TestEndpoint):

    def setUp(self):
        self.test_user = get_user_model().objects.create(username="project_creator")
        self.annotators = [get_user_model().objects.create(username=f"anno{i}") for i in range(3)]
        self.project = Project.objects.create(owner=self.test_user)
        self.num_training_docs = 25
        self.num_test_docs = 50
        self.num_docs = 100
        self.num_docs_per_file = 10
        self.create_documents_with_annotations(self.project, self.num_training_docs, DocumentType.TRAINING,
                                               self.annotators)
        self.create_documents_with_annotations(self.project, self.num_test_docs, DocumentType.TEST,
                                               self.annotators)
        self.create_documents_with_annotations(self.project, self.num_docs, DocumentType.ANNOTATION,
                                               self.annotators)
        self.project.refresh_from_db()

    def create_documents_with_annotations(self, project, num_documents, doc_type, annotators):
        for i in range(num_documents):
            document = Document.objects.create(
                project=project,
                data={
                    "id": i,
                    "text": f"Text {i}"
                },
                doc_type=doc_type,
            )
            for annotator in annotators:
                anno = Annotation.objects.create(user=annotator,
                                                 document=document,
                                                 status=Annotation.COMPLETED)
                anno.data = {"text1": "Value1", "checkbox1": ["val1", "val2", "val3"]}
    def test_json_export(self):
        client = self.get_loggedin_client()
        response = client.get(f"/download_annotations/{self.project.id}/training/json/raw/{self.num_docs_per_file}/anonymize/")
        self.check_json_export_from_response(response,
                                             num_documents_expected=self.num_training_docs,
                                             num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/test/json/raw/{self.num_docs_per_file}/anonymize/")
        self.check_json_export_from_response(response,
                                             num_documents_expected=self.num_test_docs,
                                             num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/annotation/json/raw/{self.num_docs_per_file}/anonymize/")
        self.check_json_export_from_response(response,
                                             num_documents_expected=self.num_docs,
                                             num_documents_per_file=self.num_docs_per_file)

    def check_json_export_from_response(self, response, num_documents_expected, num_documents_per_file):
        self.assertEqual(response.status_code, 200)
        with ZipFile(self.get_io_stream_from_streaming_response(response), mode="r") as zip:
            self.check_num_files_in_zip(zip, num_documents_expected, num_documents_per_file)
            num_docs_count = 0
            for file_name in zip.namelist():
                print(f"Checking {file_name}")
                with zip.open(file_name, "r") as file:
                    file_dict = json.loads(file.read())
                    self.assertTrue(isinstance(file_dict, list), "Must be a list of objects")
                    num_docs_count += len(file_dict)
            self.assertEqual(num_docs_count, num_documents_expected)

    def check_num_files_in_zip(self, zip, num_documents_expected, num_documents_per_file):
        """
        Num files must be ceil(num_documents_expected / num_documents_per_file)
        """
        num_files_expected = math.ceil(num_documents_expected / num_documents_per_file)
        self.assertEqual(len(zip.namelist()), num_files_expected, f"Must have {num_files_expected} files")
    def test_jsonl_export(self):
        client = self.get_loggedin_client()
        response = client.get(f"/download_annotations/{self.project.id}/training/jsonl/raw/{self.num_docs_per_file}/anonymize/")
        self.check_jsonl_export_from_response(response,
                                              num_documents_expected=self.num_training_docs,
                                              num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/test/jsonl/raw/{self.num_docs_per_file}/anonymize/")
        self.check_jsonl_export_from_response(response,
                                              num_documents_expected=self.num_test_docs,
                                              num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/annotation/jsonl/raw/{self.num_docs_per_file}/anonymize/")
        self.check_jsonl_export_from_response(response,
                                              num_documents_expected=self.num_docs,
                                              num_documents_per_file=self.num_docs_per_file)

    def check_jsonl_export_from_response(self, response, num_documents_expected, num_documents_per_file):
        self.assertEqual(response.status_code, 200)
        with ZipFile(self.get_io_stream_from_streaming_response(response), mode="r") as zip:
            self.check_num_files_in_zip(zip, num_documents_expected, num_documents_per_file)
            num_docs_count = 0
            for file_name in zip.namelist():
                print(f"Checking {file_name}")
                with zip.open(file_name, "r") as file:
                    for line in file:
                        obj_dict = json.loads(line)
                        self.assertTrue(isinstance(obj_dict, dict), "Object must be a dict")
                        num_docs_count += 1
            self.assertEqual(num_docs_count, num_documents_expected)
    def test_csv_export(self):
        client = self.get_loggedin_client()
        response = client.get(f"/download_annotations/{self.project.id}/training/csv/raw/{self.num_docs_per_file}/anonymize/")
        self.check_csv_export_from_response(response,
                                            num_documents_expected=self.num_training_docs,
                                            num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/test/csv/raw/{self.num_docs_per_file}/anonymize/")
        self.check_csv_export_from_response(response,
                                            num_documents_expected=self.num_test_docs,
                                            num_documents_per_file=self.num_docs_per_file)
        response = client.get(f"/download_annotations/{self.project.id}/annotation/csv/raw/{self.num_docs_per_file}/anonymize/")
        self.check_csv_export_from_response(response,
                                            num_documents_expected=self.num_docs,
                                            num_documents_per_file=self.num_docs_per_file)

    def check_csv_export_from_response(self, response, num_documents_expected, num_documents_per_file):
        self.assertEqual(response.status_code, 200)
        with ZipFile(self.get_io_stream_from_streaming_response(response), mode="r") as zip:
            self.check_num_files_in_zip(zip, num_documents_expected, num_documents_per_file)
            num_docs_count = 0
            for file_name in zip.namelist():
                print(f"Checking {file_name}")
                with zip.open(file_name, "r") as file:
                    reader = csv.reader(TextIOWrapper(file), delimiter=",")
                    for row in reader:
                        print(row)
                        num_docs_count += 1
                    num_docs_count -= 1  # Minus header row
            self.assertEqual(num_docs_count, num_documents_expected)

    def get_io_stream_from_streaming_response(self, response):
        stream = b''.join(response.streaming_content)
        return io.BytesIO(stream)
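# Note (based only on the URLs exercised in this test class): the export endpoint
# follows the pattern
#   /download_annotations/<project_id>/<training|test|annotation>/<json|jsonl|csv>/raw/<docs_per_file>/anonymize/
# and streams a zip archive containing ceil(num_documents / docs_per_file) files.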
| 8,609 | 49.946746 | 130 | py |
gate-teamware | gate-teamware-master/backend/tests/test_rpc_endpoints.py | from django.core import mail
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
from django.http import HttpRequest
from django.test import TestCase, Client
from django.utils import timezone
import json
from backend.models import Annotation, Document, DocumentType, Project, AnnotatorProject, UserDocumentFormatPreference
from backend.rpc import create_project, update_project, add_project_document, add_document_annotation, \
    get_possible_annotators, add_project_annotator, remove_project_annotator, get_project_annotators, \
    get_annotation_task, complete_annotation_task, reject_annotation_task, register, activate_account, \
    generate_password_reset, reset_password, generate_user_activation, change_password, change_email, \
    set_user_receive_mail_notifications, delete_documents_and_annotations, import_project_config, export_project_config, \
    clone_project, delete_project, get_projects, get_project_documents, get_user_annotated_projects, \
    get_user_annotations_in_project, add_project_test_document, add_project_training_document, \
    get_project_training_documents, get_project_test_documents, project_annotator_allow_annotation, \
    annotator_leave_project, login, change_annotation, delete_annotation_change_history, get_annotation_task_with_id, \
    set_user_document_format_preference, initialise, is_authenticated, user_delete_personal_information, \
    user_delete_account, admin_delete_user_personal_information, admin_delete_user
from backend.rpcserver import rpc_method
from backend.errors import AuthError
from backend.tests.test_models import create_each_annotation_status_for_user, TestUserModelDeleteUser
from backend.tests.test_rpc_server import TestEndpoint
class TestUserAuth(TestCase):

    def test_user_auth(self):
        username = "testuser"
        user_pass = "123456789"
        user_email = "[email protected]"
        c = Client()
        # Register
        params = {
            "username": username,
            "password": user_pass,
            "email": user_email,
        }
        response = c.post("/rpc/",
                          {"jsonrpc": "2.0", "method": "register", "id": 20, "params": [params]},
                          content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = json.loads(response.content)
        self.assertEqual(msg["result"]["isAuthenticated"], True)
        # Log in
        params = {
            "username": username,
            "password": user_pass,
        }
        response = c.post("/rpc/",
                          {"jsonrpc": "2.0", "method": "login", "id": 20, "params": [params]},
                          content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = json.loads(response.content)
        self.assertEqual(msg["result"]["isAuthenticated"], True)
        # Check authentication
        response = c.post("/rpc/",
                          {"jsonrpc": "2.0", "method": "is_authenticated", "id": 20},
                          content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = json.loads(response.content)
        self.assertEqual(msg["result"]["isAuthenticated"], True)
        # Log Out
        response = c.post("/rpc/",
                          {"jsonrpc": "2.0", "method": "logout", "id": 20},
                          content_type="application/json")
        self.assertEqual(response.status_code, 200)
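# All RPC calls in this module go through a single JSON-RPC 2.0 endpoint at /rpc/,
# as exercised above: requests carry {"jsonrpc": "2.0", "method": ..., "id": ...,
# "params": [...]} and successful responses return the payload under "result".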
class TestInitialise(TestEndpoint):

    def test_initialise(self):
        context_object = initialise(self.get_request())
        self.assertTrue("user" in context_object)
        self.assertTrue("configs" in context_object)
        self.assertTrue("docFormatPref" in context_object["configs"])
        self.assertTrue("global_configs" in context_object)
        self.assertTrue("allowUserDelete" in context_object["global_configs"])


class TestIsAuthenticated(TestEndpoint):

    def test_is_authenticated_anonymous(self):
        context = is_authenticated(self.get_request())
        self.assertFalse(context["isAuthenticated"])
        self.assertFalse(context["isManager"])
        self.assertFalse(context["isAdmin"])

    def test_is_authenticated_annotator(self):
        user = self.get_default_user()
        user.is_staff = False
        user.is_manager = False
        user.save()
        context = is_authenticated(self.get_loggedin_request())
        self.assertTrue(context["isAuthenticated"])
        self.assertFalse(context["isManager"])
        self.assertFalse(context["isAdmin"])

    def test_is_authenticated_manager(self):
        user = self.get_default_user()
        user.is_staff = False
        user.is_manager = True
        user.save()
        context = is_authenticated(self.get_loggedin_request())
        self.assertTrue(context["isAuthenticated"])
        self.assertTrue(context["isManager"])
        self.assertFalse(context["isAdmin"])

    def test_is_authenticated_admin(self):
        user = self.get_default_user()
        user.is_staff = True
        user.is_manager = False
        user.save()
        context = is_authenticated(self.get_loggedin_request())
        self.assertTrue(context["isAuthenticated"])
        self.assertTrue(context["isManager"])
        self.assertTrue(context["isAdmin"])
class TestAppCreatedUserAccountsCannotLogin(TestEndpoint):

    def test_app_created_user_accounts_cannot_login(self):
        # Create a user programmatically, without a password
        created_user = get_user_model().objects.create(username="doesnotexist")
        # Default password is blank ""
        self.assertEqual("", created_user.password)
        with self.assertRaises(AuthError, msg="Should raise an error if logging in with None as password"):
            login(self.get_request(), {"username": "doesnotexist", "password": None})
        with self.assertRaises(AuthError, msg="Should raise an error if logging in with blank as password"):
            login(self.get_request(), {"username": "doesnotexist", "password": ""})


class TestDeletedUserAccountCannotLogin(TestEndpoint):

    def test_deleted_user_account_cannot_login(self):
        """
        Ensures that user accounts that opt to have their personal information removed from the system
        but not wiped cannot log in again.
        """
        username = "deleted"
        password = "test1password"
        get_user_model().objects.create_user(username=username,
                                             password=password,
                                             is_deleted=True)
        with self.assertRaises(AuthError, msg="Should raise an error if trying to login with a deleted account"):
            login(self.get_request(), {"username": username, "password": password})
class TestUserRegistration(TestEndpoint):

    def test_user_registration(self):
        # Force e-mail activation
        with self.settings(ACTIVATION_WITH_EMAIL=True):
            username = "testuser"
            user_pass = "123456789"
            user_email = "[email protected]"
            # Register user, will have e-mail activation
            self.call_rpc(self.get_client(), "register", {
                "username": username,
                "password": user_pass,
                "email": user_email
            })
            # Check that mail is sent
            self.assertTrue(len(mail.outbox) > 0, "Mail to user must have been sent")
            test_user = get_user_model().objects.get(username=username)
            # Check that a token has been generated with the length specified by the settings
            self.assertTrue(len(test_user.activate_account_token) > settings.ACTIVATION_TOKEN_LENGTH)
            self.assertTrue(test_user.activate_account_token_expire > timezone.now())
            with self.assertRaises(ValueError, msg="Should raise an error if user doesn't exist"):
                activate_account(self.get_request(), "doesnotexist", "tokendoesnotexist")
            with self.assertRaises(ValueError, msg="Should raise error if token is wrong or expired"):
                activate_account(self.get_request(), test_user.username, "tokendoesnotexist")
            with self.assertRaises(ValueError, msg="Should raise an error if token doesn't exist"):
                activate_account(self.get_request(), test_user.username, None)
            with self.assertRaises(ValueError, msg="Should raise an error if token is blank"):
                activate_account(self.get_request(), test_user.username, "")
            # Should activate properly this time
            activate_account(self.get_request(), test_user.username, test_user.activate_account_token)
            # Get the user again; the account should now be activated
            test_user.refresh_from_db()
            self.assertTrue(test_user.is_account_activated)
            self.assertTrue(test_user.activate_account_token is None)
            self.assertTrue(test_user.activate_account_token_expire is None)

    def test_generate_user_activation(self):
        # Force e-mail activation
        with self.settings(ACTIVATION_WITH_EMAIL=True):
            with self.assertRaises(ValueError, msg="Raise an error if user doesn't exist"):
                generate_user_activation(self.get_request(), "doesnotexist")
            # Gets a test user
            test_user = self.get_default_user()
            # Generates
            generate_user_activation(self.get_request(), test_user.username)
            test_user.refresh_from_db()
            self.assertTrue(len(test_user.activate_account_token) > settings.ACTIVATION_TOKEN_LENGTH)
            self.assertTrue(test_user.activate_account_token_expire > timezone.now())
            test_user.is_account_activated = True
            test_user.save()
            with self.assertRaises(ValueError, msg="Raises an error if user is already activated"):
                generate_user_activation(self.get_request(), test_user.username)


class TestUserPasswordReset(TestEndpoint):

    def test_user_password_reset(self):
        new_password = "testNewPassword12345"
        test_user = self.get_default_user()
        # Raise error if username is wrong
        with self.assertRaises(ValueError):
            generate_password_reset(self.get_request(), "doesnotexist")
        # Should now generate a password reset token
        self.call_rpc(self.get_client(), "generate_password_reset", test_user.username)
        # Check that the generated token is valid
        test_user.refresh_from_db()
        self.assertTrue(len(test_user.reset_password_token) > settings.ACTIVATION_TOKEN_LENGTH)
        self.assertTrue(test_user.reset_password_token_expire > timezone.now())
        # Check that mail is sent
        self.assertTrue(len(mail.outbox) > 0)
        # Should raise error if token is wrong or expired
        with self.assertRaises(ValueError):
            reset_password(self.get_request(), test_user.username, "tokendoesnotexist", new_password)
        with self.assertRaises(ValueError):
            reset_password(self.get_request(), test_user.username, None, new_password)
        with self.assertRaises(ValueError):
            reset_password(self.get_request(), test_user.username, "", new_password)
        # Should now be able to reset the password
        reset_password(self.get_request(), test_user.username, test_user.reset_password_token, new_password)
        # Get the user again; the password should now be reset
        test_user.refresh_from_db()
        self.assertTrue(check_password(new_password, test_user.password))
        self.assertTrue(test_user.reset_password_token is None)
        self.assertTrue(test_user.reset_password_token_expire is None)
class TestRPCDeleteUser(TestEndpoint):

    def setUp(self) -> None:
        self.user = get_user_model().objects.create(username="test1",
                                                    first_name="TestFirstname",
                                                    last_name="TestLastname",
                                                    email="[email protected]")
        self.project = Project.objects.create(owner=self.user)
        create_each_annotation_status_for_user(self.user, self.project)
        self.user2 = get_user_model().objects.create(username="test2",
                                                     first_name="TestFirstname",
                                                     last_name="TestLastname",
                                                     email="[email protected]")
        project2 = Project.objects.create(owner=self.user2)
        Annotation.objects.create(user=self.user,
                                  document=Document.objects.create(project=project2),
                                  status=Annotation.PENDING)
        create_each_annotation_status_for_user(user=self.user2, project=project2)

    def test_user_delete_personal_information(self):
        request = self.get_request()
        request.user = self.user
        user_delete_personal_information(request)
        TestUserModelDeleteUser.check_user_personal_data_deleted(self, self.user)

    def test_user_delete_account(self):
        request = self.get_request()
        request.user = self.user
        user_id = self.user.pk
        user_delete_account(request)
        TestUserModelDeleteUser.check_user_is_deleted(self, user_id)

    def test_admin_delete_user_personal_information(self):
        admin_delete_user_personal_information(self.get_loggedin_request(), self.user.username)
        TestUserModelDeleteUser.check_user_personal_data_deleted(self, self.user)

    def test_admin_delete_user(self):
        user_id = self.user.pk
        admin_delete_user(self.get_loggedin_request(), self.user.username)
        TestUserModelDeleteUser.check_user_is_deleted(self, user_id)
class TestUserConfig(TestEndpoint):

    def test_change_password(self):
        changed_password = "1234567test*"
        change_password(self.get_loggedin_request(), {"password": changed_password})
        user = self.get_default_user()
        user.refresh_from_db()
        self.assertTrue(check_password(changed_password, user.password))

    def test_change_email(self):
        changed_email = "[email protected]"
        change_email(self.get_loggedin_request(), {"email": changed_email})
        user = self.get_default_user()
        user.refresh_from_db()
        self.assertEqual(user.email, changed_email)

    def test_change_receive_mail_notification(self):
        user = self.get_default_user()
        set_user_receive_mail_notifications(self.get_loggedin_request(), False)
        user.refresh_from_db()
        self.assertEqual(user.receive_mail_notifications, False)
        set_user_receive_mail_notifications(self.get_loggedin_request(), True)
        user.refresh_from_db()
        self.assertEqual(user.receive_mail_notifications, True)

    def test_change_user_document_format_preference(self):
        user = self.get_default_user()
        set_user_document_format_preference(self.get_loggedin_request(), "JSON")
        user.refresh_from_db()
        self.assertEqual(user.doc_format_pref, UserDocumentFormatPreference.JSON)
        set_user_document_format_preference(self.get_loggedin_request(), "CSV")
        user.refresh_from_db()
        self.assertEqual(user.doc_format_pref, UserDocumentFormatPreference.CSV)
class TestProject(TestEndpoint):

    def test_create_project(self):
        proj_obj = create_project(self.get_loggedin_request())
        self.assertIsNotNone(proj_obj)
        self.assertTrue('id' in proj_obj)
        self.assertTrue(proj_obj['id'] > 0)
        self.assertTrue('name' in proj_obj)
        saved_proj = Project.objects.get(pk=proj_obj['id'])
        self.assertEqual(saved_proj.owner.pk, self.get_default_user().pk)  # Owner is project creator

    def test_delete_project(self):
        """
        Test to make sure that a deleted project will remove associated documents and annotations. It should
        also remove annotators from the project.
        """
        self.assertEqual(Project.objects.all().count(), 0, "A project already exists")
        self.assertEqual(Document.objects.all().count(), 0, "Documents already exist")
        self.assertEqual(Annotation.objects.all().count(), 0, "Annotation already exists")
        proj = Project.objects.create()
        for i in range(10):
            doc = Document.objects.create(project=proj)
            for i in range(10):
                annotation = Annotation.objects.create(document=doc)
        self.assertEqual(Project.objects.all().count(), 1, "Must have 1 project")
        self.assertEqual(Document.objects.all().count(), 10, "Must have 10 documents")
        self.assertEqual(Annotation.objects.all().count(), 100, "Must have 100 total annotations")

        def create_user_and_add_to_project(i, proj):
            user = get_user_model().objects.create(username=f"annotator_{i}")
            user.annotates.add(proj)
            user.save()
            return user

        annotators = [create_user_and_add_to_project(i, proj) for i in range(10)]
        delete_project(self.get_loggedin_request(), project_id=proj.pk)
        self.assertEqual(Project.objects.all().count(), 0, "Must have no project")
        self.assertEqual(Document.objects.all().count(), 0, "All documents should have been deleted")
        self.assertEqual(Annotation.objects.all().count(), 0, "All annotations should have been deleted")
        for annotator in annotators:
            annotator.refresh_from_db()
            self.assertEqual(annotator.annotates.filter(annotatorproject__status=AnnotatorProject.ACTIVE).first(), None, "Annotator should have been removed from the deleted project")

    def test_update_project(self):
        project = Project.objects.create()
        data = {
            "id": project.pk,
            "name": "Test project",
            "configuration": [
                {
                    "name": "sentiment",
                    "title": "Sentiment",
                    "type": "radio",
                    "options": {
                        "positive": "Positive",
                        "negative": "Negative",
                        "neutral": "Neutral"
                    }
                },
                {
                    "name": "reason",
                    "title": "Reason for your stated sentiment",
                    "type": "textarea"
                }
            ],
        }
        self.assertTrue(update_project(self.get_loggedin_request(), data))
        saved_proj = Project.objects.get(pk=project.pk)
        self.assertEqual(len(saved_proj.configuration), 2)

    def test_import_project_config(self):
        project = Project.objects.create()
        data = {
            "name": "Test project",
            "description": "Desc",
            "annotator_guideline": "Test guideline",
            "configuration": [
                {
                    "name": "sentiment",
                    "title": "Sentiment",
                    "type": "radio",
                    "options": {
                        "positive": "Positive",
                        "negative": "Negative",
                        "neutral": "Neutral"
                    }
                },
                {
                    "name": "reason",
                    "title": "Reason for your stated sentiment",
                    "type": "textarea"
                }
            ],
            "annotations_per_doc": 4,
            "annotator_max_annotation": 0.8,
            "annotation_timeout": 50,
            "document_input_preview": {
                "text": "Doc text"
            }
        }
        import_project_config(self.get_loggedin_request(), project.pk, data)
        project.refresh_from_db()
        self.assertEqual(project.name, data["name"])
        self.assertEqual(project.description, data["description"])
        self.assertEqual(project.annotator_guideline, data["annotator_guideline"])
        self.assertListEqual(project.configuration, data["configuration"])
        self.assertEqual(project.annotations_per_doc, data["annotations_per_doc"])
        self.assertEqual(project.annotator_max_annotation, data["annotator_max_annotation"])
        self.assertEqual(project.annotation_timeout, data["annotation_timeout"])
        self.assertDictEqual(project.document_input_preview, data["document_input_preview"])

    def test_export_project_config(self):
        project = Project.objects.create()
        data = {
            "id": project.pk,
            "name": "Test project",
            "description": "Desc",
            "annotator_guideline": "Test guideline",
            "configuration": [
                {
                    "name": "sentiment",
                    "title": "Sentiment",
                    "type": "radio",
                    "options": {
                        "positive": "Positive",
                        "negative": "Negative",
                        "neutral": "Neutral"
                    }
                },
                {
                    "name": "reason",
                    "title": "Reason for your stated sentiment",
                    "type": "textarea"
                }
            ],
            "annotations_per_doc": 4,
            "annotator_max_annotation": 0.8,
            "annotation_timeout": 50,
            "document_input_preview": {
                "text": "Doc text"
            }
        }
        update_project(self.get_loggedin_request(), data)
        config_export_dict = export_project_config(self.get_loggedin_request(), project.pk)
        self.assertEqual(config_export_dict["name"], data["name"])
        self.assertEqual(config_export_dict["description"], data["description"])
        self.assertEqual(config_export_dict["annotator_guideline"], data["annotator_guideline"])
        self.assertListEqual(config_export_dict["configuration"], data["configuration"])
        self.assertEqual(config_export_dict["annotations_per_doc"], data["annotations_per_doc"])
        self.assertEqual(config_export_dict["annotator_max_annotation"], data["annotator_max_annotation"])
        self.assertEqual(config_export_dict["annotation_timeout"], data["annotation_timeout"])
        self.assertDictEqual(config_export_dict["document_input_preview"], data["document_input_preview"])

    def test_clone_project(self):
        project = Project.objects.create()
        data = {
            "id": project.pk,
            "name": "Test project",
            "description": "Desc",
            "annotator_guideline": "Test guideline",
            "configuration": [
                {
                    "name": "sentiment",
                    "title": "Sentiment",
                    "type": "radio",
                    "options": {
                        "positive": "Positive",
                        "negative": "Negative",
                        "neutral": "Neutral"
                    }
                },
                {
                    "name": "reason",
                    "title": "Reason for your stated sentiment",
                    "type": "textarea"
                }
            ],
            "annotations_per_doc": 4,
            "annotator_max_annotation": 0.8,
            "annotation_timeout": 50,
            "document_input_preview": {
                "text": "Doc text"
            }
        }
        update_project(self.get_loggedin_request(), data)
        project.refresh_from_db()
        # Add some documents to the project
        for i in range(5):
            Document.objects.create(project=project, doc_type=DocumentType.TRAINING)
        for i in range(10):
            Document.objects.create(project=project, doc_type=DocumentType.TEST)
        for i in range(20):
            Document.objects.create(project=project, doc_type=DocumentType.ANNOTATION)
        self.assertEqual(5, project.num_training_documents)
        self.assertEqual(10, project.num_test_documents)
        self.assertEqual(20, project.num_documents)
        # Add annotator to project
        ann1 = get_user_model().objects.create(username="ann1")
        project.add_annotator(ann1)
        self.assertEqual(1, project.annotators.all().count())
        cloned_project_dict = clone_project(self.get_loggedin_request(), project.pk)
        cloned_project = Project.objects.get(pk=cloned_project_dict["id"])
        # Must not have associated documents
        self.assertEqual(0, cloned_project.num_training_documents)
        self.assertEqual(0, cloned_project.num_test_documents)
        self.assertEqual(0, cloned_project.num_documents)
        # Must not have associated users
        self.assertEqual(0, cloned_project.annotators.all().count())

    def test_get_projects(self):
        num_projects = 10
        for i in range(num_projects):
            Project.objects.create(name=f"Project {i}", owner=self.get_default_user())
        result = get_projects(self.get_loggedin_request())
        self.assertEqual(len(result["items"]), num_projects)
        self.assertEqual(result["total_count"], num_projects)
        page_size = 5
        # Get page 1
        result = get_projects(self.get_loggedin_request(), 1, page_size)
        self.assertEqual(len(result["items"]), page_size)
        self.assertEqual(result["total_count"], num_projects)
        # Get page 2
        result = get_projects(self.get_loggedin_request(), 2, page_size)
        self.assertEqual(len(result["items"]), page_size)
        self.assertEqual(result["total_count"], num_projects)
        # Get with filtering
        result = get_projects(self.get_loggedin_request(), 1, page_size, "8")  # Get project with no. 8 in title
        self.assertEqual(len(result["items"]), 1)
        self.assertEqual(result["total_count"], 1)
class TestDocument(TestEndpoint):

    def test_create_document(self):
        proj = Project.objects.create(owner=self.get_default_user())
        doc_obj = {
            "text": "Document text"
        }
        test_doc_obj = {
            "text": "Test document text"
        }
        train_doc_obj = {
            "text": "Train document text"
        }
        # Check docs count
        proj.refresh_from_db()
        self.assertEqual(0, proj.num_documents)
        self.assertEqual(0, proj.num_test_documents)
        self.assertEqual(0, proj.num_training_documents)
        # Add an annotation doc and check its content
        doc_id = add_project_document(self.get_loggedin_request(), proj.pk, doc_obj)
        self.assertTrue(doc_id > 0)
        doc = Document.objects.get(pk=doc_id)
        self.assertEqual(doc.project.pk, proj.pk)
        self.assertEqual(doc.data["text"], doc_obj["text"])  # Data check
        # Add 2 test docs and check the first one's content
        test_doc_id = add_project_test_document(self.get_loggedin_request(), proj.pk, test_doc_obj)
        add_project_test_document(self.get_loggedin_request(), proj.pk, test_doc_obj)
        self.assertTrue(test_doc_id > 0)
        test_doc = Document.objects.get(pk=test_doc_id)
        self.assertEqual(test_doc.project.pk, proj.pk)
        self.assertEqual(test_doc.data["text"], test_doc_obj["text"])  # Data check
        # Add 3 training docs and check the first one's content
        train_doc_id = add_project_training_document(self.get_loggedin_request(), proj.pk, train_doc_obj)
        add_project_training_document(self.get_loggedin_request(), proj.pk, train_doc_obj)
        add_project_training_document(self.get_loggedin_request(), proj.pk, train_doc_obj)
        self.assertTrue(train_doc_id > 0)
        train_doc = Document.objects.get(pk=train_doc_id)
        self.assertEqual(train_doc.project.pk, proj.pk)
        self.assertEqual(train_doc.data["text"], train_doc_obj["text"])  # Data check
        # Check docs count
        proj.refresh_from_db()
        self.assertEqual(1, proj.num_documents)
        self.assertEqual(2, proj.num_test_documents)
        self.assertEqual(3, proj.num_training_documents)

    def test_get_project_documents(self):
        num_projects = 10
        num_docs_per_project = 20
        num_train_docs_per_project = 10
        num_test_docs_per_project = 15
        num_annotations_per_doc = 5
        project_ids = []
        for i in range(num_projects):
            project = Project.objects.create(name=f"Project {i}", owner=self.get_default_user())
            project_ids.append(project.id)
            # Annotation docs
            for j in range(num_docs_per_project):
                doc = Document.objects.create(project=project, doc_type=DocumentType.ANNOTATION)
                for k in range(num_annotations_per_doc):
                    annotation = Annotation.objects.create(document=doc, user=self.get_default_user())
            # Training docs
            for j in range(num_train_docs_per_project):
                doc = Document.objects.create(project=project, doc_type=DocumentType.TRAINING)
                for k in range(num_annotations_per_doc):
                    annotation = Annotation.objects.create(document=doc, user=self.get_default_user())
            # Test docs
            for j in range(num_test_docs_per_project):
                doc = Document.objects.create(project=project, doc_type=DocumentType.TEST)
                for k in range(num_annotations_per_doc):
                    annotation = Annotation.objects.create(document=doc, user=self.get_default_user())
        test_project_id = project_ids[0]
        # Gets all docs in a project
        result = get_project_documents(self.get_loggedin_request(), test_project_id)
        self.assertEqual(len(result["items"]), num_docs_per_project)
        self.assertEqual(result["total_count"], num_docs_per_project)
        # Paginate docs
        page_size = 5
        num_pages = 4
        for i in range(num_pages):
            result = get_project_documents(self.get_loggedin_request(), test_project_id, i + 1, page_size)
            self.assertEqual(len(result["items"]), page_size)
            self.assertEqual(result["total_count"], num_docs_per_project)
        # Gets all training docs in a project
        result = get_project_training_documents(self.get_loggedin_request(), test_project_id)
        self.assertEqual(len(result["items"]), num_train_docs_per_project)
        self.assertEqual(result["total_count"], num_train_docs_per_project)
        # Paginate training docs
        page_size = 5
        num_pages = 2
        for i in range(num_pages):
            result = get_project_training_documents(self.get_loggedin_request(), test_project_id, i + 1, page_size)
            self.assertEqual(len(result["items"]), page_size)
            self.assertEqual(result["total_count"], num_train_docs_per_project)
        # Gets all test docs in a project
        result = get_project_test_documents(self.get_loggedin_request(), test_project_id)
        self.assertEqual(len(result["items"]), num_test_docs_per_project)
        self.assertEqual(result["total_count"], num_test_docs_per_project)
        # Paginate test docs
        page_size = 5
        num_pages = 3
        for i in range(num_pages):
            result = get_project_test_documents(self.get_loggedin_request(), test_project_id, i + 1, page_size)
            self.assertEqual(len(result["items"]), page_size)
            self.assertEqual(result["total_count"], num_test_docs_per_project)
class TestAnnotation(TestEndpoint):

    def test_add_annotation(self):
        proj = Project.objects.create(owner=self.get_default_user())
        doc = Document.objects.create(project=proj)
        initial_annotation_data = {"label1": "Annotation content", "label2": "Someothercontent"}
        annote_id = add_document_annotation(self.get_loggedin_request(),
                                            doc.pk,
                                            initial_annotation_data)
        annotation = Annotation.objects.get(pk=annote_id)
        self.assertEqual(annotation.user.pk, self.get_default_user().pk)  # Annotation linked to user
        self.assertDictEqual(annotation.data, initial_annotation_data)  # Data check


class TestDocumentAndAnnotation(TestEndpoint):

    def test_delete_document_and_annotation(self):
        proj = Project.objects.create(owner=self.get_default_user())
        doc = Document.objects.create(project=proj)
        annote = Annotation.objects.create(document=doc)
        annote2 = Annotation.objects.create(document=doc)
        self.assertTrue(Document.objects.count() == 1, "Must have a document")
        self.assertTrue(Annotation.objects.count() == 2, "Must have 2 annotations")
        delete_documents_and_annotations(self.get_loggedin_request(), [], [annote2.pk])
        self.assertTrue(Annotation.objects.count() == 1, "Must have 1 annotation")
        delete_documents_and_annotations(self.get_loggedin_request(), [doc.pk], [])
        self.assertTrue(Document.objects.count() == 0, "Must have 0 documents")
        self.assertTrue(Annotation.objects.count() == 0, "Must have 0 annotations")
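# Note (based only on the behaviour asserted above): delete_documents_and_annotations
# takes two lists, document ids and annotation ids, and deleting a document also
# removes its remaining annotations.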
class TestAnnotationExport(TestEndpoint):

    def test_rpc_get_annotations_endpoint(self):
        user = self.get_default_user()
        user.is_manager = True
        user.is_account_activated = True
        user.save()
        c = self.get_loggedin_client()
        # Setup
        project = Project.objects.create()
        with open('examples/documents.json') as f:
            for input_document in json.load(f):
                document = Document.objects.create(project=project, data=input_document)
                annotation = Annotation.objects.create(user=user, document=document)
                annotation.data = {"testannotation": "test"}
        # Test the endpoint
        response = c.post("/rpc/", {"jsonrpc": "2.0", "method": "get_annotations", "id": 20, "params": [project.id]},
                          content_type="application/json")
        self.assertEqual(response.status_code, 200)
        # Test the get_annotations function
        from backend.rpc import get_annotations
        annotations = get_annotations(None, project.id)
        self.assertIsNotNone(annotations)
        self.assertEqual(type(annotations), list)
class TestUsers(TestEndpoint):

    def test_list_possible_annotators(self):
        user = self.get_default_user()
        ann1 = get_user_model().objects.create(username="ann1")
        ann2 = get_user_model().objects.create(username="ann2")
        ann3 = get_user_model().objects.create(username="ann3")
        ann4 = get_user_model().objects.create(username="ann4", is_deleted=True)
        proj = Project.objects.create(owner=user)
        proj2 = Project.objects.create(owner=user)
        # Listing all annotators for projects 1 and 2 without anyone added to a project
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(4, len(possible_annotators), "Should list all users without deleted user")
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj2.pk)
        self.assertEqual(4, len(possible_annotators), "Should list all users without deleted user")
        project_annotators = get_project_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(0, len(project_annotators))
        # Add two project annotators to project 1
        add_project_annotator(self.get_loggedin_request(), proj.pk, ann1.username)
        add_project_annotator(self.get_loggedin_request(), proj.pk, ann2.username)
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(2, len(possible_annotators), "Associate 2 users with a project, should list 2 users")
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj2.pk)
        self.assertEqual(2, len(possible_annotators), "Associate 2 users with a project, should list 2 users")
        project_annotators = get_project_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(2, len(project_annotators))
        # Remove an annotator from project 1
        remove_project_annotator(self.get_loggedin_request(), proj.pk, ann1.username)
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(2, len(possible_annotators), "Remove 1 user from project, should still list 2 users")
        possible_annotators = get_possible_annotators(self.get_loggedin_request(), proj_id=proj2.pk)
        self.assertEqual(3, len(possible_annotators), "Remove 1 user from project 2, should list 3 users")
        project_annotators = get_project_annotators(self.get_loggedin_request(), proj_id=proj.pk)
        self.assertEqual(2, len(project_annotators))
class TestUserAnnotationList(TestEndpoint):

    def test_get_user_annotated_projects(self):
        user = self.get_default_user()
        # Create a project with user annotation
        project = Project.objects.create(name="Test project", owner=user)
        num_docs = 30
        num_test_docs = 10
        num_train_docs = 15
        num_annotations = 10
        current_num_annotation = 0
        # Create documents with a certain number of annotations
        for i in range(num_docs):
            doc = Document.objects.create(project=project)
            if current_num_annotation < num_annotations:
                annotation = Annotation.objects.create(document=doc, user=user, status=Annotation.COMPLETED)
                current_num_annotation += 1
        # Create test and train documents with an annotation each
        for i in range(num_test_docs):
            doc = Document.objects.create(project=project, doc_type=DocumentType.TEST)
            Annotation.objects.create(document=doc, user=user, status=Annotation.COMPLETED)
        for i in range(num_train_docs):
            doc = Document.objects.create(project=project, doc_type=DocumentType.TRAINING)
            Annotation.objects.create(document=doc, user=user, status=Annotation.COMPLETED)
        # Create projects without annotations
        for i in range(10):
            Project.objects.create(name=f"No annotation {i}", owner=user)
        # Only a single project has the user's annotations
        projects_list = get_user_annotated_projects(self.get_loggedin_request())
        self.assertEqual(len(projects_list), 1)
        # Gets all docs with annotation
        result = get_user_annotations_in_project(self.get_loggedin_request(), projects_list[0]["id"], 1)
        self.assertEqual(len(result["items"]), num_annotations)
        self.assertEqual(result["total_count"], num_annotations)
        for doc in result["items"]:
            self.assertEqual("Annotation", doc["doc_type"])
        # Gets paginated results
        page_size = 5
        result = get_user_annotations_in_project(self.get_loggedin_request(), projects_list[0]["id"], 1, page_size)
        self.assertEqual(len(result["items"]), page_size)
        self.assertEqual(result["total_count"], num_annotations)
        result = get_user_annotations_in_project(self.get_loggedin_request(), projects_list[0]["id"], 2, page_size)
        self.assertEqual(len(result["items"]), page_size)
        self.assertEqual(result["total_count"], num_annotations)
class TestUserManagement(TestEndpoint):

    def setUp(self):
        user = self.get_default_user()
        user.is_staff = True
        user.save()
        self.ann1 = get_user_model().objects.create(username="ann1")
        get_user_model().objects.create(username="ann2")
        get_user_model().objects.create(username="ann3")

    def test_get_all_users(self):
        c = self.get_loggedin_client()
        response = self.call_rpc(c, "get_all_users")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)["result"]), 4)

    def test_get_user(self):
        c = self.get_loggedin_client()
        response = self.call_rpc(c, "get_user", "ann1")
        self.assertEqual(response.status_code, 200)

    def test_admin_update_user(self):
        c = self.get_loggedin_client()
        data = {
            "id": self.ann1.id,
            "username": "ann1",
            "email": "[email protected]",
            "is_manager": True,
            "is_admin": False,
            "is_activated": False
        }
        response = self.call_rpc(c, "admin_update_user", data)
        self.assertEqual(response.status_code, 200)

    def test_admin_change_user_password(self):
        changed_password = "1234567test*"
        c = self.get_loggedin_client()
        self.call_rpc(c, "admin_update_user_password", "ann1", changed_password)
        ann1_user = get_user_model().objects.get(username="ann1")
        self.assertTrue(check_password(changed_password, ann1_user.password))
class TestAnnotationTaskManager(TestEndpoint):
def annotation_info(self, annotation_id, message):
annotation = Annotation.objects.get(pk=annotation_id)
print(
f"{message} : [annotation id] {annotation.pk} [document {annotation.document.id}] [user {annotation.user.pk}]")
def test_annotation_task(self):
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
ann1 = get_user_model().objects.create(username="ann1")
ann1_request = self.get_request()
ann1_request.user = ann1
proj = Project.objects.create(owner=manager)
proj.annotations_per_doc = 3
proj.annotator_max_annotation = 0.6 # Annotator can annotator max of 60% of docs
# Create documents
num_docs = 10
docs = list()
for i in range(num_docs):
docs.append(Document.objects.create(project=proj))
self.assertEqual(proj.documents.count(), num_docs)
# Get blank annotation task, user has no project association
self.assertIsNone(get_annotation_task(ann1_request))
# Add ann1 as the project's annotator
self.assertTrue(add_project_annotator(manager_request, proj.id, ann1.username))
# Reject first task
ann1.refresh_from_db()
task_context = get_annotation_task(ann1_request)
rejected_id = task_context['annotation_id']
self.annotation_info(rejected_id, "Rejected annotation")
self.assertEqual(proj.num_occupied_tasks, 1, "Num occupied must be 1")
reject_annotation_task(ann1_request, task_context["annotation_id"])
self.assertEqual(proj.num_occupied_tasks, 0, "Num occupied should be zero after rejection")
# Time out the second task
ann1.refresh_from_db()
task_context = get_annotation_task(ann1_request)
timed_out_id = task_context['annotation_id']
timed_out_annotation = Annotation.objects.get(pk=timed_out_id)
timed_out_annotation.timed_out = timezone.now()
timed_out_annotation.save()
self.annotation_info(timed_out_id, "Forcing timeout")
# Complete the rest of annotation tasks
for i in range(6):
ann1.refresh_from_db()
task_context = get_annotation_task(ann1_request)
current_annotation_id = task_context['annotation_id']
self.annotation_info(current_annotation_id, "Annotated")
proj.refresh_from_db()
self.assertNotEqual(rejected_id, task_context['annotation_id'])
self.assertIsNotNone(task_context)
self.assertGreater(task_context["annotation_id"], 0)
self.assertTrue(i == proj.num_completed_tasks, f"Num completed should be {i} ")
self.assertTrue(i + 1 == proj.num_occupied_tasks, f"Num occupied should be {i + 1}")
second_context = get_annotation_task(ann1_request)
proj.refresh_from_db()
self.assertEqual(current_annotation_id, second_context['annotation_id'],
"Calling get task again without completing must return the same annotation task")
complete_annotation_task(ann1_request, task_context["annotation_id"], {})
proj.refresh_from_db()
self.assertTrue(i + 1 == proj.num_completed_tasks, f"Num completed should be {i + 1}")
self.assertTrue(i + 1 == proj.num_occupied_tasks, f"Num occupied should be {i + 1}")
# Default ratio is set at 0.6 so after making 6 annotations out of 10 docs
# we expect the 7th one to be in reach of quota
ann1.refresh_from_db()
task_context = get_annotation_task(ann1_request)
self.assertIsNone(task_context)
def test_get_annotation_task_with_id(self):
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
ann1 = get_user_model().objects.create(username="ann1")
ann1_request = self.get_request()
ann1_request.user = ann1
proj = Project.objects.create(owner=manager)
annotation_data = {
"test_label1": "test_value1",
"test_label2": "test_value2",
}
# Create documents and annotations
num_docs = 10
for i in range(num_docs):
doc = Document.objects.create(project=proj)
anno = Annotation.objects.create(document=doc, user=ann1, status=Annotation.COMPLETED)
anno.data = annotation_data
ann1.refresh_from_db()
all_annotations = ann1.annotations.all()
for annotation in all_annotations:
result = get_annotation_task_with_id(ann1_request, annotation.id)
self.assertEqual(annotation.id, result["annotation_id"])
self.assertEqual(annotation.document.id, result["document_id"])
self.assertEqual(annotation.document.project.id, result["project_id"])
self.assertDictEqual(annotation_data, result["annotation_data"])
def test_allowed_to_annotate(self):
"""
add_project_annotator allows annotators to perform annotation on real dataset by default if there's no
testing or training stages
"""
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
ann1 = get_user_model().objects.create(username="ann1")
ann1_request = self.get_request()
ann1_request.user = ann1
proj = Project.objects.create(owner=manager)
# Create documents
num_docs = 10
for i in range(num_docs):
Document.objects.create(project=proj)
# Add ann1 as the proj_test_stage's annotator and get task, allowed to annotate by default if
# there's no testing or training stages
self.assertTrue(add_project_annotator(manager_request, proj.id, ann1.username))
self.assertTrue(get_annotation_task(ann1_request))
def test_task_rejection(self):
"""
User should be removed from the project if they don't have more tasks due to rejecting
documents.
"""
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
ann1 = get_user_model().objects.create(username="ann1")
ann1_request = self.get_request()
ann1_request.user = ann1
proj = Project.objects.create(owner=manager)
proj.annotations_per_doc = 3
proj.annotator_max_annotation = 0.6 # Annotator can annotator max of 60% of docs
# Create documents
num_docs = 10
docs = list()
for i in range(num_docs):
docs.append(Document.objects.create(project=proj))
# Add ann1 as the project's annotator
self.assertTrue(add_project_annotator(manager_request, proj.id, ann1.username))
ann1.refresh_from_db()
# Reject all tasks
for i in range(num_docs+1):
task_context = get_annotation_task(ann1_request)
if task_context is None:
ann1.refresh_from_db()
self.assertTrue(ann1.annotates.filter(annotatorproject__status=AnnotatorProject.ACTIVE).distinct().first() is None)
return
else:
reject_annotation_task(ann1_request, task_context["annotation_id"])
self.assertTrue(False, "All documents rejected but annotator still getting annotation tasks")
def test_multi_user_annotation_task(self):
num_annotators = 10
num_documents = 10
num_annotations_per_doc = 5
annotator_max_annotation = 0.6 # Annotator can annotator max of 60% of docs
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
annotators = [get_user_model().objects.create(username=f"annotator{i}") for i in range(num_annotators)]
proj = Project.objects.create(owner=manager)
proj.annotations_per_doc = num_annotations_per_doc
proj.annotator_max_annotation = annotator_max_annotation
proj.save()
# Make them project annotator and allow them to annotate
for annotator in annotators:
self.assertTrue(add_project_annotator(manager_request, proj.id, annotator.username))
documents = [Document.objects.create(project=proj) for i in range(num_documents)]
for i in range(num_annotations_per_doc + 1):
for annotator in annotators:
self.perform_annotation(annotator, expect_completed=proj.num_annotation_tasks_remaining < 1)
print(
f"Remaining/completed/total {proj.num_annotation_tasks_remaining}/{proj.num_completed_tasks}/{proj.num_annotation_tasks_total}")
proj.refresh_from_db()
self.assertEqual(proj.num_annotation_tasks_remaining, 0, "There must be no remaining tasks")
for doc in documents:
doc.refresh_from_db()
self.assertEqual(doc.num_completed_annotations, proj.annotations_per_doc, "Documents must have exact number of annotations when finished")
user_id_set = set()
for anno in doc.annotations.filter(status=Annotation.COMPLETED):
self.assertFalse(anno.user.pk in user_id_set, "Annotator must only annotate a document once")
user_id_set.add(anno.user.pk)
print(f"Doc ID: {doc.pk} completed_annotations: {doc.num_completed_annotations} annotator_ids: {','.join(str(v) for v in user_id_set)}")
def get_annotator_request(self, annotator):
annotator.refresh_from_db()
request = self.get_request()
request.user = annotator
return request
def perform_annotation(self, annotator, expect_completed=False):
request = self.get_annotator_request(annotator)
task_context = get_annotation_task(request)
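        # When a task is available the context carries at least "annotation_id",
        # "document_id", "project_id" and "annotation_data"; otherwise it is None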
if expect_completed:
self.assertTrue(task_context is None)
return
        if task_context:  # do nothing if no annotation task was returned
annotation_id = task_context['annotation_id']
self.annotation_info(annotation_id, "Annotated")
complete_annotation_task(request, annotation_id, {})
def reject_annotation(self, annotator):
request = self.get_annotator_request(annotator)
task_context = get_annotation_task(request)
annotation_id = task_context['annotation_id']
self.annotation_info(annotation_id, "Rejected")
reject_annotation_task(request, annotation_id)
def test_completing_project(self):
""" Case where project finishes before an annotator reaches quota """
num_annotators = 100
num_documents = 10
num_annotations_per_doc = 3
num_total_tasks = num_documents * num_annotations_per_doc
        annotator_max_annotation = 0.6  # Annotator can annotate a max of 60% of docs
# Create users and project, add them as annotators
manager = self.get_default_user()
manager_request = self.get_loggedin_request()
annotators = [get_user_model().objects.create(username=f"annotator{i}") for i in range(num_annotators)]
ann1 = get_user_model().objects.create(username="ann1")
ann1_request = self.get_request()
ann1_request.user = ann1
proj = Project.objects.create(owner=manager)
proj.annotations_per_doc = num_annotations_per_doc
proj.annotator_max_annotation = annotator_max_annotation
proj.save()
# Make them project annotator and allow to annotate
for annotator in annotators:
self.assertTrue(add_project_annotator(manager_request, proj.id, annotator.username))
documents = [Document.objects.create(project=proj) for i in range(num_documents)]
annotation_count = 0
for i in range(num_annotations_per_doc):
for annotator in annotators:
self.assertFalse(proj.is_completed)
self.perform_annotation(annotator)
annotation_count += 1
if num_total_tasks - annotation_count < 1:
break
if num_total_tasks - annotation_count < 1:
break
self.assertTrue(proj.is_completed)
self.assertEqual(0, proj.num_annotators)
def test_leave_project(self):
""" Tests a case where user leaves the project they're active in"""
# Create project and add annotator
project = Project.objects.create()
annotator = get_user_model().objects.create(username="annotator")
annotator2 = get_user_model().objects.create(username="annotator2")
project.add_annotator(annotator)
# They should be marked as active
annotator_proj = AnnotatorProject.objects.get(project=project, annotator=annotator)
self.assertEqual(AnnotatorProject.ACTIVE, annotator_proj.status,
"Annotator status should be marked as active")
# Leave project
req = self.get_loggedin_request()
req.user = annotator
annotator_leave_project(req)
# Should be marked as complete
annotator_proj.refresh_from_db()
self.assertEqual(AnnotatorProject.COMPLETED, annotator_proj.status,
"Annotator status should be marked as completed")
# Should raise an exception if user is not associated with project
with self.assertRaises(Exception):
req.user = annotator2
annotator_leave_project(req)
class TestAnnotationTaskManagerTrainTestMode(TestEndpoint):
def setUp(self):
# Create users and project, add them as annotators
self.manager = self.get_default_user()
self.manager_request = self.get_loggedin_request()
self.num_annotators = 6
annotator_users = [get_user_model().objects.create(username=f"ann{i}") for i in range(self.num_annotators)]
# self.annotators is a list of (user, request) tuples
self.annotators = []
for ann in annotator_users:
req = self.get_request()
req.user = ann
self.annotators.append((ann, req))
self.ann1, self.ann1_request = self.annotators[0]
self.proj = Project.objects.create(owner=self.manager)
self.proj.annotations_per_doc = 3
        self.proj.annotator_max_annotation = 0.6  # Annotator can annotate a max of 60% of docs
# Example sentiment config, single label
self.proj.configuration = [
{
"name": "htmldisplay",
"type": "html",
"text": "{{{text}}}"
},
{
"name": "sentiment",
"type": "radio",
"title": "Sentiment",
"description": "Please select a sentiment of the text above.",
"options": {
"negative": "Negative",
"neutral": "Neutral",
"positive": "Positive"
}
}
]
self.proj.save()
# Create documents
self.num_docs = 20
self.docs = []
for i in range(self.num_docs):
self.docs.append(Document.objects.create(project=self.proj, data={
"text": f"Document {i}"
}))
# Create training documents
self.num_training_docs = 5
self.training_docs = []
for i in range(self.num_training_docs):
            self.training_docs.append(Document.objects.create(project=self.proj,
doc_type=DocumentType.TRAINING,
data={
"text": f"Document {i}",
"gold": {
"sentiment": {
"value": "positive",
}
}
}))
        # Create test documents
self.num_test_docs = 10
self.test_docs = []
for i in range(self.num_test_docs):
            self.test_docs.append(Document.objects.create(project=self.proj,
doc_type=DocumentType.TEST,
data={
"text": f"Document {i}",
"gold": {
"sentiment": {
"value": "positive",
}
}
}))
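        # The fixture now holds 20 annotation documents, 5 training documents and
        # 10 test documents; gold answers for training and test documents are
        # stored under each document's "gold" key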
def test_annotation_task_with_training_only(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = False
self.proj.can_annotate_after_passing_training_and_test = False
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete training annotations
self.assertEqual(self.num_training_docs, self.complete_annotations(self.num_training_docs, "Training"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TRAINING))
# No task until annotator is allowed to annotate
self.assertFalse("annotation_id" in get_annotation_task(self.ann1_request))
project_annotator_allow_annotation(self.manager_request, self.proj.id, self.ann1.username)
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotation_task_with_training_only_auto_annotate(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = False
self.proj.can_annotate_after_passing_training_and_test = True
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete training annotations
self.assertEqual(self.num_training_docs, self.complete_annotations(self.num_training_docs, "Training"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TRAINING))
        # Annotator is automatically allowed to annotate after completing training
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotation_task_with_test_only(self):
self.proj.has_training_stage = False
self.proj.has_test_stage = True
self.proj.can_annotate_after_passing_training_and_test = False
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete test annotations
self.assertEqual(self.num_test_docs, self.complete_annotations(self.num_test_docs, "Test"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_test_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TEST))
# No task until annotator is allowed to annotate
self.assertFalse("annotation_id" in get_annotation_task(self.ann1_request))
project_annotator_allow_annotation(self.manager_request, self.proj.id, self.ann1.username)
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotation_task_with_test_and_train(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = True
self.proj.can_annotate_after_passing_training_and_test = False
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete training annotations
self.assertEqual(self.num_training_docs, self.complete_annotations(self.num_training_docs, "Training"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TRAINING))
# Complete test annotations
self.assertEqual(self.num_test_docs, self.complete_annotations(self.num_test_docs, "Test"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_test_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TEST))
# No task until annotator is allowed to annotate
self.assertFalse("annotation_id" in get_annotation_task(self.ann1_request))
project_annotator_allow_annotation(self.manager_request, self.proj.id, self.ann1.username)
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotation_task_with_test_and_train_auto_pass(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = True
self.proj.min_test_pass_threshold = 1.0
self.proj.can_annotate_after_passing_training_and_test = True
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete training annotations
self.assertEqual(self.num_training_docs, self.complete_annotations(self.num_training_docs, "Training"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TRAINING))
# Complete test annotations
self.assertEqual(self.num_test_docs, self.complete_annotations(self.num_test_docs, "Test"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_test_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TEST))
# Pass mark above threshold elevates user to annotator
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotation_task_with_test_and_train_auto_pass_fail(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = True
self.proj.min_test_pass_threshold = 0.6
self.proj.can_annotate_after_passing_training_and_test = True
self.proj.save()
# Add annotator 1 to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, self.ann1.username))
# Complete training annotations
self.assertEqual(self.num_training_docs, self.complete_annotations(self.num_training_docs, "Training"))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TRAINING))
# Complete test annotations
self.assertEqual(self.num_test_docs, self.complete_annotations(self.num_test_docs, "Test", use_wrong_answer=True))
# Expect zero score, user not elevated to annotator
self.proj.refresh_from_db()
self.assertEqual(0,
self.proj.get_annotator_document_score(self.ann1, DocumentType.TEST))
# No task until annotator is allowed to annotate
self.assertFalse("annotation_id" in get_annotation_task(self.ann1_request))
project_annotator_allow_annotation(self.manager_request, self.proj.id, self.ann1.username)
self.assertTrue(get_annotation_task(self.ann1_request))
# Then complete the task normally
self.assertEqual(self.proj.max_num_task_per_annotator, self.complete_annotations(self.num_docs, "Annotation"))
self.assertEqual(0, self.proj.num_annotator_task_remaining(self.ann1))
def test_annotations_per_doc_not_enforced_for_training_or_test(self):
self.proj.has_training_stage = True
self.proj.has_test_stage = True
self.proj.min_test_pass_threshold = 1.0
self.proj.can_annotate_after_passing_training_and_test = True
self.proj.save()
docs_annotated_per_user = []
for (i, (ann_user, _)) in enumerate(self.annotators):
# Add to project
self.assertTrue(add_project_annotator(self.manager_request, self.proj.id, ann_user.username))
# Every annotator should be able to complete every training document, even though
# max annotations per document is less than the total number of annotators
self.assertEqual(self.num_training_docs,
self.complete_annotations(self.num_training_docs, "Training", annotator=i))
# Expect perfect score
self.proj.refresh_from_db()
self.assertEqual(self.num_training_docs,
self.proj.get_annotator_document_score(ann_user, DocumentType.TRAINING))
# Every annotator should be able to complete every test document, even though
# max annotations per document is less than the total number of annotators
self.assertEqual(self.num_test_docs,
self.complete_annotations(self.num_test_docs, "Test", annotator=i))
# Expect perfect score
self.proj.refresh_from_db()
            self.assertEqual(self.num_test_docs,
                             self.proj.get_annotator_document_score(ann_user, DocumentType.TEST))
# Now attempt to complete task normally
num_annotated = self.complete_annotations(self.num_docs, "Annotation", annotator=i)
docs_annotated_per_user.append(num_annotated)
self.assertLessEqual(num_annotated, self.proj.max_num_task_per_annotator,
f"Annotator {i} was allowed to annotate too many documents")
# All documents should now be fully annotated
self.assertEqual(sum(docs_annotated_per_user), self.num_docs * self.proj.annotations_per_doc,
"Project was not fully annotated")
# But at least one user must have annotated strictly less than max_num_task_per_annotator,
# since the project was set up such that 20 docs * 3 annotations per doc is less than
# 6 annotators * 12 docs per annotator (60% of the corpus) - this verifies that the max
# annotators per document _does_ apply to the annotation phase
self.assertTrue(any(n < self.proj.max_num_task_per_annotator for n in docs_annotated_per_user),
"All users got their full quota of documents - this is more than 3 annotations per doc")
def complete_annotations(self, num_annotations_to_complete, expected_doc_type_str, *, annotator=0,
use_wrong_answer=False):
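        # Helper: serves up to num_annotations_to_complete tasks to the chosen
        # annotator, asserts each served document has the expected type string
        # and answers "sentiment" (correctly unless use_wrong_answer is set)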
answer = "positive"
if use_wrong_answer:
answer = "negative"
ann, ann_req = self.annotators[annotator]
        # Count how many tasks are actually served and completed
num_completed_tasks = 0
for i in range(num_annotations_to_complete):
task_context = get_annotation_task(ann_req)
if task_context:
self.assertEqual(expected_doc_type_str, task_context.get("document_type"),
f"Document type does not match in task {task_context!r}, " +
"annotator {ann.username}, document {i}")
complete_annotation_task(ann_req, task_context["annotation_id"], {"sentiment": answer})
num_completed_tasks += 1
return num_completed_tasks
class TestAnnotationChange(TestEndpoint):
def test_change_annotation_history(self):
# Create initial project with annotation
project = Project.objects.create(name="Test1")
doc = Document.objects.create(project=project)
annotation = Annotation.objects.create(document=doc,
user=self.get_default_user()
)
initial_annotation_data = {
"label": "Test annotation 1"
}
new_annotation_data = {
"label": "Changed annotation"
}
# Fails if tries to change before the annotation is marked as completed
with self.assertRaises(RuntimeError):
change_annotation(self.get_loggedin_request(), annotation_id=annotation.pk, new_data=new_annotation_data)
        # Complete the annotation
annotation.complete_annotation(initial_annotation_data)
annotation.refresh_from_db()
# Checks that the data goes into the change list
self.assertEqual(1, annotation.change_history.all().count(), "Must already have 1 data history item")
self.assertDictEqual(initial_annotation_data, annotation.data)
# Tries to change annotation
change_annotation(self.get_loggedin_request(), annotation_id=annotation.pk, new_data=new_annotation_data)
self.assertEqual(2, annotation.change_history.all().count(), "Must have 2 data history items")
self.assertDictEqual(new_annotation_data, annotation.data)
# Fails for testing document, not allowed to change
test_doc = Document.objects.create(project=project, doc_type=DocumentType.TEST)
test_annotation = Annotation.objects.create(document=test_doc)
test_annotation.complete_annotation(initial_annotation_data)
with self.assertRaises(RuntimeError):
change_annotation(self.get_loggedin_request(), test_annotation.pk, new_annotation_data)
# Fails for training document, not allowed to change
train_doc = Document.objects.create(project=project, doc_type=DocumentType.TRAINING)
train_annotation = Annotation.objects.create(document=train_doc)
train_annotation.complete_annotation(initial_annotation_data)
with self.assertRaises(RuntimeError):
change_annotation(self.get_loggedin_request(), train_annotation.pk, new_annotation_data)
def test_delete_annotation_change_history(self):
# Create initial project with annotation
project = Project.objects.create(name="Test1")
doc = Document.objects.create(project=project)
annotation = Annotation.objects.create(document=doc,
user=self.get_default_user()
)
initial_annotation_data = {
"label": "Test annotation 1"
}
new_annotation_data = {
"label": "Changed annotation"
}
# Complete the annotation and change once
annotation.complete_annotation(initial_annotation_data)
annotation.change_annotation(new_annotation_data)
self.assertEqual(2, annotation.change_history.all().count(), "Must have 2 change entries")
delete_annotation_change_history(self.get_loggedin_request(),
annotation_change_history_id=annotation.change_history.first().pk)
# Raises an error if there's only one entry left, should not be able to delete
with self.assertRaises(RuntimeError):
delete_annotation_change_history(self.get_loggedin_request(),
annotation_change_history_id=annotation.change_history.first().pk)
| 74,587 | 41.116318 | 183 | py |
gate-teamware | gate-teamware-master/backend/tests/test_misc.py | from django.test import TestCase
from backend.utils.misc import get_value_from_key_path, insert_value_to_key_path
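# Both helpers address nested dict values through delimiter-separated key paths
# (e.g. "path1.path2.path3" with "." as the delimiter); traversal into lists is
# not supported, as the tests below demonstrate.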
class InsertExtractValuesFromDictWithKeyPath(TestCase):
def test_get_value_with_key_path(self):
target_value = "Test value"
test_dict = {
"path1": {
"path2": {
"path3": target_value
}
},
"path_array1": [
{
"path2": target_value
}
]
}
# Get for normal path, should exist
self.assertEqual(get_value_from_key_path(test_dict, "path1.path2.path3", "."), target_value)
# Get for nonexistant path
self.assertEqual(get_value_from_key_path(test_dict, "path1.dontexist", "."), None)
# Get for path with array (not supported)
self.assertEqual(get_value_from_key_path(test_dict, "path_array1.0.path2", "."), None)
def test_insert_value_with_key_path(self):
target_value = "Test value"
test_dict = {
"path1": {
"path2": {
}
},
"path_array1": [
{
"path2": target_value
}
]
}
# Insert into new path
self.assertEqual(insert_value_to_key_path(test_dict, "newpath1.newpath2", target_value, "."), True)
self.assertEqual(test_dict["newpath1"]["newpath2"], target_value)
# Insert into existing path
self.assertEqual(insert_value_to_key_path(test_dict, "path1.path2.path3", target_value, "."), True)
self.assertEqual(test_dict["path1"]["path2"]["path3"], target_value)
# Insert into path with array (not supported)
self.assertEqual(insert_value_to_key_path(test_dict, "path_array1.path2.path3", target_value, "."), False)
| 1,882 | 29.868852 | 114 | py |
gate-teamware | gate-teamware-master/backend/tests/test_models.py | import math
from datetime import timedelta
from django.db import models
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.utils import timezone
from backend.models import Project, Document, DocumentType, Annotation, AnnotatorProject
from backend.utils.serialize import ModelSerializer
# Import signal which is used for cleaning up user pending annotations
import backend.signals
class ModelTestCase(TestCase):
def check_model_field(self, model_class, field_name, field_type):
self.assertEqual(model_class._meta.get_field(field_name).__class__, field_type)
def check_model_fields(self, model_class, field_name_types_dict):
for field_name, field_type in field_name_types_dict.items():
self.check_model_field(model_class, field_name, field_type)
class TestUserModel(TestCase):
def test_agree_privacy_policy(self):
user = get_user_model().objects.create(username="test1", agreed_privacy_policy=True)
self.assertTrue(user.agreed_privacy_policy)
def test_document_association_check(self):
user = get_user_model().objects.create(username="test1")
user2 = get_user_model().objects.create(username="test2")
project = Project.objects.create()
doc = Document.objects.create(project=project)
doc2 = Document.objects.create(project=project)
project.add_annotator(user=user)
project2 = Project.objects.create()
doc3 = Document.objects.create(project=project2)
# Test association where user is an annotator of a project and user2 is not
self.assertTrue(user.is_associated_with_document(doc))
self.assertTrue(user.is_associated_with_document(doc2))
self.assertFalse(user.is_associated_with_document(doc3))
self.assertFalse(user2.is_associated_with_document(doc))
self.assertFalse(user2.is_associated_with_document(doc2))
self.assertFalse(user2.is_associated_with_document(doc3))
# Same as above but now user and user2 has annotations
annotation = Annotation.objects.create(user=user, document=doc3)
annotation2 = Annotation.objects.create(user=user2, document=doc)
annotation3 = Annotation.objects.create(user=user2, document=doc2)
annotation4 = Annotation.objects.create(user=user2, document=doc3)
self.assertTrue(user.is_associated_with_document(doc))
self.assertTrue(user.is_associated_with_document(doc2))
self.assertTrue(user.is_associated_with_document(doc3))
self.assertTrue(user2.is_associated_with_document(doc))
self.assertTrue(user2.is_associated_with_document(doc2))
self.assertTrue(user2.is_associated_with_document(doc3))
def test_check_annotation_association_check(self):
user = get_user_model().objects.create(username="test1")
user2 = get_user_model().objects.create(username="test2")
project = Project.objects.create()
doc = Document.objects.create(project=project)
annotation = Annotation.objects.create(user=user, document=doc)
doc2 = Document.objects.create(project=project)
annotation2 = Annotation.objects.create(user=user2, document=doc2)
self.assertTrue(user.is_associated_with_annotation(annotation))
self.assertFalse(user2.is_associated_with_annotation(annotation))
self.assertFalse(user.is_associated_with_annotation(annotation2))
self.assertTrue(user2.is_associated_with_annotation(annotation2))
def test_check_user_active_project(self):
user = get_user_model().objects.create(username="test1")
self.assertFalse(user.has_active_project)
self.assertEqual(user.active_project, None)
project = Project.objects.create()
project.add_annotator(user)
self.assertTrue(user.has_active_project)
self.assertEqual(user.active_project, project)
project.remove_annotator(user)
self.assertFalse(user.has_active_project)
self.assertEqual(user.active_project, None)
def create_each_annotation_status_for_user(user: get_user_model(), project: Project):
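    # Create one annotation per lifecycle status for the user, each attached to
    # a freshly created document in the given project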
annotation_statuses = [Annotation.PENDING,
Annotation.COMPLETED,
Annotation.REJECTED,
Annotation.TIMED_OUT,
Annotation.ABORTED]
for annotation_status in annotation_statuses:
Annotation.objects.create(user=user,
document=Document.objects.create(project=project),
status=annotation_status)
class TestUserModelDeleteUser(TestCase):
def setUp(self) -> None:
self.user = get_user_model().objects.create(username="test1",
first_name="TestFirstname",
last_name="TestLastname",
email="[email protected]")
self.project = Project.objects.create(owner=self.user)
create_each_annotation_status_for_user(self.user, self.project)
self.user2 = get_user_model().objects.create(username="test2",
first_name="TestFirstname",
last_name="TestLastname",
email="[email protected]")
project2 = Project.objects.create(owner=self.user2)
Annotation.objects.create(user=self.user,
document=Document.objects.create(project=project2),
status=Annotation.PENDING)
create_each_annotation_status_for_user(user=self.user2, project=project2)
def test_clear_pending_annotations(self):
# Check that there's at least one pending annotation for the user
self.assertEqual(2, Annotation.objects.filter(status=Annotation.PENDING, user=self.user).count())
self.user.clear_pending_annotations()
# No pending annotation for the user
self.assertEqual(0, Annotation.objects.filter(status=Annotation.PENDING, user=self.user).count())
def test_remove_user_personal_data(self):
"""
Tests deleting all personal information of the user from the system and replacing
all personally identifiable information with placeholders
"""
self.user.delete_user_personal_information()
self.user.refresh_from_db()
TestUserModelDeleteUser.check_user_personal_data_deleted(self, self.user)
@staticmethod
def check_user_personal_data_deleted(test_obj, user):
        # Make sure the db record is refreshed first
user.refresh_from_db()
# User should be marked as deleted
test_obj.assertTrue(user.is_deleted)
# Deleted username is a combination of [DELETED_USER_USERNAME_PREFIX]_hashedvalues
test_obj.assertTrue(user.username.startswith(settings.DELETED_USER_USERNAME_PREFIX))
        # Deleted email is a combination of [DELETED_USER_USERNAME_PREFIX]_hashedvalues@[DELETED_USER_EMAIL_DOMAIN]
test_obj.assertTrue(user.email.startswith(settings.DELETED_USER_USERNAME_PREFIX))
test_obj.assertTrue(user.email.endswith(settings.DELETED_USER_EMAIL_DOMAIN))
# First name and last name should be DELETED_USER_FIRSTNAME and DELETED_USER_LASTNAME
test_obj.assertEqual(user.first_name, settings.DELETED_USER_FIRSTNAME)
test_obj.assertEqual(user.last_name, settings.DELETED_USER_LASTNAME)
# Removed user should not have pending annotations
test_obj.assertEqual(0, Annotation.objects.filter(status=Annotation.PENDING, user=user).count())
def test_delete_user(self):
user_id = self.user.pk
self.user.delete()
TestUserModelDeleteUser.check_user_is_deleted(self, user_id)
@staticmethod
def check_user_is_deleted(test_obj, user_id):
test_obj.assertEqual(0, Annotation.objects.filter(user_id=user_id).count(),
"Deleted user should not have any annotations")
test_obj.assertEqual(0, Project.objects.filter(owner_id=user_id).count(),
"Deleted user should not have any projects")
class TestDocumentModel(ModelTestCase):
def test_model_fields(self):
self.check_model_fields(Document, {
"project": models.ForeignKey,
"data": models.JSONField,
"created": models.DateTimeField,
})
def test_document(self):
annotator = get_user_model().objects.create(username="test1")
project = Project.objects.create()
doc = Document.objects.create(project=project)
# Annotation
abort_annotation = Annotation.objects.create(user=annotator, document=doc)
abort_annotation.abort_annotation()
self.assertFalse(doc.user_completed_annotation_of_document(annotator))
reject_annotation = Annotation.objects.create(user=annotator, document=doc)
reject_annotation.reject_annotation()
self.assertFalse(doc.user_completed_annotation_of_document(annotator))
timeout_annotation = Annotation.objects.create(user=annotator, document=doc)
timeout_annotation.timeout_annotation()
self.assertFalse(doc.user_completed_annotation_of_document(annotator))
pending_annotation = Annotation.objects.create(user=annotator, document=doc)
self.assertFalse(doc.user_completed_annotation_of_document(annotator))
self.assertEqual(1, doc.num_completed_and_pending_annotations)
complete_annotation1 = Annotation.objects.create(user=annotator, document=doc)
complete_annotation1.complete_annotation({})
self.assertTrue(doc.user_completed_annotation_of_document(annotator))
self.assertEqual(2, doc.num_completed_and_pending_annotations)
complete_annotation2 = Annotation.objects.create(user=annotator, document=doc)
complete_annotation2.complete_annotation({})
complete_annotation3 = Annotation.objects.create(user=annotator, document=doc)
complete_annotation3.complete_annotation({})
doc.refresh_from_db()
self.assertEqual(4, doc.num_completed_and_pending_annotations)
self.assertEqual(3, doc.num_completed_annotations)
self.assertEqual(1, doc.num_pending_annotations)
self.assertEqual(1, doc.num_timed_out_annotations)
self.assertEqual(1, doc.num_rejected_annotations)
self.assertEqual(1, doc.num_aborted_annotations)
self.assertEqual(3, doc.num_user_completed_annotations(annotator))
self.assertEqual(1, doc.num_user_pending_annotations(annotator))
self.assertEqual(1, doc.num_user_timed_out_annotations(annotator))
self.assertEqual(1, doc.num_user_rejected_annotations(annotator))
self.assertEqual(1, doc.num_user_aborted_annotations(annotator))
def test_document_type_str(self):
project = Project.objects.create()
doc = Document.objects.create(project=project, doc_type=DocumentType.ANNOTATION)
self.assertEqual("Annotation", doc.doc_type_str)
doc = Document.objects.create(project=project, doc_type=DocumentType.TRAINING)
self.assertEqual("Training", doc.doc_type_str)
doc = Document.objects.create(project=project, doc_type=DocumentType.TEST)
self.assertEqual("Test", doc.doc_type_str)
class TestProjectModel(ModelTestCase):
def setUp(self):
self.annotations_per_doc = 3
self.annotator_max_annotation = 0.6
self.num_docs = 10
self.num_test_docs = 7
self.num_training_docs = 3
self.num_annotators = 10
self.num_total_tasks = self.num_docs * self.annotations_per_doc
self.project = Project.objects.create(annotations_per_doc=self.annotations_per_doc,
annotator_max_annotation=self.annotator_max_annotation)
self.docs = [Document.objects.create(project=self.project) for i in range(self.num_docs)]
self.training_docs = [Document.objects.create(project=self.project, doc_type=DocumentType.TRAINING) for i in
range(self.num_training_docs)]
self.test_docs = [Document.objects.create(project=self.project, doc_type=DocumentType.TEST) for i in
range(self.num_test_docs)]
# Add answers to training docs
counter = 0
for doc in self.training_docs:
doc.data = {
"id": f"{counter}",
"text": f"Text{counter}",
"gold": {
"label1": {
"value": f"val{counter}",
"explanation": f"Exp{counter}"
},
"label2": {
"value": f"val{counter}",
"explanation": f"Exp{counter}"
},
}
}
doc.save()
counter += 1
# Add answers to testing docs
counter = 0
for doc in self.test_docs:
doc.data = {
"id": f"{counter}",
"text": f"Text{counter}",
"gold": {
"label1": {
"value": f"val{counter}",
},
"label2": {
"value": f"val{counter}",
}
}
}
doc.save()
counter += 1
self.annotators = [get_user_model().objects.create(username=f"test{i}") for i in range(self.num_annotators)]
for annotator in self.annotators:
self.project.add_annotator(annotator)
def test_model_fields(self):
self.check_model_fields(Project, {
"name": models.TextField,
"description": models.TextField,
"annotator_guideline": models.TextField,
"configuration": models.JSONField,
"owner": models.ForeignKey,
"annotations_per_doc": models.IntegerField,
"annotator_max_annotation": models.FloatField,
"annotation_timeout": models.IntegerField,
"document_input_preview": models.JSONField,
"document_id_field": models.TextField,
"has_training_stage": models.BooleanField,
"has_test_stage": models.BooleanField,
"can_annotate_after_passing_training_and_test": models.BooleanField,
"min_test_pass_threshold": models.FloatField,
"document_gold_standard_field": models.TextField,
})
def test_add_annotator(self):
project = Project.objects.create()
annotator = get_user_model().objects.create(username="anno1")
self.assertEqual(0, project.annotators.all().count())
project.add_annotator(annotator)
self.assertEqual(1, project.annotators.all().count())
annotator_project = AnnotatorProject.objects.get(project=project, annotator=annotator)
self.assertEqual(AnnotatorProject.ACTIVE, annotator_project.status)
def test_make_annotator_active(self):
project = Project.objects.create()
for i in range(20):
document = Document.objects.create(project=project)
project2 = Project.objects.create()
annotator = get_user_model().objects.create(username="anno1")
# Fails if not associated with project
with self.assertRaises(Exception):
project.make_annotator_active(annotator)
# Adds to first project and removes
project.add_annotator(annotator)
# Fails if already active
with self.assertRaises(Exception):
project.make_annotator_active(annotator)
# Removes from first project and adds to second project
project.remove_annotator(annotator)
project2.add_annotator(annotator)
# Fail if active in another project
with self.assertRaises(Exception):
project.make_annotator_active(annotator)
# Remove from project 2, should now be able to make active in project 1
project2.remove_annotator(annotator)
project.make_annotator_active(annotator)
annotator_project = AnnotatorProject.objects.get(project=project, annotator=annotator)
self.assertEqual(AnnotatorProject.ACTIVE, annotator_project.status)
# Adds annotation to project mark user as completed
for document in project.documents.all():
annotation = Annotation.objects.create(user=annotator, document=document, status=Annotation.COMPLETED)
project.remove_annotator(annotator)
# Fail if already reached quota
with self.assertRaises(Exception):
project.make_annotator_active(annotator)
def test_remove_annotator(self):
project = Project.objects.create()
annotator = get_user_model().objects.create(username="anno1")
project.add_annotator(annotator)
self.assertEqual(1, project.annotators.all().count())
project.remove_annotator(annotator)
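        # Removal keeps the AnnotatorProject link, so the count stays at 1; only
        # the status flips to COMPLETED (checked below)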
self.assertEqual(1, project.annotators.all().count())
annotator_project = AnnotatorProject.objects.get(project=project, annotator=annotator)
self.assertEqual(AnnotatorProject.COMPLETED, annotator_project.status)
def test_reject_annotator(self):
project = Project.objects.create()
annotator = get_user_model().objects.create(username="anno1")
project.add_annotator(annotator)
self.assertEqual(1, project.annotators.all().count())
project.reject_annotator(annotator)
self.assertEqual(1, project.annotators.all().count())
annotator_project = AnnotatorProject.objects.get(project=project, annotator=annotator)
self.assertEqual(AnnotatorProject.COMPLETED, annotator_project.status)
self.assertEqual(True, annotator_project.rejected)
def test_num_documents(self):
self.assertEqual(self.project.num_documents, self.num_docs)
def test_num_test_documents(self):
self.assertEqual(self.project.num_test_documents, self.num_test_docs)
def test_num_training_documents(self):
self.assertEqual(self.project.num_training_documents, self.num_training_docs)
def test_num_annotation_tasks_total(self):
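        # Only annotation documents count towards the total:
        # 10 docs * 3 annotations per doc = 30; training and test docs are excluded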
self.assertEqual(self.project.num_annotation_tasks_total, self.num_docs * self.annotations_per_doc)
def test_num_completed_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_completed_tasks, self.num_docs)
def test_num_pending_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_pending_tasks, self.num_docs)
def test_num_rejected_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_rejected_tasks, self.num_docs)
def test_num_timed_out_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_timed_out_tasks, self.num_docs)
def test_num_aborted_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_aborted_tasks, self.num_docs)
def test_num_occupied_tasks(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
self.assertEqual(self.project.num_occupied_tasks, self.num_docs * 2,
f"Must have {self.num_docs * 2} annotations (completed + pending)")
def test_num_annotation_tasks_remaining(self):
self.annotate_docs_all_states(self.docs, self.annotators[0])
self.annotate_docs_all_states(self.test_docs, self.annotators[0])
self.annotate_docs_all_states(self.training_docs, self.annotators[0])
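        # annotate_docs_all_states leaves one completed and one pending annotation
        # per document, so remaining = 10 * 3 - 10 * 2 = 10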
self.assertEqual(self.project.num_annotation_tasks_remaining,
self.num_docs * self.annotations_per_doc - self.num_docs * 2)
def annotate_docs_all_states(self, docs, annotator):
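        # Create one annotation in each of the five states (rejected, completed,
        # aborted, pending, timed out) on every document for the given annotator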
for doc in docs:
Annotation.objects.create(user=annotator, document=doc, status=Annotation.REJECTED)
Annotation.objects.create(user=annotator, document=doc, status=Annotation.COMPLETED)
Annotation.objects.create(user=annotator, document=doc, status=Annotation.ABORTED)
Annotation.objects.create(user=annotator, document=doc, status=Annotation.PENDING)
Annotation.objects.create(user=annotator, document=doc, status=Annotation.TIMED_OUT)
def test_max_num_task_per_annotator(self):
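        # The per-annotator quota is ceil(num_docs * annotator_max_annotation),
        # i.e. ceil(10 * 0.6) = 6 for this fixture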
self.assertEqual(self.project.max_num_task_per_annotator,
math.ceil(self.num_docs * self.annotator_max_annotation))
def test_num_annotators(self):
self.assertEqual(self.project.num_annotators, self.num_annotators)
def test_is_project_configured_and_config_error_message(self):
# Project has docs but is not configured
self.assertFalse(self.project.is_project_configured)
self.assertEqual(len(self.project.project_configuration_error_message), 1)
# Add a blank configuration with one item
self.project.configuration = [{}]
self.project.save()
# Project is considered configured
self.assertTrue(self.project.is_project_configured)
self.assertEqual(len(self.project.project_configuration_error_message), 0)
def test_project_annotation_timeout(self):
annotator = self.annotators[0]
project = self.project
docs = self.docs
annotation = project.assign_annotator_task(annotator)
# Check timing out is set properly
self.assertIsNotNone(annotation)
self.assertIsNotNone(annotation.times_out_at)
        # The timeout must fall after the annotation's creation time
self.assertTrue(annotation.times_out_at > annotation.created)
def test_num_annotator_task_remaining(self):
project = self.project
docs = self.docs
annotator = self.annotators[0]
annotator2 = self.annotators[1]
num_docs_user_can_annotate = project.max_num_task_per_annotator
project.refresh_from_db()
for doc in docs:
Annotation.objects.create(user=annotator2, document=doc, status=Annotation.COMPLETED)
Annotation.objects.create(user=annotator2, document=doc, status=Annotation.ABORTED)
for i in range(math.ceil(
project.annotator_max_annotation * project.documents.filter(doc_type=DocumentType.ANNOTATION).count())):
project.refresh_from_db()
doc = docs[i]
Annotation.objects.create(user=annotator, document=doc, status=Annotation.COMPLETED)
Annotation.objects.create(user=annotator, document=doc, status=Annotation.ABORTED)
self.assertEqual(project.num_annotator_task_remaining(annotator), num_docs_user_can_annotate - (i + 1))
print(project.num_annotator_task_remaining(annotator))
self.assertEqual(project.num_annotator_task_remaining(annotator), 0, "Must have 0 tasks remaining")
def test_get_annotator_annotatable_occupied_completed_pending_documents_query(self):
project = self.project
annotator = self.annotators[0]
# Add some pre-annotated test and training docs to ensure that they do not
# interfere with actual annotation document counts
for doc in self.training_docs:
Annotation.objects.create(document=doc, user=annotator, status=Annotation.COMPLETED)
for doc in self.test_docs:
Annotation.objects.create(document=doc, user=annotator, status=Annotation.COMPLETED)
# Can annotate all docs when blank
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs)
# No annotations so no occupied, completed or pending
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 0)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 0)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 0)
# One less doc if other annotator has annotated
for i in range(self.annotations_per_doc):
ann = self.annotators[i + 1]
Annotation.objects.create(user=ann, document=self.docs[0], status=Annotation.COMPLETED)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 1)
# Reset annotation state
Annotation.objects.all().delete()
# One less doc if other annotator has annotated, should also happen for pending status
for i in range(self.annotations_per_doc):
ann = self.annotators[i + 1]
Annotation.objects.create(user=ann, document=self.docs[0], status=Annotation.PENDING)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 1)
# Rejected
Annotation.objects.create(user=annotator, document=self.docs[1], status=Annotation.REJECTED)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 2)
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 1)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 0)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 0)
# Pending
Annotation.objects.create(user=annotator, document=self.docs[2], status=Annotation.PENDING)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 3)
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 2)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 0)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 1)
# Completed
Annotation.objects.create(user=annotator, document=self.docs[3], status=Annotation.COMPLETED)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 4)
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 3)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 1)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 1)
# Aborted should not affect count
Annotation.objects.create(user=annotator, document=self.docs[3], status=Annotation.ABORTED)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 4)
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 3)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 1)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 1)
# Timed out should not affect count
Annotation.objects.create(user=annotator, document=self.docs[3], status=Annotation.TIMED_OUT)
self.assertEqual(project.get_annotator_annotatable_documents_query(annotator).count(), self.num_docs - 4)
self.assertEqual(project.get_annotator_occupied_documents_query(annotator).count(), 3)
self.assertEqual(project.get_annotator_completed_documents_query(annotator).count(), 1)
self.assertEqual(project.get_annotator_pending_documents_query(annotator).count(), 1)
def test_annotator_reached_quota(self):
num_tasks_to_complete = math.ceil(self.num_docs * self.annotator_max_annotation)
annotator = self.annotators[0]
for i in range(num_tasks_to_complete):
self.assertFalse(self.project.annotator_reached_quota(annotator))
Annotation.objects.create(user=annotator, document=self.docs[i], status=Annotation.COMPLETED)
self.assertTrue(self.project.annotator_reached_quota(annotator))
def test_annotator_completed_training(self):
annotator = self.annotators[0]
self.project.annotator_completed_training(annotator)
annotator_proj = AnnotatorProject.objects.get(annotator=annotator, project=self.project)
self.assertTrue(annotator_proj.training_completed is not None)
self.assertTrue(annotator_proj.training_score == 0)
def test_annotator_completed_test(self):
annotator = self.annotators[0]
self.project.annotator_completed_test(annotator)
annotator_proj = AnnotatorProject.objects.get(annotator=annotator, project=self.project)
self.assertTrue(annotator_proj.test_completed is not None)
self.assertTrue(annotator_proj.test_score == 0)
def test_annotator_set_allowed_to_annotate(self):
annotator = self.annotators[0]
self.project.annotator_set_allowed_to_annotate(annotator)
annotator_proj = AnnotatorProject.objects.get(annotator=annotator, project=self.project)
self.assertTrue(annotator_proj.allowed_to_annotate is True)
def test_annotator_completed_annotation(self):
annotator = self.annotators[0]
self.project.remove_annotator(annotator)
annotator_proj = AnnotatorProject.objects.get(annotator=annotator, project=self.project)
self.assertTrue(annotator_proj.status == AnnotatorProject.COMPLETED)
def test_get_annotator_document_score(self):
annotator = self.annotators[0]
# No annotations == 0
self.assertEqual(0, self.project.get_annotator_document_score(annotator, DocumentType.TEST))
incorrect_data = {
"label1": "Incorrect",
"label2": "Incorrect"
}
# All incorrect
for doc in self.test_docs:
anno = Annotation.objects.create(user=annotator, document=doc)
anno.data = incorrect_data
self.assertEqual(0, self.project.get_annotator_document_score(annotator, DocumentType.TEST))
# All correct
annotator2 = self.annotators[1]
for doc in self.test_docs:
correct_annotation_data = {
"label1": doc.data["gold"]["label1"]["value"],
"label2": doc.data["gold"]["label2"]["value"],
}
anno = Annotation.objects.create(user=annotator2, document=doc)
anno.data = correct_annotation_data
self.assertEqual(self.num_test_docs, self.project.get_annotator_document_score(annotator2, DocumentType.TEST))
# 4 correct
num_correct = 4
annotator3 = self.annotators[2]
counter = 0
for doc in self.test_docs:
correct_annotation_data = {
"label1": doc.data["gold"]["label1"]["value"],
"label2": doc.data["gold"]["label2"]["value"],
}
data = correct_annotation_data if counter < num_correct else incorrect_data
anno = Annotation.objects.create(user=annotator3, document=doc)
anno.data = data
counter += 1
self.assertEqual(num_correct, self.project.get_annotator_document_score(annotator3, DocumentType.TEST))
def test_check_annotation_answer(self):
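        # check_annotation_answer compares submitted labels against gold "value"
        # entries: strings must match exactly, list values are an order-insensitive
        # exact match, extra labels in the annotation are ignored, and a missing
        # gold label fails the check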
# Label with single string value
answers_str_label = {
"label1": {
"value": "answer"
}
}
annotation_str_label = {
"label1": "answer"
}
# Correct str answer
self.assertTrue(self.project.check_annotation_answer(annotation_str_label, answers_str_label))
# Incorrect str answer
annotation_str_label["label1"] = "Incorrect"
self.assertFalse(self.project.check_annotation_answer(annotation_str_label, answers_str_label))
# Label with list value (multiple choice)
answers_list_label = {
"label1": {
"value": ["answer1", "answer3", "answer2"]
}
}
annotation_list_label = {
"label1": ["answer2", "answer1", "answer3"]
}
self.assertTrue(self.project.check_annotation_answer(annotation_list_label, answers_list_label))
# One wrong value
annotation_list_label["label1"] = ["answer2", "answer1", "answer4"]
self.assertFalse(self.project.check_annotation_answer(annotation_list_label, answers_list_label))
# Too many values
annotation_list_label["label1"] = ["answer2", "answer1", "answer3", "answer4"]
self.assertFalse(self.project.check_annotation_answer(annotation_list_label, answers_list_label))
# Missing a value
annotation_list_label["label1"] = ["answer2", "answer3"]
self.assertFalse(self.project.check_annotation_answer(annotation_list_label, answers_list_label))
# Two labels with str and list
answers_list_str_label = {
"label1": {
"value": ["answer1", "answer3", "answer2"]
},
"label2": {
"value": "answer"
}
}
annotation_list_str_label = {
"label1": ["answer2", "answer1", "answer3"],
"label2": "answer",
}
self.assertTrue(self.project.check_annotation_answer(annotation_list_str_label, answers_list_str_label))
# Has additional label in annotation, is ok and won't be included in the check
annotation_list_str_label["label3"] = "Other answer"
self.assertTrue(self.project.check_annotation_answer(annotation_list_str_label, answers_list_str_label))
# Missing one label in annotation
annotation_list_str_label.pop("label2")
self.assertFalse(self.project.check_annotation_answer(annotation_list_str_label, answers_list_str_label))
def test_saving_and_loading(self):
name = "Test name"
created_at = timezone.now()
data = {
"entry1": "val1",
"entry2": "val2"
}
proj = Project(name=name, created=created_at, configuration=data)
proj.save()
loaded_proj = Project.objects.get(pk=proj.pk)
self.assertEqual(loaded_proj.name, name)
self.assertEqual(loaded_proj.created, created_at)
proj_data = loaded_proj.configuration
self.assertEqual(proj_data["entry1"], data["entry1"])
self.assertEqual(proj_data["entry2"], data["entry2"])
def test_serializing(self):
user1 = get_user_model().objects.create(username="user1")
user2 = get_user_model().objects.create(username="user2")
name = "Test name"
created_at = timezone.now()
data = {
"entry1": "val1",
"entry2": "val2"
}
proj = Project(name=name, created=created_at, configuration=data)
proj.save()
# One document
document = Document.objects.create(project=proj)
document.project = proj
document.save()
# Two document
document2 = Document.objects.create(project=proj)
# User 1 as owner
user1.owns.add(proj)
user1.save()
# Refresh project model and serialize
proj.refresh_from_db()
serializer = ModelSerializer()
serialized_proj = serializer.serialize(proj, exclude_fields=set(["annotatorproject"]))
self.assertEqual(serialized_proj["name"], proj.name)
self.assertEqual(serialized_proj["configuration"], proj.configuration)
self.assertEqual(serialized_proj["owner"], user1.id)
self.assertEqual(serialized_proj["documents"], [document.id, document2.id])
def test_deserialize(self):
name = "Test name"
created_at = timezone.now()
data = {
"entry1": "val1",
"entry2": "val2"
}
user1 = get_user_model().objects.create(username="user1")
user2 = get_user_model().objects.create(username="user2")
proj = Project()
proj.save()
input_dict = {
"id": proj.id,
"name": name,
"created_at": created_at,
"data": data,
"owner": user1.id
}
serializer = ModelSerializer()
deserialized_obj = serializer.deserialize(Project, input_dict)
self.assertEqual(input_dict["name"], deserialized_obj.name)
self.assertEqual(input_dict["owner"], deserialized_obj.owner.id)
def test_clone_project(self):
project_fields = {
"description",
"annotator_guideline",
"configuration",
"annotations_per_doc",
"annotator_max_annotation",
"allow_document_reject",
"allow_annotation_change",
"annotation_timeout",
"document_input_preview",
"document_input_preview_csv",
"document_id_field",
"has_training_stage",
"has_test_stage",
"can_annotate_after_passing_training_and_test",
"min_test_pass_threshold",
"document_gold_standard_field",
"document_pre_annotation_field",
}
clone_prefix = "Test project prefix "
# Check fields must match exactly
fields = Project.get_project_config_fields({"name", "owner", "id", "created", "uuid"})
field_name_set = set()
for field in fields:
field_name_set.add(field.name)
self.assertSetEqual(project_fields, field_name_set)
project = Project.objects.create(name="Testname",
description="Test desc",
annotator_guideline="Test annotator guideline",
configuration=[
{
"name": "sentiment",
"title": "Sentiment",
"type": "radio",
"options": {
"positive": "Positive",
"negative": "Negative",
"neutral": "Neutral"
}
},
{
"name": "reason",
"title": "Reason for your stated sentiment",
"type": "textarea"
}
],
annotations_per_doc=3,
annotator_max_annotation=0.3,
allow_document_reject=False,
allow_annotation_change=False,
annotation_timeout=809,
document_input_preview={"test": "testest"},
document_id_field="test_field_name",
has_training_stage=False,
has_test_stage=False,
can_annotate_after_passing_training_and_test=False,
min_test_pass_threshold=0.9,
document_gold_standard_field="test_fname",
)
cloned_project = project.clone(clone_name_prefix=clone_prefix)
# Check ID and name
self.assertNotEqual(project.id, cloned_project.id)
self.assertEqual(clone_prefix + project.name, cloned_project.name)
# Check cloned parameters
for field_name in project_fields:
self.assertEqual(getattr(project, field_name), getattr(cloned_project, field_name))
class TestAnnotationModel(ModelTestCase):
def test_model_fields(self):
self.check_model_fields(Annotation, {
"user": models.ForeignKey,
"document": models.ForeignKey,
"times_out_at": models.DateTimeField,
"created": models.DateTimeField,
"status": models.IntegerField,
"status_time": models.DateTimeField,
})
def test_model_data(self):
"""Getting and setting data to the annotation model"""
project = Project.objects.create(name="Test")
document = Document.objects.create(project=project)
user = get_user_model().objects.create(username="test1")
annotation = Annotation.objects.create(document=document, user=user)
annotation_test_data = {
"id": 10,
"label": "Test annotation content"
}
annotation_test_data_changed = {
"id": 50,
"label": "Test annotation content 2"
}
annotation.data = annotation_test_data
self.assertDictEqual(annotation_test_data, annotation.data, "Contents of returned dict should be the same")
annotation.data = annotation_test_data_changed
self.assertDictEqual(annotation_test_data_changed, annotation.data,
"Contents of returned dict should be the same")
anno_db_fetch = Annotation.objects.get(pk=annotation.id)
self.assertDictEqual(annotation_test_data_changed, anno_db_fetch.data,
"Contents of returned dict should be the smae")
def test_timeout_check(self):
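        # check_for_timed_out_annotations should flip only PENDING annotations
        # whose times_out_at has passed, returning how many were timed out;
        # annotations already timed out, completed or rejected are left untouched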
num_already_timedout = 12
num_completed = 50
num_rejected = 4
num_to_timeout = 23
current_time = timezone.now()
timeout_time = current_time + timedelta(minutes=30)
timeout_check_time = current_time + timedelta(minutes=31)
annotator = get_user_model().objects.create(username="test1")
project = Project.objects.create()
document = Document.objects.create(project=project)
for i in range(num_already_timedout):
Annotation.objects.create(document=document,
user=annotator,
times_out_at=timeout_time,
status=Annotation.TIMED_OUT,
status_time=timeout_time)
for i in range(num_completed):
Annotation.objects.create(document=document,
user=annotator,
times_out_at=timeout_time,
status=Annotation.COMPLETED,
status_time=timeout_time)
for i in range(num_rejected):
Annotation.objects.create(document=document,
user=annotator,
times_out_at=timeout_time,
status=Annotation.REJECTED,
status_time=timeout_time)
for i in range(num_to_timeout):
Annotation.objects.create(document=document,
user=annotator,
times_out_at=timeout_time)
self.assertEqual(num_already_timedout + num_completed + num_rejected + num_to_timeout,
Annotation.objects.all().count(), "Must have this many number of annotations in total")
num_timed_out = Annotation.check_for_timed_out_annotations(current_time + timedelta(minutes=15))
self.assertEqual(0, num_timed_out, "There should be no timed out annotations yet")
num_timed_out = Annotation.check_for_timed_out_annotations(timeout_check_time)
        self.assertEqual(num_to_timeout, num_timed_out, "Number of newly timed-out annotations must match the number due to time out")
num_timed_out = Annotation.check_for_timed_out_annotations(timeout_check_time + timedelta(hours=1))
self.assertEqual(0, num_timed_out, "Must not be anymore annotations to timeout")
def test_change_annotation(self):
original_annotation = {"label1": "value1"}
changed_annotation = {"label1": "value2"}
changed_annotation2 = {"label1": "value3"}
project = Project.objects.create()
document = Document.objects.create(project=project)
annotator = get_user_model().objects.create(username="Testannotator")
annotator2 = get_user_model().objects.create(username="Testannotator2")
annotation = Annotation.objects.create(document=document, user=annotator)
with self.assertRaises(RuntimeError):
# Error should be raised if annotation not already completed
annotation.change_annotation(changed_annotation, annotator2)
# Complete the annotation
annotation.complete_annotation(original_annotation)
self.assertDictEqual(original_annotation, annotation.data, "Expects the original annotation")
# Change the annotation by setting the data property
annotation.data = changed_annotation
annotation.refresh_from_db()
self.assertDictEqual(changed_annotation, annotation.data, "Expects the first annotation change")
self.assertEqual(annotator,
annotation.latest_annotation_history().changed_by,
"Changed by the owner of the annotation object by default")
# Change the annotation using change_annotation method
annotation.change_annotation(changed_annotation2, annotator2)
self.assertDictEqual(changed_annotation2, annotation.data, "Expects the second annotation change")
self.assertEqual(annotator2,
annotation.latest_annotation_history().changed_by,
"Should be changed by annotator2")
def test_get_annotations_for_user_in_project(self):
num_projects = 3
num_annotators = 4
num_annotations_for_annotator = 4
projects = [Project.objects.create() for i in range(num_projects)]
annotators = [get_user_model().objects.create(username=f"Testannotator{i}") for i in range(num_annotators)]
doc_types = [DocumentType.ANNOTATION, DocumentType.TRAINING, DocumentType.TEST]
anno_status = [Annotation.PENDING, Annotation.COMPLETED,
Annotation.REJECTED, Annotation.ABORTED, Annotation.TIMED_OUT]
# Create annotations for all annotators
for project in projects:
for annotator in annotators:
for i in range(num_annotations_for_annotator):
for doc_type in doc_types:
for status in anno_status:
doc = Document.objects.create(project=project, doc_type=doc_type)
Annotation.objects.create(user=annotator, document=doc, status=status)
# Check for all annotators and projects
for project in projects:
for annotator in annotators:
# Shows 4 pending and 4 completed annotations
self.assertEqual(num_annotations_for_annotator * 2,
len(Annotation.get_annotations_for_user_in_project(annotator.pk, project.pk)))
self.assertEqual(num_annotations_for_annotator * 2,
len(Annotation.get_annotations_for_user_in_project(annotator.pk,
project.pk,
DocumentType.TEST)))
self.assertEqual(num_annotations_for_annotator * 2,
len(Annotation.get_annotations_for_user_in_project(annotator.pk,
project.pk,
DocumentType.TRAINING)))
class TestDocumentAnnotationModelExport(TestCase):
def setUp(self):
self.test_user = get_user_model().objects.create(username="project_creator")
self.annotators = [get_user_model().objects.create(username=f"anno{i}") for i in range(3)]
self.project = Project.objects.create(owner=self.test_user)
for i in range(10):
document = Document.objects.create(
project=self.project,
data={
"id": i,
"text": f"Text {i}",
"feature1": "Testvalue 1",
"feature2": "Testvalue 1",
"feature3": "Testvalue 1",
}
)
for annotator in self.annotators:
annotation = Annotation.objects.create(user=annotator,
document=document,
status=Annotation.COMPLETED,
)
annotation.data = {
"text1": "Value1",
"checkbox1": ["val1", "val2", "val3"],
}
annotation_pending = Annotation.objects.create(user=annotator,
document=document,
status=Annotation.PENDING)
annotation_timed_out = Annotation.objects.create(user=annotator,
document=document,
status=Annotation.TIMED_OUT)
annotation_reject = Annotation.objects.create(user=annotator,
document=document,
status=Annotation.REJECTED)
annotation_aborted = Annotation.objects.create(user=annotator,
document=document,
status=Annotation.ABORTED)
self.project.refresh_from_db()
def test_export_raw(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("raw")
print(doc_dict)
self.assertTrue("id" in doc_dict)
self.assertTrue("text" in doc_dict)
self.assertTrue("feature1" in doc_dict)
self.assertTrue("feature2" in doc_dict)
self.assertTrue("feature3" in doc_dict)
self.check_raw_gate_annotation_formatting(doc_dict)
def test_export_gate(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("gate")
print(doc_dict)
self.assertTrue("text" in doc_dict)
self.assertTrue("features" in doc_dict)
doc_features = doc_dict["features"]
self.assertTrue("id" in doc_features)
self.assertTrue("feature1" in doc_features)
self.assertTrue("feature2" in doc_features)
self.assertTrue("feature3" in doc_features)
self.check_raw_gate_annotation_formatting(doc_dict)
def check_raw_gate_annotation_formatting(self, doc_dict):
self.assertTrue("annotation_sets" in doc_dict)
self.assertTrue(len(doc_dict["annotation_sets"]) == 3)
# Test annotation formatting
for aset_key, aset_data in doc_dict["annotation_sets"].items():
self.assertTrue("name" in aset_data)
self.assertTrue("annotations" in aset_data)
self.assertEqual(len(aset_data["annotations"]), 1)
anno_dict = aset_data["annotations"][0]
self.assertTrue("type" in anno_dict)
self.assertTrue("start" in anno_dict)
self.assertTrue("end" in anno_dict)
self.assertTrue("id" in anno_dict)
self.assertTrue("features" in anno_dict)
self.assertTrue("label" in anno_dict["features"])
label_dict = anno_dict["features"]["label"]
self.assertTrue("text1" in label_dict)
self.assertTrue("checkbox1" in label_dict)
def test_export_csv(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("csv")
print(doc_dict)
self.assertTrue("id" in doc_dict)
self.assertTrue("text" in doc_dict)
self.assertTrue("feature1" in doc_dict)
self.assertTrue("feature2" in doc_dict)
self.assertTrue("feature3" in doc_dict)
self.assertTrue("annotations" in doc_dict)
self.assertTrue(len(doc_dict["annotations"]) == 3)
anno_set_dict = doc_dict["annotations"]
for set_key in anno_set_dict:
self.assertTrue(isinstance(anno_set_dict[set_key]["text1"], str))
self.assertTrue(isinstance(anno_set_dict[set_key]["checkbox1"], str))
def test_export_raw_anonymized(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("raw", anonymize=True)
for aset_key, aset_data in doc_dict["annotation_sets"].items():
self.assertTrue(isinstance(aset_data.get("name", None), int))
def test_export_raw_deanonymized(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("raw", anonymize=False)
for aset_key, aset_data in doc_dict["annotation_sets"].items():
self.assertTrue(isinstance(aset_data.get("name", None), str))
def test_export_gate_anonymized(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("gate", anonymize=True)
for aset_key, aset_data in doc_dict["annotation_sets"].items():
self.assertTrue(isinstance(aset_data.get("name", None), int))
def test_export_gate_deanonymized(self):
for document in self.project.documents.all():
doc_dict = document.get_doc_annotation_dict("gate", anonymize=False)
for aset_key, aset_data in doc_dict["annotation_sets"].items():
self.assertTrue(isinstance(aset_data.get("name", None), str))
| 55,876 | 44.688471 | 120 | py |
gate-teamware | gate-teamware-master/backend/tests/test_telemetry.py | from urllib.parse import urljoin
from django.test import TestCase
from django.conf import settings
import requests
import requests_mock
from backend.management.commands import load_test_fixture
from backend.utils.telemetry import TelemetrySender
from backend.models import Project
@requests_mock.Mocker()
class TestTelemetrySender(TestCase):
def setUp(self):
load_test_fixture.project_with_annotators()
settings.TELEMETRY_ON = True
def test_telemetry_sender(self, mocker):
"""Tests telemetry sender."""
proj = Project.objects.first()
ts = TelemetrySender("completed", { 'uuid': 'mock-uuid'})
mocker.post(ts.url, status_code=201) # set up mocker for http post request
ts.send()
assert ts.http_status_code == 201
mocker.post(ts.url, status_code=500)
ts.send()
assert ts.http_status_code == 500
def test_project_completion_telemetry(self, mocker):
"""Tests telemetry sending when project is completed."""
url = urljoin(settings.TELEMETRY_BASE_URL, settings.TELEMETRY_PATH)
mocker.post(url, status_code=201)
proj = Project.objects.first()
assert proj.is_completed
proj.check_project_complete()
# Assert that the http request was sent
        assert mocker.called
# get the data that was sent
sent_data = mocker.last_request.json()
assert sent_data["product"] == "teamware"
assert sent_data.get("uuid", None) is not None
assert sent_data["status"] == "complete"
assert sent_data["documents"] == 20
assert sent_data["completed_tasks"] == 80
def test_project_deletion_telemetry(self, mocker):
"""Tests telemetry sending when project is deleted."""
url = urljoin(settings.TELEMETRY_BASE_URL, settings.TELEMETRY_PATH)
mocker.post(url, status_code=201)
proj = Project.objects.first()
proj.delete()
# Assert that the http request was sent
        assert mocker.called
# get the data that was sent
sent_data = mocker.last_request.json()
assert sent_data["product"] == "teamware"
assert sent_data["status"] == "deleted"
assert sent_data["documents"] == 20
# check that project is still deleted even if telemetry fails
with self.assertRaises(Project.DoesNotExist):
Project.objects.get(id=proj.id)
@requests_mock.Mocker()
class TestFailingTelemetry(TestCase):
def setUp(self):
load_test_fixture.project_with_annotators()
settings.TELEMETRY_ON = True
def test_failing_telemetry(self, mocker):
url = urljoin(settings.TELEMETRY_BASE_URL, settings.TELEMETRY_PATH)
# set up mocker for http post request with timeout
mocker.post(url, exc=requests.exceptions.ConnectTimeout)
proj = Project.objects.first()
# delete a project, triggering a telemetry call
with self.assertRaises(requests.exceptions.ConnectTimeout):
proj.delete()
# check that project is still deleted even if telemetry fails
with self.assertRaises(Project.DoesNotExist):
Project.objects.get(id=proj.id)
| 3,260 | 30.057143 | 82 | py |
gate-teamware | gate-teamware-master/backend/tests/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/backend/tests/test_migrations.py | from django.utils import timezone
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.test.testcases import TransactionTestCase
class MigrationTestCase(TransactionTestCase):
"""A Test case for testing migrations"""
# These must be defined by subclasses.
migrate_from = None
migrate_to = None
def setUp(self):
super(MigrationTestCase, self).setUp()
self.executor = MigrationExecutor(connection)
self.executor.migrate(self.migrate_from)
def migrate_to_dest(self):
self.executor.loader.build_graph() # reload.
self.executor.migrate(self.migrate_to)
@property
def old_apps(self):
return self.executor.loader.project_state(self.migrate_from).apps
@property
def new_apps(self):
return self.executor.loader.project_state(self.migrate_to).apps
class TestMigrationToAnnotatorProject_0015(MigrationTestCase):
migrate_from = [('backend', "0014_annotation_time_to_complete")]
migrate_to = [('backend', "0015_auto_20220318_1620")]
def test_migration_to_0015(self):
# Setup code
User = self.old_apps.get_model('backend', "ServiceUser")
Project = self.old_apps.get_model('backend', "Project")
Document = self.old_apps.get_model('backend', "Document")
Annotation = self.old_apps.get_model('backend', "Annotation")
# Create a project
test_projectname = "Test project1"
project = Project.objects.create(name=test_projectname)
project2 = Project.objects.create()
project2_id = project2.id
project3 = Project.objects.create()
project3_id = project3.id
# Create 10 users and add to project
num_project_users = 10
for i in range(num_project_users):
User.objects.create(username=f"user{i}", annotates_id=project.id)
# Create more users with no project
num_non_project_users = 15
for i in range(num_non_project_users):
User.objects.create(username=f"nonprojuser{i}")
# Create annotated documents for project 2 and 3 for user0 to test
# that they're also migrated to projects they're currently not active in
user0 = User.objects.get(username="user0")
doc1 = Document.objects.create(project_id=project2_id)
Annotation.objects.create(document_id=doc1.id, user_id=user0.id)
doc2 = Document.objects.create(project_id=project3_id)
Annotation.objects.create(document_id=doc2.id, user_id=user0.id)
# Perform migration
self.migrate_to_dest()
User = self.new_apps.get_model('backend', "ServiceUser")
Project = self.new_apps.get_model('backend', "Project")
AnnotatorProject = self.new_apps.get_model('backend', 'AnnotatorProject')
# assertions
user = User.objects.get(username="user0")
# User project associations
self.assertEqual(3, user.annotates.count(), "User must be associated with 3 projects")
# user0 must have 1 active project
self.assertEqual(1, AnnotatorProject.objects.filter(annotator_id=user.id, status=0).count())
for ap in AnnotatorProject.objects.filter(annotator_id=user.id, status=0):
self.assertEqual(test_projectname, ap.project.name, f"Project name must be {test_projectname}")
        # user0 must have 2 inactive projects
self.assertEqual(2, AnnotatorProject.objects.filter(annotator_id=user.id, status=1).count())
project = Project.objects.get(name=test_projectname)
self.assertEqual(num_project_users, project.annotators.count(), f"Project must have {num_project_users} users")
class TestMigrationToAnnotationChangeHistory_0024(MigrationTestCase):
migrate_from = [('backend', "0023_annotationchangehistory")]
migrate_to = [('backend', "0024_rename_data_annotation__data")]
def test_migration_to_0024(self):
# Setup code
User = self.old_apps.get_model('backend', "ServiceUser")
Project = self.old_apps.get_model('backend', "Project")
Document = self.old_apps.get_model('backend', "Document")
Annotation = self.old_apps.get_model('backend', "Annotation")
# Create annotations with contents for 10 projects with 10 documents, 1 annotation each
for i in range(10):
project = Project.objects.create()
for j in range(10):
document = Document.objects.create(project_id=project.id)
Annotation.objects.create(document_id=document.id, data={"label1": "value1"})
# Perform migration
self.migrate_to_dest()
Annotation = self.new_apps.get_model('backend', "Annotation")
for annotation in Annotation.objects.all():
if annotation._data:
self.assertDictEqual(annotation._data, annotation.change_history.last().data)
| 4,928 | 37.507813 | 119 | py |
gate-teamware | gate-teamware-master/backend/tests/test_rpc_server.py | from django.db import transaction
from django.contrib.auth import get_user_model
from django.test import TestCase, Client
import json
from django.test.utils import TZ_SUPPORT
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from backend.models import Annotation, Document, Project
from backend.rpcserver import rpc_method, rpc_method_auth, AuthError, rpc_method_manager, rpc_method_admin, \
UNAUTHORIZED_ERROR, AUTHENTICATION_ERROR, JSONRPCEndpoint
import backend.rpcserver
@rpc_method
def rpc_test_function_listing(request, param1, param2=30, param3="Test"):
""" Used in the testing of endpoints listing """
return param1 + param2
@rpc_method
def rpc_test_add_func(request, a, b):
return a+b
@rpc_method_auth
def rpc_test_need_auth(request):
return 10
@rpc_method_manager
def rpc_test_need_manager(request):
return 10
@rpc_method_admin
def rpc_test_need_admin(request):
return 10
@rpc_method
def rpc_test_raise_auth_error(request):
raise AuthError("Raised to test authentication error handling")
@rpc_method_auth
def rpc_test_raise_permission_error(request):
raise PermissionError("Thrown to test permission error handling")
@rpc_method
@transaction.atomic
def rpc_test_django_atomic(request, a, b):
return a+b
@rpc_method
def rpc_endpoint_for_test_call(request, a, b):
return a + b
class TestEndpoint(TestCase):
username = "testuser"
password = "123456789"
user_email = "[email protected]"
factory = RequestFactory()
user = None
client = None
def get_default_user(self):
if not self.user:
self.user = get_user_model().objects.create(username=self.username,
password=self.password,
email=self.user_email,
is_staff=True)
self.user.set_password(self.password)
self.user.save()
return self.user
def get_request(self):
request = self.factory.get("/")
request.user = AnonymousUser()
return request
def get_loggedin_request(self):
request = self.factory.get("/")
request.user = self.get_default_user()
return request
def get_client(self):
if not self.client:
self.client = Client()
return self.client
def get_loggedin_client(self):
client = self.get_client()
user = self.get_default_user()
self.assertTrue(client.login(username=self.username, password=self.password))
return client
def call_rpc(self, client, method_name, *params):
response = client.post("/rpc/", {
"jsonrpc": "2.0",
"id": 0,
"method": method_name,
"params": list(params)
}, content_type="application/json")
return response
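    # Wire-format sketch of the JSON-RPC 2.0 exchange that call_rpc() performs.
    # The request shape is taken from the method above; the response envelope is
    # an assumption inferred from the fields asserted in the tests below:
    #   request:  {"jsonrpc": "2.0", "id": 0, "method": "rpc_test_add_func", "params": [2, 3]}
    #   success:  {"id": 0, "result": 5}
    #   failure:  {"id": 0, "error": {"code": backend.rpcserver.METHOD_NOT_FOUND, ...}}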
class TestTestEndpoint(TestCase):
def test_call_rpc(self):
e = TestEndpoint()
c = e.get_client()
response = e.call_rpc(c, "rpc_endpoint_for_test_call", 2, 3)
self.assertEqual(response.status_code, 200)
msg = json.loads(response.content)
self.assertEqual(msg["result"], 5)
def test_client_loggedin(self):
e = TestEndpoint()
self.assertIsNotNone(e.get_loggedin_client())
class TestRPCServer(TestEndpoint):
def test_rpc_server(self):
username = "testuser"
user_pass = "123456789"
user = get_user_model().objects.create(username=username)
user.set_password(user_pass)
user.save()
c = Client()
# Blank message
response = c.post("/rpc/", {}, content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 400)
self.assertEqual(msg["error"]["code"], backend.rpcserver.INVALID_REQUEST)
# Function that doesn't exist
response = c.post("/rpc/", {"jsonrpc": "2.0", "method": "idontexist"}, content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 405)
self.assertEqual(msg["error"]["code"], backend.rpcserver.METHOD_NOT_FOUND)
# Invalid params
response = c.post("/rpc/", {"jsonrpc": "2.0", "method": "rpc_test_add_func"}, content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 400)
self.assertEqual(msg["error"]["code"], backend.rpcserver.INVALID_PARAMS)
# Valid formed request
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_add_func", "params": [30, 40], "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(msg["result"], 30+40)
self.assertEqual(msg["id"], 20)
# Raising auth error from inside function
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_raise_auth_error", "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 401)
self.assertEqual(msg["error"]["code"], backend.rpcserver.AUTHENTICATION_ERROR)
# Raising permission error form inside function
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_raise_permission_error", "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 401)
self.assertEqual(msg["error"]["code"], backend.rpcserver.AUTHENTICATION_ERROR)
# Needs authentication
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_need_auth", "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 401)
self.assertEqual(msg["error"]["code"], backend.rpcserver.AUTHENTICATION_ERROR)
# Authenticated
loggedin = c.login(username=username, password=user_pass)
self.assertTrue(loggedin)
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_need_auth", "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(msg["result"], 10)
# Raising permission error from inside function after logged in
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_raise_permission_error", "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 401)
self.assertEqual(msg["error"]["code"], backend.rpcserver.UNAUTHORIZED_ERROR)
def test_rpc_django_atomic(self):
c = Client()
response = c.post("/rpc/",
{"jsonrpc": "2.0", "method": "rpc_test_django_atomic", "params": [30, 40], "id": 20},
content_type="application/json")
msg = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(msg["result"], 30 + 40)
self.assertEqual(msg["id"], 20)
def test_endpoint_auth_and_permissions(self):
client = Client()
annotator_username = "annotator"
manager_username = "manager"
admin_username = "admin"
password = "12345678test*"
annotator_user = get_user_model().objects.create(username=annotator_username,
email="[email protected]",
is_account_activated=True)
annotator_user.set_password(password)
annotator_user.save()
manager_user = get_user_model().objects.create(username=manager_username,
email="[email protected]",
is_account_activated=True,
is_manager=True)
manager_user.set_password(password)
manager_user.save()
admin_user = get_user_model().objects.create(username=admin_username,
email="[email protected]",
is_account_activated=True,
is_staff=True,
is_superuser=True,
is_manager=True)
admin_user.set_password(password)
admin_user.save()
# Not logged in
response = self.call_rpc(client, "rpc_test_need_auth")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], AUTHENTICATION_ERROR)
response = self.call_rpc(client, "rpc_test_need_manager")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], AUTHENTICATION_ERROR)
response = self.call_rpc(client, "rpc_test_need_admin")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], AUTHENTICATION_ERROR)
# Logged in as annotator
self.assertTrue(client.login(username=annotator_username, password=password))
response = self.call_rpc(client, "rpc_test_need_auth")
self.assertEqual(response.status_code, 200)
response = self.call_rpc(client, "rpc_test_need_manager")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], UNAUTHORIZED_ERROR)
response = self.call_rpc(client, "rpc_test_need_admin")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], UNAUTHORIZED_ERROR)
# Logged in as manager
self.assertTrue(client.login(username=manager_username, password=password))
response = self.call_rpc(client, "rpc_test_need_auth")
self.assertEqual(response.status_code, 200)
response = self.call_rpc(client, "rpc_test_need_manager")
self.assertEqual(response.status_code, 200)
response = self.call_rpc(client, "rpc_test_need_admin")
self.assertEqual(response.status_code, 401)
msg = json.loads(response.content)
self.assertEqual(msg["error"]["code"], UNAUTHORIZED_ERROR)
# Logged in as admin
self.assertTrue(client.login(username=admin_username, password=password))
response = self.call_rpc(client, "rpc_test_need_auth")
self.assertEqual(response.status_code, 200)
response = self.call_rpc(client, "rpc_test_need_manager")
self.assertEqual(response.status_code, 200)
response = self.call_rpc(client, "rpc_test_need_admin")
self.assertEqual(response.status_code, 200)
def test_endpoint_listing(self):
listing = JSONRPCEndpoint.endpoint_listing()
print(listing)
| 11,628 | 37.003268 | 118 | py |
gate-teamware | gate-teamware-master/backend/utils/telemetry.py | import json
import logging
from threading import Thread
import requests
from urllib.parse import urljoin
from django.conf import settings
log = logging.getLogger(__name__)
class TelemetrySender:
def __init__(self, status: str, data: dict) -> None:
self.url = urljoin(settings.TELEMETRY_BASE_URL, settings.TELEMETRY_PATH)
self.data = data
self.data.update({"product": "teamware", "status": status})
self.http_status_code = None
def send(self):
"""
Makes a post request to the telemetry server containing a dict as json data, if telemetry is switched on.
"""
if settings.TELEMETRY_ON:
            self.thread = Thread(target=self._post_request)
            # Note: Thread.run() executes _post_request synchronously in the
            # current thread (unlike Thread.start()); the tests read
            # http_status_code immediately after send() and rely on this
            # blocking behaviour.
            self.thread.run()
else:
log.info(f"Telemetry is switched off. Not sending telemetry data for project {self.data['uuid']}.")
def _post_request(self):
log.info(f"Sending telemetry data for project {self.data['uuid']} to {self.url}.")
r = requests.post(self.url, json=self.data)
self.http_status_code = r.status_code
log.info(f"{self.http_status_code}: {r.text}")
| 1,165 | 34.333333 | 113 | py |
gate-teamware | gate-teamware-master/backend/utils/misc.py | import string
import random
def get_value_from_key_path(obj_dict, key_path, delimiter="."):
"""
    Gets a value from a dictionary by following a delimited key_path. Does not work for paths with array elements.
    :returns: None if the path does not exist.
"""
if key_path is None:
return None
key_path_split = key_path.split(delimiter)
current_value = obj_dict
for key in key_path_split:
if type(current_value) is dict and key in current_value:
current_value = current_value[key]
else:
return None
return current_value
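# Illustrative usage of get_value_from_key_path (a sketch, not part of the
# original module):
#   data = {"a": {"b": {"c": 1}}}
#   get_value_from_key_path(data, "a.b.c")   # -> 1
#   get_value_from_key_path(data, "a.x.c")   # -> None, path does not exist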
def insert_value_to_key_path(obj_dict, key_path, value, delimiter="."):
"""
    Inserts a value into a dictionary by following a delimited key_path. Does not work for paths with array elements.
    Returns True if the value was inserted, False otherwise.
"""
key_path_split = key_path.split(delimiter)
key_path_length = len(key_path_split)
current_dict = obj_dict
for index, key in enumerate(key_path_split):
if index < key_path_length - 1:
# Insert dict if doesn't exist
if key not in current_dict:
current_dict[key] = {}
if type(current_dict[key]) is dict:
current_dict = current_dict[key]
else:
return False
else:
current_dict[key] = value
return True
return False
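# Illustrative usage of insert_value_to_key_path (a sketch):
#   data = {}
#   insert_value_to_key_path(data, "a.b.c", 1)    # -> True; data == {"a": {"b": {"c": 1}}}
#   insert_value_to_key_path(data, "a.b.c.d", 2)  # -> False; "c" already holds a non-dict value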
def read_custom_document(path):
"""
Reads in a text file and returns as a string.
Primarily used for reading in custom privacy policy and/or terms & conditions documents.
"""
with open(path) as file:
doc_str = file.read()
return doc_str
def generate_random_string(length) -> str:
"""
Generates random ascii string of lowercase, uppercase and digits of length
@param length Length of the generated random string
@return Generated random string
"""
use_characters = string.ascii_letters + string.digits
return ''.join([random.choice(use_characters) for i in range(length)])
| 2,038 | 29.893939 | 110 | py |
gate-teamware | gate-teamware-master/backend/utils/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/backend/utils/serialize.py | import logging
import json
from datetime import datetime
from django.db import models
from django.db.models import Model, ManyToOneRel, ManyToManyRel, ForeignKey
from django.utils import timezone
from backend.models import Project
log = logging.getLogger(__name__)
def dsl_val(attr_name, obj, data):
"""
Insert value of `data` with key `attr_name` into the object `obj` with attribute name `attr_name`
"""
if attr_name in data:
setattr(obj, attr_name, data[attr_name])
def dsl_json(attr_name, obj, data):
"""
Convert value of `data` with key `attr_name` into a JSON string and insert into the
object `obj` with attribute name `attr_name`
"""
if attr_name in data:
setattr(obj, attr_name, json.dumps(data[attr_name]))
def dsl_date(attr_name, obj, data):
"""
Convert value of `data` with key `attr_name` into a datetime object and insert into the
object `obj` with attribute name `attr_name`
"""
if attr_name in data:
if data[attr_name] is None:
setattr(obj, attr_name, None)
elif isinstance(data[attr_name], str):
setattr(obj, attr_name, datetime.fromisoformat(data[attr_name]))
elif isinstance(data[attr_name], datetime):
setattr(obj, attr_name, data[attr_name])
else:
raise ValueError("Date must be None, str or datetime object")
class FieldSerializer:
def serialize(self, model_obj, field):
return getattr(model_obj, field.name)
def deserialize(self, model_obj, val_input, field):
setattr(model_obj, field.name, val_input)
class ForeignKeySerializer(FieldSerializer):
def serialize(self, model_obj, field):
related_obj = getattr(model_obj, field.name)
if not related_obj:
return None
return related_obj.id
def deserialize(self, model_obj, val_input, field):
rel_obj = None
if val_input:
rel_obj = field.related_model.objects.get(pk=val_input)
setattr(model_obj, field.name, rel_obj)
class RelationSerializer(FieldSerializer):
def serialize(self, model_obj, field):
relation_objs = getattr(model_obj, field.name).all().values_list('id', flat=True)
return [rel_obj for rel_obj in relation_objs]
def deserialize(self, model_obj, val_input, field):
pass # TODO ? Might be better to manage these relations in a separate field
class ModelSerializer:
def __init__(self):
"""
        field_serializers:dict Use a special serializer (subclass of FieldSerializer) for the specified field name e.g. {"my_json_field": JSONFieldSerializer}
field_relation_spec:dict Serialize one-to-many or many-to-many relations. The spec allows declarations of
fields in the related object to serialize e.g.
{
"my_relation_field": {
"field": {"id", "name", ...}
}
}
"""
self._field_serializer = FieldSerializer()
self._relation_serializer = RelationSerializer()
self._foreign_key_serializer = ForeignKeySerializer()
self.field_serializers = {}
self.field_relation_spec = {}
self.serializer_dict = {
ManyToManyRel: self._relation_serializer,
ManyToOneRel: self._relation_serializer,
ForeignKey: self._foreign_key_serializer
}
def serialize(self, model_obj: Model, select_fields: set = None, exclude_fields: set = None):
if not model_obj or not isinstance(model_obj, Model):
raise ValueError("Must provide an instance of a Model to serialize")
output = {}
fields_to_serialize = self.get_field_names_to_serialize(model_obj, select_fields, exclude_fields)
# Value fields, foreign keys and fields with serializers
for field in model_obj.__class__._meta.get_fields():
if field.name in fields_to_serialize:
output[field.name] = self.serialize_field(model_obj, field)
return output
    def get_field_names_to_serialize(self, model_obj: Model, select_fields: set, exclude_fields: set):
        # Copy the input so the caller's select_fields set is not mutated by the
        # exclusions below
        fields_to_serialize = set(select_fields) if select_fields else set()
        if len(fields_to_serialize) < 1:
            for field in model_obj.__class__._meta.get_fields():
                fields_to_serialize.add(field.name)
        if exclude_fields:
            for exclude_name in exclude_fields:
                # discard() does not raise if the field name is absent
                fields_to_serialize.discard(exclude_name)
        return fields_to_serialize
def serialize_field(self, model_obj: Model, field):
field_serializer = self.get_field_serializer(field)
return field_serializer.serialize(model_obj, field)
    def get_field_serializer(self, django_field):
        if django_field.__class__ in self.serializer_dict:
            return self.serializer_dict[django_field.__class__]
        else:
            # Reuse the shared stateless default instance instead of
            # constructing a new FieldSerializer on every call
            return self._field_serializer
def deserialize(self, model_class, input_dict, select_fields: set = None, exclude_fields: set = None):
if not issubclass(model_class, Model):
raise ValueError(f"{model_class} must be subclass of django Model")
model_obj = model_class.objects.get(pk=input_dict["id"])
if not model_obj:
raise ValueError(f"No object with id {input_dict['id']}")
fields_to_serialize = self.get_field_names_to_serialize(model_obj, select_fields, exclude_fields)
# Value fields, foreign keys and fields with serializers
for field in model_obj.__class__._meta.get_fields():
if field.name in fields_to_serialize and field.name in input_dict:
self.deserialize_field(model_obj, input_dict[field.name], field)
model_obj.save()
return model_obj
def deserialize_field(self, model_obj, input_field, field):
field_serializer = self.get_field_serializer(field)
return field_serializer.deserialize(model_obj, input_field, field)
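# Illustrative usage of ModelSerializer (a sketch; "documents" is a reverse
# relation on Project and "name" one of its fields):
#   serializer = ModelSerializer()
#   project_dict = serializer.serialize(project, exclude_fields={"documents"})
#   project = serializer.deserialize(Project, {"id": project.id, "name": "Renamed"})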
| 6,068 | 35.125 | 155 | py |
gate-teamware | gate-teamware-master/teamware/wsgi.py | """
WSGI config for teamware project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings.deployment')
application = get_wsgi_application()
| 404 | 22.823529 | 79 | py |
gate-teamware | gate-teamware-master/teamware/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/teamware/urls.py | """teamware URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from backend import views
from backend.rpcserver import JSONRPCEndpoint
urlpatterns = [
path('admin/', admin.site.urls),
path('rpc/', JSONRPCEndpoint.as_view()),
path('download_annotations/<int:project_id>/<str:doc_type>/<str:export_type>/<str:json_format>/<int:entries_per_file>/<str:anonymize>/', views.DownloadAnnotationsView.as_view()),
re_path('^.*$', views.MainView.as_view(), name="index"),
]
| 1,121 | 39.071429 | 182 | py |
gate-teamware | gate-teamware-master/teamware/asgi.py | """
ASGI config for teamware project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings')
application = get_asgi_application()
| 393 | 22.176471 | 78 | py |
gate-teamware | gate-teamware-master/teamware/settings/docker-test.py | """
Settings for Dockerised CI tests
"""
from .deployment import *
TELEMETRY_ON = False
FRONTEND_DEV_SERVER_USE = False
| 122 | 12.666667 | 32 | py |
gate-teamware | gate-teamware-master/teamware/settings/base.py | """
Django settings for teamware project.
The base.py file is loaded by default.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
SETTINGS_DIR = Path(__file__).resolve().parent
if 'DJANGO_SECRET_KEY' in os.environ:
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
else:
SECRET_KEY = 'django-insecure-+rh5#u6=19q90g$)e%ca&wpfjsju*5*=9b#ah2b&dlwpkx%4$o'
print("DEFAULT SECRET IS BEING USED!! This should only happen in development and automated testing")
if 'DB_USERNAME' in os.environ:
POSTGRES_USERNAME = os.environ.get('DB_USERNAME')
if 'DB_PASSWORD' in os.environ:
POSTGRES_PASSWORD = os.environ.get('DB_PASSWORD')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1','0.0.0.0']
# Application definition
INSTALLED_APPS = [
'backend.apps.BackendConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'gmailapi_backend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
]
ROOT_URLCONF = 'teamware.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS':
[
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'backend/templates'),
os.path.join(BASE_DIR, 'frontend/templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teamware.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("DJANGO_DB_NAME", "teamware_db"),
"USER": os.environ.get("DB_USERNAME", "user"),
"PASSWORD": os.environ.get("DB_PASSWORD", "password"),
"HOST": os.environ.get("DB_HOST", "127.0.0.1"),
"PORT": os.environ.get("DB_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "frontend/public/static"),
os.path.join(BASE_DIR, "frontend/dist/static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'frontend', 'webpack-stats.json')
}
}
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'backend.ServiceUser'
CSRF_USE_SESSIONS = False
CSRF_COOKIE_HTTPONLY = False
APP_NAME = "GATE Teamware"
APP_URL = os.getenv('DJANGO_APP_URL', "http://127.0.0.1:8000")
# Admin email - The mail address to be used for contacting
# users of the system
ADMIN_EMAIL = os.getenv('DJANGO_ADMIN_EMAIL', '[email protected]')
# User account activation settings
ACTIVATION_URL_PATH = "/activate"
ACTIVATION_WITH_EMAIL = os.getenv('DJANGO_ACTIVATION_WITH_EMAIL', '').lower() in ['true', 'yes', 'on']
ACTIVATION_EMAIL_TIMEOUT_DAYS = 7
ACTIVATION_TOKEN_LENGTH = 128
# Password reset settings
PASSWORD_RESET_URL_PATH = "/passwordreset"
PASSWORD_RESET_TIMEOUT_HOURS = 10
PASSWORD_RESET_TOKEN_LENGTH = 128
"""
# Email Configuration - Specify e-mail backend here
# https://docs.djangoproject.com/en/3.2/topics/email/
# django-gmailapi-backend (https://github.com/dolfim/django-gmailapi-backend) is used for sending
# emails through Google's API. See documentation for more details.
"""
"""
Select the email backend to use
Emails are sent to local memory by default: django.core.mail.backends.locmem.EmailBackend
For SMTP: django.core.mail.backends.smtp.EmailBackend
For Gmail tokens: gmailapi_backend.mail.GmailBackend
"""
EMAIL_BACKEND = os.getenv('DJANGO_EMAIL_BACKEND', 'django.core.mail.backends.locmem.EmailBackend')
"""
Send e-mail through a standard SMTP server using Django's built-in SMTP backend. See
https://docs.djangoproject.com/en/3.2/topics/email/ for the full list of configuration parameters.
"""
EMAIL_HOST = os.getenv('DJANGO_EMAIL_HOST', 'localhost')
EMAIL_PORT = int(os.getenv('DJANGO_EMAIL_PORT', 25))
if 'DJANGO_EMAIL_HOST_USER' in os.environ:
# If user is set then password must also, and we want to raise an
# exception if it's missing
EMAIL_HOST_USER = os.environ['DJANGO_EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['DJANGO_EMAIL_HOST_PASSWORD']
if 'DJANGO_EMAIL_SECURITY' in os.environ:
if os.environ['DJANGO_EMAIL_SECURITY'].lower() == 'ssl':
EMAIL_USE_SSL = True
elif os.environ['DJANGO_EMAIL_SECURITY'].lower() == 'tls':
EMAIL_USE_TLS = True
else:
raise ValueError("DJANGO_EMAIL_SECURITY, if set, must be either SSL or TLS")
if 'DJANGO_EMAIL_CLIENT_CERTIFICATE' in os.environ:
# If certificate is set then key must also, and we want to raise an
# exception if it's missing
EMAIL_SSL_CERTFILE = os.environ['DJANGO_EMAIL_CLIENT_CERTIFICATE']
EMAIL_SSL_KEYFILE = os.environ['DJANGO_EMAIL_CLIENT_KEY']
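# Example environment configuration for a TLS-secured SMTP server (host and
# credentials are placeholders, shown only to illustrate how the variables
# above fit together):
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend
#   DJANGO_EMAIL_HOST=smtp.example.com
#   DJANGO_EMAIL_PORT=587
#   DJANGO_EMAIL_SECURITY=tls
#   DJANGO_EMAIL_HOST_USER=teamware
#   DJANGO_EMAIL_HOST_PASSWORD=changeme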
"""
If sending e-mail through Gmail using Google's API, the following parameters must be set:
"""
GMAIL_API_CLIENT_ID = os.getenv('DJANGO_GMAIL_API_CLIENT_ID', 'google_assigned_id')
GMAIL_API_CLIENT_SECRET = os.getenv('DJANGO_GMAIL_API_CLIENT_SECRET', 'google_assigned_secret')
GMAIL_API_REFRESH_TOKEN = os.getenv('DJANGO_GMAIL_API_REFRESH_TOKEN', 'google_assigned_token')
# Telemetry settings
TELEMETRY_ON = False
TELEMETRY_BASE_URL = 'https://reports.gate.ac.uk/'
TELEMETRY_PATH = 'phone_home'
"""
Privacy Policy Settings
"""
CUSTOM_PP_DIR = 'custom-policies'
CUSTOM_PP_DOCUMENT_PATH = os.path.join(CUSTOM_PP_DIR, 'privacy-policy.md')
CUSTOM_TC_DOCUMENT_PATH = os.path.join(CUSTOM_PP_DIR, 'terms-and-conditions.md')
PRIVACY_POLICY = {
'HOST_NAME': os.getenv('PP_HOST_NAME', 'No name configured.'),
'HOST_ADDRESS': os.getenv('PP_HOST_ADDRESS', 'No address configured.'),
'HOST_CONTACT': os.getenv('PP_HOST_CONTACT', 'No contact link configured.'),
}
PRIVACY_POLICY.update({
'ADMIN_NAME': os.getenv('PP_ADMIN_NAME', PRIVACY_POLICY['HOST_NAME']),
'ADMIN_ADDRESS': os.getenv('PP_ADMIN_ADDRESS', PRIVACY_POLICY['HOST_ADDRESS']),
'ADMIN_CONTACT': os.getenv('PP_ADMIN_CONTACT', PRIVACY_POLICY['HOST_CONTACT'])
})
"""
Deleted user settings
"""
ALLOW_USER_DELETE = True # Whether to allow deleting user and their associated projects and data
DELETED_USER_USERNAME_PREFIX = "deleted"
DELETED_USER_USERNAME_HASH_LENGTH = 8
DELETED_USER_FIRSTNAME = "Deleted"
DELETED_USER_LASTNAME = "Deleted"
DELETED_USER_EMAIL_DOMAIN = "teamware-deleted.com"
"""
Frontend dev server configuration
"""
FRONTEND_DEV_SERVER_USE = True
| 8,634 | 30.746324 | 148 | py |
gate-teamware | gate-teamware-master/teamware/settings/test.py | """
Settings for local backend testing
"""
from .base import *
| 64 | 9.833333 | 34 | py |
gate-teamware | gate-teamware-master/teamware/settings/docker-integration.py | """
Settings for integration testing
Uses a clean database every time
"""
from .deployment import *
DATABASES['default']['NAME'] = "teamware_integration_db"
# Turn off e-mail activation for testing
ACTIVATION_WITH_EMAIL = False
TELEMETRY_ON = False
FRONTEND_DEV_SERVER_USE = False
| 285 | 18.066667 | 56 | py |
gate-teamware | gate-teamware-master/teamware/settings/deployment.py | import logging
import sys
import os
from .base import *
# Enable csrf in production
MIDDLEWARE.append(
'django.middleware.csrf.CsrfViewMiddleware'
)
DEBUG = (os.environ.get('DJANGO_DEBUG', "false").lower() in ['true', 'yes', 'on', '1'])
if 'DJANGO_ALLOWED_HOSTS' in os.environ:
    # This looks a bit horrible, but the logic is: split DJANGO_ALLOWED_HOSTS on
# commas, strip surrounding whitespace off each element, and filter out any
# remaining empty strings
ALLOWED_HOSTS.extend(host for host in (h.strip() for h in os.environ['DJANGO_ALLOWED_HOSTS'].split(',')) if host)
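    # e.g. DJANGO_ALLOWED_HOSTS="example.com, annotator.example.com" extends
    # ALLOWED_HOSTS with ['example.com', 'annotator.example.com']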
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
},
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("DJANGO_DB_NAME", "teamware_db"),
"USER": os.environ.get("DB_USERNAME", "user"),
"PASSWORD": os.environ.get("DB_PASSWORD", "password"),
"HOST": os.environ.get("DB_HOST", "db"),
"PORT": os.environ.get("DB_PORT", "5432"),
}
}
TELEMETRY_ON = True
FRONTEND_DEV_SERVER_USE = False
| 1,550 | 25.741379 | 117 | py |
gate-teamware | gate-teamware-master/teamware/settings/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/teamware/settings/integration.py | """
Settings for local integration testing
Uses a clean database every time
"""
from .base import *
DATABASES['default']['NAME'] = "teamware_integration_db"
# Turn off e-mail activation for testing
ACTIVATION_WITH_EMAIL = False
TELEMETRY_ON = False
| 253 | 17.142857 | 56 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/setup.py | import os
import os.path as osp
import shutil
import subprocess
import sys
import warnings
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmedit/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from mmedit.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '<=', '==', '>', '<']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extention():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmedit', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
try:
os.symlink(src_relpath, tar_path)
except OSError:
# Creating a symbolic link on windows may raise an
# `OSError: [WinError 1314]` due to privilege. If
# the error happens, the src file will be copied
mode = 'copy'
warnings.warn(
f'Failed to create a symbolic link for {src_relpath}, '
f'and it will be copied to {tar_path}')
else:
continue
if mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extention()
setup(
name='mmedit',
version=get_version(),
description='OpenMMLab Image and Video Editing Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
maintainer='MMEditing Contributors',
maintainer_email='[email protected]',
keywords='computer vision, super resolution, video interpolation, '
'inpainting, matting, SISR, RefSR, VSR, GAN, VFI',
url='https://github.com/open-mmlab/mmediting',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
},
zip_safe=False)
| 8,503 | 34.286307 | 125 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmedit.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import build_model
from mmedit.utils import setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='mmediting tester')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--out', help='output result pickle file')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--save-path',
default=None,
type=str,
help='path to store images and if not given, will not save image')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
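# Example invocation (config and checkpoint paths are placeholders):
#   python tools/test.py configs/some_config.py work_dirs/ckpt.pth \
#       --save-path work_dirs/results/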
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# set random seeds
if args.seed is not None:
if rank == 0:
print('set random seed to', args.seed)
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
loader_cfg = {
**dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
**dict(
samples_per_gpu=1,
drop_last=False,
shuffle=False,
dist=distributed),
**cfg.data.get('test_dataloader', {})
}
data_loader = build_dataloader(dataset, **loader_cfg)
# build the model and load checkpoint
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
args.save_image = args.save_path is not None
empty_cache = cfg.get('empty_cache', False)
if not distributed:
_ = load_checkpoint(model, args.checkpoint, map_location='cpu')
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(
model,
data_loader,
save_path=args.save_path,
save_image=args.save_image)
else:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = DistributedDataParallelWrapper(
model,
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
device_id = torch.cuda.current_device()
_ = load_checkpoint(
model,
args.checkpoint,
map_location=lambda storage, loc: storage.cuda(device_id))
outputs = multi_gpu_test(
model,
data_loader,
args.tmpdir,
args.gpu_collect,
save_path=args.save_path,
save_image=args.save_image,
empty_cache=empty_cache)
if rank == 0 and 'eval_result' in outputs[0]:
print('')
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
# save result pickle
if args.out:
print('writing results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
if __name__ == '__main__':
main()
| 5,285 | 32.66879 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/evaluate_comp1k.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import re
import mmcv
import numpy as np
from mmedit.core.evaluation import connectivity, gradient_error, mse, sad
from mmedit.utils import modify_args
def evaluate_one(args):
"""Function to evaluate one sample of data.
Args:
args (tuple): Information needed to evaluate one sample of data.
Returns:
dict: The evaluation results including sad, mse, gradient error and
connectivity error.
"""
pred_alpha_path, alpha_path, trimap_path = args
pred_alpha = mmcv.imread(pred_alpha_path, flag='grayscale')
alpha = mmcv.imread(alpha_path, flag='grayscale')
if trimap_path is None:
trimap = np.ones_like(alpha)
else:
trimap = mmcv.imread(trimap_path, flag='grayscale')
sad_result = sad(alpha, trimap, pred_alpha)
mse_result = mse(alpha, trimap, pred_alpha)
grad_result = gradient_error(alpha, trimap, pred_alpha)
conn_result = connectivity(alpha, trimap, pred_alpha)
return (sad_result, mse_result, grad_result, conn_result)
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
"""Evaluate test results of Adobe composition-1k dataset.
There are 50 different ground truth foregrounds and alpha mattes pairs,
each of the foreground will be composited with 20 different backgrounds,
producing 1000 images for testing. In some repo, the ground truth alpha
matte will be copied 20 times and named the same as the images. This
function accept both original alpha matte folder (contains 50 ground
truth alpha mattes) and copied alpha matte folder (contains 1000 ground
truth alpha mattes) for `gt_root`.
Example of copied name:
```
alpha_matte1.png -> alpha_matte1_0.png
alpha_matte1_1.png
...
alpha_matte1_19.png
alpha_matte1_20.png
```
Args:
pred_root (str): Path to the predicted alpha matte folder.
gt_root (str): Path to the ground truth alpha matte folder.
        trimap_root (str): Path to the trimap folder.
        verbose (bool): Whether to print the result for each predicted alpha
            matte.
        nproc (int): number of processes.
"""
images = sorted(mmcv.scandir(pred_root))
gt_files_num = len(list(mmcv.scandir(gt_root)))
# If ground truth alpha mattes are not copied (number of files is 50), we
# use the below pattern to recover the name of the original alpha matte.
if gt_files_num == 50:
pattern = re.compile(r'(.+)_(?:\d+)(.png)')
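        # e.g. pattern.match('alpha_matte1_19.png').groups() returns
        # ('alpha_matte1', '.png'), so ''.join recovers 'alpha_matte1.png'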
pairs = []
for img in images:
pred_alpha_path = osp.join(pred_root, img)
        # if ground truth alpha mattes are not copied, recover original name
if gt_files_num == 50:
groups = pattern.match(img).groups()
alpha_path = osp.join(gt_root, ''.join(groups))
        # if ground truth alpha mattes are copied, the name should be the same
else: # gt_files_num == 1000
alpha_path = osp.join(gt_root, img)
trimap_path = (
osp.join(trimap_root, img) if trimap_root is not None else None)
pairs.append((pred_alpha_path, alpha_path, trimap_path))
results = mmcv.track_parallel_progress(evaluate_one, pairs, nproc)
if verbose:
# for sad_result, mse_result, grad_result, conn_result in results:
for i, img in enumerate(images):
sad_result, mse_result, grad_result, conn_result = results[i]
print(f'{img} SAD: {sad_result:.6g} MSE: {mse_result:.6g} '
f'GRAD: {grad_result:.6g} CONN: {conn_result:.6g}')
sad_mean, mse_mean, grad_mean, conn_mean = np.mean(results, axis=0)
print(f'MEAN: SAD: {sad_mean:.6g} MSE: {mse_mean:.6g} '
f'GRAD: {grad_mean:.6g} CONN: {conn_mean:.6g}')
def parse_args():
modify_args()
parser = argparse.ArgumentParser(
description='evaluate composition-1k prediction result')
parser.add_argument(
'pred_root', help='Path to the predicted alpha matte folder')
parser.add_argument(
'gt_root', help='Path to the ground truth alpha matte folder')
parser.add_argument(
'--trimap-root',
help='Path to trimap folder. If not specified, '
'results are calculated on the full image.')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
        help='Whether to print the result for each predicted alpha matte')
parser.add_argument(
        '--nproc', type=int, default=4, help='number of processes')
return parser.parse_args()
def main():
args = parse_args()
if not osp.exists(args.pred_root):
raise FileNotFoundError(f'pred_root {args.pred_root} not found')
if not osp.exists(args.gt_root):
raise FileNotFoundError(f'gt_root {args.gt_root} not found')
evaluate(args.pred_root, args.gt_root, args.trimap_root, args.verbose,
args.nproc)
if __name__ == '__main__':
main()
| 5,073 | 36.585185 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmcv import Config
from mmcv.cnn.utils import get_model_complexity_info
from mmedit.models import build_model
def parse_args():
    parser = argparse.ArgumentParser(description='Train an editor')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[250, 250],
help='input image size')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
elif len(args.shape) in [3, 4]: # 4 for video inputs (t, c, h, w)
input_shape = tuple(args.shape)
else:
raise ValueError('invalid input shape')
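    # e.g. '--shape 250 250' -> (3, 250, 250); a 4-tuple such as
    # '--shape 5 3 64 64' is treated as video input (t, c, h, w)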
cfg = Config.fromfile(args.config)
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported '
f'with {model.__class__.__name__}')
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
if len(input_shape) == 4:
print('!!!If your network computes N frames in one forward pass, you '
'may want to divide the FLOPs by N to get the average FLOPs '
'for each frame.')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
| 1,956 | 29.578125 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/onnx2tensorrt.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
from typing import Iterable, Optional
import cv2
import mmcv
import numpy as np
import onnxruntime as ort
import torch
from mmcv.ops import get_onnxruntime_op_path
from mmcv.tensorrt import (TRTWrapper, is_tensorrt_plugin_loaded, onnx2trt,
save_trt_engine)
from mmedit.datasets.pipelines import Compose
def get_GiB(x: int):
"""return x GiB."""
return x * (1 << 30)
def _prepare_input_img(model_type: str,
img_path: str,
config: dict,
rescale_shape: Optional[Iterable] = None) -> dict:
"""Prepare the input image
Args:
        model_type (str): which kind of model the config belongs to, \
one of ['inpainting', 'mattor', 'restorer', 'synthesizer'].
img_path (str): image path to show or verify.
        config (dict): MMCV config, determined by the input config file.
rescale_shape (Optional[Iterable]): to rescale the shape of the \
input tensor.
Returns:
dict: {'imgs': imgs, 'img_metas': img_metas}
"""
# remove alpha from test_pipeline
if model_type == 'mattor':
keys_to_remove = ['alpha', 'ori_alpha']
elif model_type == 'restorer':
keys_to_remove = ['gt', 'gt_path']
for key in keys_to_remove:
for pipeline in list(config.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
config.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
config.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(config.test_pipeline)
# prepare data
if model_type == 'mattor':
raise RuntimeError('Invalid model_type!', model_type)
if model_type == 'restorer':
data = dict(lq_path=img_path)
data = test_pipeline(data)
if model_type == 'restorer':
imgs = data['lq']
else:
imgs = data['img']
img_metas = [data['meta']]
if rescale_shape is not None:
for img_meta in img_metas:
img_meta['ori_shape'] = tuple(rescale_shape) + (3, )
mm_inputs = {'imgs': imgs, 'img_metas': img_metas}
return mm_inputs
def onnx2tensorrt(onnx_file: str,
trt_file: str,
config: dict,
input_config: dict,
model_type: str,
img_path: str,
fp16: bool = False,
verify: bool = False,
show: bool = False,
workspace_size: int = 1,
verbose: bool = False):
"""Convert ONNX model to TensorRT model
Args:
onnx_file (str): the path of the input ONNX file.
trt_file (str): the path to output the TensorRT file.
config (dict): MMCV configuration.
input_config (dict): contains min_shape, max_shape and \
input image path.
fp16 (bool): whether to enable fp16 mode.
verify (bool): whether to verify the outputs of TensorRT \
            and ONNX are the same.
show (bool): whether to show the outputs of TensorRT and ONNX.
verbose (bool): whether to print the log when generating \
TensorRT model.
"""
import tensorrt as trt
min_shape = input_config['min_shape']
max_shape = input_config['max_shape']
# create trt engine and wrapper
opt_shape_dict = {'input': [min_shape, min_shape, max_shape]}
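    # each entry is [min_shape, opt_shape, max_shape] for the named input;
    # here the optimal shape is simply assumed to equal the minimum shape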
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_file,
opt_shape_dict,
log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
fp16_mode=fp16,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
save_trt_engine(trt_engine, trt_file)
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
inputs = _prepare_input_img(
model_type=model_type, img_path=img_path, config=config)
imgs = inputs['imgs']
img_list = [imgs.unsqueeze(0)]
if max_shape[0] > 1:
            # concatenate flipped images for batch test
flip_img_list = [_.flip(-1) for _ in img_list]
img_list = [
torch.cat((ori_img, flip_img), 0)
for ori_img, flip_img in zip(img_list, flip_img_list)
]
# Get results from ONNXRuntime
ort_custom_op_path = get_onnxruntime_op_path()
session_options = ort.SessionOptions()
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
sess.set_providers(['CPUExecutionProvider'], [{}]) # use cpu mode
onnx_output = sess.run(['output'],
{'input': img_list[0].detach().numpy()})[0][0]
# Get results from TensorRT
trt_model = TRTWrapper(trt_file, ['input'], ['output'])
with torch.no_grad():
trt_outputs = trt_model({'input': img_list[0].contiguous().cuda()})
trt_output = trt_outputs['output'][0].cpu().detach().numpy()
if show:
onnx_visualize = onnx_output.transpose(1, 2, 0)
onnx_visualize = np.clip(onnx_visualize, 0, 1)[:, :, ::-1]
trt_visualize = trt_output.transpose(1, 2, 0)
trt_visualize = np.clip(trt_visualize, 0, 1)[:, :, ::-1]
cv2.imshow('ONNXRuntime', onnx_visualize)
cv2.imshow('TensorRT', trt_visualize)
cv2.waitKey()
np.testing.assert_allclose(
onnx_output, trt_output, rtol=1e-03, atol=1e-05)
print('TensorRT and ONNXRuntime output all close.')
def parse_args():
parser = argparse.ArgumentParser(
        description='Convert MMEditing models from ONNX to TensorRT')
parser.add_argument('config', help='Config file of the model')
parser.add_argument(
'model_type',
        help='what kind of model the config belongs to.',
choices=['inpainting', 'mattor', 'restorer', 'synthesizer'])
parser.add_argument('img_path', type=str, help='Image for test')
parser.add_argument('onnx_file', help='Path to the input ONNX model')
parser.add_argument(
'--trt-file',
type=str,
help='Path to the output TensorRT engine',
default='tmp.trt')
parser.add_argument(
'--max-shape',
type=int,
nargs=4,
default=[1, 3, 512, 512],
help='Maximum shape of model input.')
parser.add_argument(
'--min-shape',
type=int,
nargs=4,
default=[1, 3, 32, 32],
help='Minimum shape of model input.')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size in GiB')
parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode')
parser.add_argument(
'--show', action='store_true', help='Whether to show output results')
parser.add_argument(
'--verify',
action='store_true',
help='Verify the outputs of ONNXRuntime and TensorRT')
parser.add_argument(
'--verbose',
action='store_true',
        help='Whether to show verbose logging messages while creating \
TensorRT engine.')
args = parser.parse_args()
return args
if __name__ == '__main__':
assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
args = parse_args()
# check arguments
assert osp.exists(args.config), 'Config {} not found.'.format(args.config)
assert osp.exists(args.onnx_file), \
'ONNX model {} not found.'.format(args.onnx_file)
assert args.workspace_size >= 0, 'Workspace size less than 0.'
for max_value, min_value in zip(args.max_shape, args.min_shape):
assert max_value >= min_value, \
'max_shape should be larger than min shape'
config = mmcv.Config.fromfile(args.config)
config.model.pretrained = None
input_config = {
'min_shape': args.min_shape,
'max_shape': args.max_shape,
'input_path': args.img_path
}
onnx2tensorrt(
args.onnx_file,
args.trt_file,
config,
input_config,
model_type=args.model_type,
img_path=args.img_path,
fp16=args.fp16,
verify=args.verify,
show=args.show,
workspace_size=args.workspace_size,
verbose=args.verbose)
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
| 9,445 | 34.115242 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from packaging import version
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if version.parse(torch.__version__) >= version.parse('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    stem = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = stem + f'-{sha[:8]}.pth'  # rstrip would strip a char set
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,256 | 29.658537 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/pytorch2onnx.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
import cv2
import mmcv
import numpy as np
import onnx
import onnxruntime as rt
import torch
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def pytorch2onnx(model,
input,
model_type,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
dynamic_export=False):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
input (dict): We need to use this input to execute the model.
opset_version (int): The onnx op version. Default: 11.
        show (bool): Whether to print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
        verify (bool): Whether to compare the outputs between Pytorch and
            ONNX. Default: False.
"""
model.cpu().eval()
if model_type == 'mattor':
merged = input['merged'].unsqueeze(0)
trimap = input['trimap'].unsqueeze(0)
data = torch.cat((merged, trimap), 1)
elif model_type == 'restorer':
data = input['lq'].unsqueeze(0)
model.forward = model.forward_dummy
    # PyTorch 1.3 has some bugs in ONNX export; work around them
    # by replacing the affected ops with extra symbolics
register_extra_symbolics(opset_version)
dynamic_axes = None
if dynamic_export:
dynamic_axes = {
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'output': {
0: 'batch',
2: 'height',
3: 'width'
}
}
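        # with these axes the exported graph accepts arbitrary batch sizes
        # and spatial sizes, e.g. (1, 3, 64, 64) as well as (2, 3, 128, 96)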
with torch.no_grad():
torch.onnx.export(
model,
data,
output_file,
input_names=['input'],
output_names=['output'],
export_params=True,
keep_initializers_as_inputs=False,
verbose=show,
opset_version=opset_version,
dynamic_axes=dynamic_axes)
print(f'Successfully exported ONNX model: {output_file}')
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
if dynamic_export:
# scale image for dynamic shape test
data = torch.nn.functional.interpolate(data, scale_factor=1.1)
        # concatenate flipped images for batch test
flip_data = data.flip(-1)
data = torch.cat((data, flip_data), 0)
# get pytorch output, only concern pred_alpha
with torch.no_grad():
pytorch_result = model(data)
if isinstance(pytorch_result, (tuple, list)):
pytorch_result = pytorch_result[0]
pytorch_result = pytorch_result.detach().numpy()
# get onnx output
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(None, {
'input': data.detach().numpy(),
})
# only concern pred_alpha value
if isinstance(onnx_result, (tuple, list)):
onnx_result = onnx_result[0]
if show:
pytorch_visualize = pytorch_result[0].transpose(1, 2, 0)
pytorch_visualize = np.clip(pytorch_visualize, 0, 1)[:, :, ::-1]
onnx_visualize = onnx_result[0].transpose(1, 2, 0)
onnx_visualize = np.clip(onnx_visualize, 0, 1)[:, :, ::-1]
cv2.imshow('PyTorch', pytorch_visualize)
cv2.imshow('ONNXRuntime', onnx_visualize)
cv2.waitKey()
# check the numerical value
assert np.allclose(
pytorch_result, onnx_result, rtol=1e-5,
atol=1e-5), 'The outputs are different between Pytorch and ONNX'
        print('The numerical values are the same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(description='Convert MMediting to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'model_type',
        help='what kind of model the config belongs to.',
choices=['inpainting', 'mattor', 'restorer', 'synthesizer'])
parser.add_argument('img_path', help='path to input image file')
parser.add_argument(
'--trimap-path',
default=None,
help='path to input trimap file, used in mattor model')
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--dynamic-export',
action='store_true',
help='Whether to export onnx with dynamic axis.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
model_type = args.model_type
if model_type == 'mattor' and args.trimap_path is None:
raise ValueError('Please set `--trimap-path` to convert mattor model.')
    assert args.opset_version == 11, 'MMEditing only supports opset 11 now'
config = mmcv.Config.fromfile(args.config)
config.model.pretrained = None
# ONNX does not support spectral norm
if model_type == 'mattor':
if hasattr(config.model.backbone.encoder, 'with_spectral_norm'):
config.model.backbone.encoder.with_spectral_norm = False
config.model.backbone.decoder.with_spectral_norm = False
config.test_cfg.metrics = None
# build the model
model = build_model(config.model, test_cfg=config.test_cfg)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
# remove alpha from test_pipeline
if model_type == 'mattor':
keys_to_remove = ['alpha', 'ori_alpha']
elif model_type == 'restorer':
keys_to_remove = ['gt', 'gt_path']
for key in keys_to_remove:
for pipeline in list(config.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
config.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
config.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(config.test_pipeline)
# prepare data
if model_type == 'mattor':
data = dict(merged_path=args.img_path, trimap_path=args.trimap_path)
elif model_type == 'restorer':
data = dict(lq_path=args.img_path)
data = test_pipeline(data)
# convert model to onnx file
pytorch2onnx(
model,
data,
model_type,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
dynamic_export=args.dynamic_export)
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
| 7,975 | 35.420091 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deploy_test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from typing import Any
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel
from torch import nn
from mmedit.apis import single_gpu_test
from mmedit.core.export import ONNXRuntimeEditing
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import BasicRestorer, build_model
class TensorRTRestorerGenerator(nn.Module):
"""Inner class for tensorrt restorer model inference
Args:
trt_file (str): The path to the tensorrt file.
device_id (int): Which device to place the model.
"""
def __init__(self, trt_file: str, device_id: int):
super().__init__()
from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
            warnings.warn('If the input model has custom ops from mmcv, \
you may have to build mmcv with TensorRT from source.')
model = TRTWrapper(
trt_file, input_names=['input'], output_names=['output'])
self.device_id = device_id
self.model = model
def forward(self, x):
with torch.cuda.device(self.device_id), torch.no_grad():
seg_pred = self.model({'input': x})['output']
seg_pred = seg_pred.detach().cpu()
return seg_pred
class TensorRTRestorer(nn.Module):
"""A warper class for tensorrt restorer
Args:
base_model (Any): The base model build from config.
trt_file (str): The path to the tensorrt file.
device_id (int): Which device to place the model.
"""
def __init__(self, base_model: Any, trt_file: str, device_id: int):
super().__init__()
self.base_model = base_model
restorer_generator = TensorRTRestorerGenerator(
trt_file=trt_file, device_id=device_id)
base_model.generator = restorer_generator
def forward(self, lq, gt=None, test_mode=False, **kwargs):
return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class TensorRTEditing(nn.Module):
"""A class for testing tensorrt deployment
Args:
trt_file (str): The path to the tensorrt file.
cfg (Any): The configuration of the testing, \
            determined by the config file.
device_id (int): Which device to place the model.
"""
def __init__(self, trt_file: str, cfg: Any, device_id: int):
super().__init__()
base_model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        if isinstance(base_model, BasicRestorer):
            WrapperClass = TensorRTRestorer
        else:
            raise TypeError(
                f'Unsupported base model type: {type(base_model).__name__}')
        self.wrapper = WrapperClass(base_model, trt_file, device_id)
def forward(self, **kwargs):
return self.wrapper(**kwargs)
def parse_args():
parser = argparse.ArgumentParser(description='mmediting tester')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='input model file')
parser.add_argument(
'backend',
help='backend of the model.',
choices=['onnxruntime', 'tensorrt'])
parser.add_argument('--out', help='output result pickle file')
parser.add_argument(
'--save-path',
default=None,
type=str,
help='path to store images and if not given, will not save image')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# init distributed env first, since logger depends on the dist info.
distributed = False
# build the dataloader
dataset = build_dataset(cfg.data.test)
loader_cfg = {
**dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
**dict(
samples_per_gpu=1,
drop_last=False,
shuffle=False,
dist=distributed),
**cfg.data.get('test_dataloader', {})
}
data_loader = build_dataloader(dataset, **loader_cfg)
# build the model
if args.backend == 'onnxruntime':
model = ONNXRuntimeEditing(args.model, cfg=cfg, device_id=0)
elif args.backend == 'tensorrt':
model = TensorRTEditing(args.model, cfg=cfg, device_id=0)
args.save_image = args.save_path is not None
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(
model,
data_loader,
save_path=args.save_path,
save_image=args.save_image)
print()
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
# save result pickle
if args.out:
print('writing results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
if __name__ == '__main__':
main()
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
| 5,983 | 31.879121 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmedit import __version__
from mmedit.apis import init_random_seed, set_random_seed, train_model
from mmedit.datasets import build_dataset
from mmedit.models import build_model
from mmedit.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(description='Train an editor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--diff_seed',
action='store_true',
help='Whether or not set different seeds for different ranks')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# log env info
env_info_dict = collect_env.collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('mmedit Version: {}'.format(__version__))
logger.info('Config:\n{}'.format(cfg.text))
# set random seeds
seed = init_random_seed(args.seed)
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info('Set random seed to {}, deterministic: {}'.format(
seed, args.deterministic))
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmedit_version=__version__,
config=cfg.text,
)
# meta information
meta = dict()
if cfg.get('exp_name', None) is None:
cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
meta['exp_name'] = cfg.exp_name
meta['mmedit Version'] = __version__
meta['seed'] = seed
meta['env_info'] = env_info
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| 5,738 | 32.758824 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deployment/test_torchserver.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
import cv2
import numpy as np
import requests
from PIL import Image
def parse_args():
parser = ArgumentParser()
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument('--img-path', type=str, help='The input LQ image.')
parser.add_argument(
'--save-path', type=str, help='Path to save the generated GT image.')
args = parser.parse_args()
return args
def save_results(content, save_path, ori_shape):
ori_len = np.prod(ori_shape)
scale = int(np.sqrt(len(content) / ori_len))
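    # len(content) == 3 * H * W * scale**2 while ori_len == 3 * H * W, so
    # the square root of their ratio gives the (assumed integer) SR scale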
target_size = [int(size * scale) for size in ori_shape[:2][::-1]]
# Convert to RGB and save image
img = Image.frombytes('RGB', target_size, content, 'raw', 'BGR', 0, 0)
img.save(save_path)
def main(args):
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
ori_shape = cv2.imread(args.img_path).shape
with open(args.img_path, 'rb') as image:
response = requests.post(url, image)
save_results(response.content, args.save_path, ori_shape)
if __name__ == '__main__':
parsed_args = parse_args()
main(parsed_args)
| 1,364 | 30.022727 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deployment/mmedit_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import string
from io import BytesIO
import PIL.Image as Image
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmedit.apis import init_model, restoration_inference
from mmedit.core import tensor2img
class MMEditHandler(BaseHandler):
def initialize(self, context):
print('MMEditHandler.initialize is called')
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_model(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data, *args, **kwargs):
body = data[0].get('data') or data[0].get('body')
result = Image.open(BytesIO(body))
        # data preprocessing is done in inference()
return result
def inference(self, data, *args, **kwargs):
# generate temp image path for restoration_inference
temp_name = ''.join(
random.sample(string.ascii_letters + string.digits, 18))
temp_path = f'./{temp_name}.png'
data.save(temp_path)
results = restoration_inference(self.model, temp_path)
# delete the temp image path
os.remove(temp_path)
return results
def postprocess(self, data):
# convert torch tensor to numpy and then convert to bytes
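        # tensor2img yields an HWC uint8 array (BGR for color images), which
        # is then serialized to raw bytes for the HTTP response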
output_list = []
for data_ in data:
data_np = tensor2img(data_)
data_byte = data_np.tobytes()
output_list.append(data_byte)
return output_list
| 2,099 | 34 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deployment/mmedit2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmedit2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMEditing model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMEditing config format.
The contents vary for each task repository.
checkpoint_file:
In MMEditing checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args_ = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmedit_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
print(args_.model_name)
manifest = ModelExportUtils.generate_manifest_json(args_)
package_model(args_, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMEditing models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args_ = parser.parse_args()
return args_
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmedit2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
| 3,725 | 32.567568 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/.dev_scripts/github/update_model_index.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
# This tool is used to update model-index.yml which is required by MIM, and
# will be automatically called as a pre-commit hook. The updating will be
# triggered if any change of model information (.md files in configs/) has been
# detected before a commit.
import glob
import os
import posixpath as osp # Even on windows, use posixpath
import re
import sys
import warnings
from functools import reduce
import mmcv
MMEditing_ROOT = osp.dirname(osp.dirname(osp.dirname(__file__)))
all_training_data = [
'div2k', 'celeba', 'places', 'comp1k', 'vimeo90k', 'reds', 'ffhq', 'cufed',
'cat', 'facades', 'summer2winter', 'horse2zebra', 'maps', 'edges2shoes'
]
def dump_yaml_and_check_difference(obj, file):
"""Dump object to a yaml file, and check if the file content is different
from the original.
Args:
obj (any): The python object to be dumped.
file (str): YAML filename to dump the object to.
Returns:
Bool: If the target YAML file is different from the original.
"""
str_dump = mmcv.dump(
obj, None, file_format='yaml', sort_keys=True,
line_break='\n') # force use LF
if osp.isfile(file):
file_exists = True
print(f' exist {file}')
with open(file, 'r', encoding='utf-8') as f:
str_orig = f.read()
else:
file_exists = False
str_orig = None
if file_exists and str_orig == str_dump:
is_different = False
else:
is_different = True
print(f' update {file}')
with open(file, 'w', encoding='utf-8') as f:
f.write(str_dump)
return is_different
def collate_metrics(keys):
"""Collect metrics from the first row of the table.
Args:
keys (List): Elements in the first row of the table.
Returns:
dict: A dict of metrics.
"""
used_metrics = dict()
for idx, key in enumerate(keys):
if key in ['Method', 'Download']:
continue
used_metrics[key] = idx
return used_metrics
def get_task_name(md_file):
"""Get task name from README.md".
Args:
md_file (str): Path to .md file.
Returns:
Str: Task name.
"""
layers = re.split(r'[\\/]', md_file)
for i in range(len(layers) - 1):
if layers[i] == 'configs':
return layers[i + 1].capitalize()
return 'Unknown'
def generate_unique_name(md_file):
"""Search config files and return the unique name of them.
For Confin.Name.
Args:
md_file (str): Path to .md file.
Returns:
dict: dict of unique name for each config file.
"""
files = os.listdir(osp.dirname(md_file))
config_files = [f[:-3] for f in files if f[-3:] == '.py']
config_files.sort()
config_files.sort(key=lambda x: len(x))
split_names = [f.split('_') for f in config_files]
config_sets = [set(f.split('_')) for f in config_files]
common_set = reduce(lambda x, y: x & y, config_sets)
unique_lists = [[n for n in name if n not in common_set]
for name in split_names]
unique_dict = dict()
name_list = []
for i, f in enumerate(config_files):
base = split_names[i][0]
unique_dict[f] = base
if len(unique_lists[i]) > 0:
for unique in unique_lists[i]:
candidate_name = f'{base}_{unique}'
if candidate_name not in name_list and base != unique:
unique_dict[f] = candidate_name
name_list.append(candidate_name)
break
return unique_dict
def parse_md(md_file):
"""Parse .md file and convert it to a .yml file which can be used for MIM.
Args:
md_file (str): Path to .md file.
Returns:
Bool: If the target YAML file is different from the original.
"""
# See https://github.com/open-mmlab/mmediting/pull/798 for these comments
# unique_dict = generate_unique_name(md_file)
collection_name = osp.splitext(osp.basename(md_file))[0]
readme = osp.relpath(md_file, MMEditing_ROOT)
readme = readme.replace('\\', '/') # for windows
collection = dict(
Name=collection_name,
Metadata={'Architecture': []},
README=readme,
Paper=[])
models = []
# force utf-8 instead of system defined
with open(md_file, 'r', encoding='utf-8') as md:
lines = md.readlines()
i = 0
name = lines[0][2:]
name = name.split('(', 1)[0].strip()
collection['Metadata']['Architecture'].append(name)
collection['Name'] = name
collection_name = name
while i < len(lines):
# parse reference
if lines[i].startswith('> ['):
url = re.match(r'> \[.*]\((.*)\)', lines[i])
url = url.groups()[0]
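            # e.g. a line '> [Some Paper](https://example.com/paper)' yields
            # the url 'https://example.com/paper'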
collection['Paper'].append(url)
i += 1
# parse table
elif lines[i][0] == '|' and i + 1 < len(lines) and \
(lines[i + 1][:3] == '| :' or lines[i + 1][:2] == '|:'
or lines[i + 1][:2] == '|-'):
cols = [col.strip() for col in lines[i].split('|')][1:-1]
config_idx = cols.index('Method')
checkpoint_idx = cols.index('Download')
try:
flops_idx = cols.index('FLOPs')
except ValueError:
flops_idx = -1
try:
params_idx = cols.index('Params')
except ValueError:
params_idx = -1
used_metrics = collate_metrics(cols)
j = i + 2
while j < len(lines) and lines[j][0] == '|':
task = get_task_name(md_file)
line = lines[j].split('|')[1:-1]
if line[config_idx].find('](') >= 0:
left = line[config_idx].index('](') + 2
right = line[config_idx].index(')', left)
config = line[config_idx][left:right].strip('./')
elif line[config_idx].find('△') == -1:
j += 1
continue
if line[checkpoint_idx].find('](') >= 0:
left = line[checkpoint_idx].index('model](') + 7
right = line[checkpoint_idx].index(')', left)
checkpoint = line[checkpoint_idx][left:right]
name_key = osp.splitext(osp.basename(config))[0]
model_name = name_key
# See https://github.com/open-mmlab/mmediting/pull/798
# for these comments
# if name_key in unique_dict:
# model_name = unique_dict[name_key]
# else:
# model_name = name_key
# warnings.warn(
# f'Config file of {model_name} is not found,'
# 'please check it again.')
# find dataset in config file
dataset = 'Others'
config_low = config.lower()
for d in all_training_data:
if d in config_low:
dataset = d.upper()
break
metadata = {'Training Data': dataset}
if flops_idx != -1:
metadata['FLOPs'] = float(line[flops_idx])
if params_idx != -1:
metadata['Parameters'] = float(line[params_idx])
metrics = {}
for key in used_metrics:
metrics_data = line[used_metrics[key]]
metrics_data = metrics_data.replace('*', '')
if '/' not in metrics_data:
try:
metrics[key] = float(metrics_data)
except ValueError:
metrics_data = metrics_data.replace(' ', '')
else:
try:
metrics_data = [
float(d) for d in metrics_data.split('/')
]
metrics[key] = dict(
PSNR=metrics_data[0], SSIM=metrics_data[1])
except ValueError:
pass
model = {
'Name':
model_name,
'In Collection':
collection_name,
'Config':
config,
'Metadata':
metadata,
'Results': [{
'Task': task,
'Dataset': dataset,
'Metrics': metrics
}],
'Weights':
checkpoint
}
models.append(model)
j += 1
i = j
else:
i += 1
if len(models) == 0:
warnings.warn('no model is found in this md file')
result = {'Collections': [collection], 'Models': models}
yml_file = md_file.replace('README.md', 'metafile.yml')
is_different = dump_yaml_and_check_difference(result, yml_file)
return is_different
def update_model_index():
"""Update model-index.yml according to model .md files.
Returns:
Bool: If the updated model-index.yml is different from the original.
"""
configs_dir = osp.join(MMEditing_ROOT, 'configs')
yml_files = glob.glob(osp.join(configs_dir, '**', '*.yml'), recursive=True)
yml_files.sort()
model_index = {
'Import': [
osp.relpath(yml_file, MMEditing_ROOT).replace(
'\\', '/') # force using / as path separators
for yml_file in yml_files
]
}
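    # the resulting model-index.yml then looks like (paths are examples):
    #   Import:
    #   - configs/basicvsr/metafile.yml
    #   - configs/edvr/metafile.yml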
model_index_file = osp.join(MMEditing_ROOT, 'model-index.yml')
is_different = dump_yaml_and_check_difference(model_index,
model_index_file)
return is_different
if __name__ == '__main__':
if len(sys.argv) <= 1:
configs_root = osp.join(MMEditing_ROOT, 'configs')
file_list = glob.glob(
osp.join(configs_root, '**', '*README.md'), recursive=True)
file_list.sort()
else:
file_list = [
fn for fn in sys.argv[1:] if osp.basename(fn) == 'README.md'
]
if not file_list:
sys.exit(0)
file_modified = False
for fn in file_list:
print(f'process {fn}')
file_modified |= parse_md(fn)
file_modified |= update_model_index()
sys.exit(1 if file_modified else 0)
| 11,096 | 33.039877 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import shutil
import pytest
import torch
from mmedit.apis import (init_model, restoration_video_inference,
video_interpolation_inference)
def test_restoration_video_inference():
if torch.cuda.is_available():
# recurrent framework (BasicVSR)
model = init_model(
'./configs/restorers/basicvsr/basicvsr_reds4.py',
None,
device='cuda')
img_dir = './tests/data/vimeo90k/00001/0266'
window_size = 0
start_idx = 1
filename_tmpl = 'im{}.png'
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
assert output.shape == (1, 7, 3, 256, 448)
# sliding-window framework (EDVR)
window_size = 5
model = init_model(
'./configs/restorers/edvr/edvrm_wotsa_x4_g8_600k_reds.py',
None,
device='cuda')
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
assert output.shape == (1, 7, 3, 256, 448)
# without demo_pipeline
model.cfg.test_pipeline = model.cfg.demo_pipeline
model.cfg.pop('demo_pipeline')
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
assert output.shape == (1, 7, 3, 256, 448)
# without test_pipeline and demo_pipeline
model.cfg.val_pipeline = model.cfg.test_pipeline
model.cfg.pop('test_pipeline')
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
assert output.shape == (1, 7, 3, 256, 448)
# the first element in the pipeline must be 'GenerateSegmentIndices'
with pytest.raises(TypeError):
model.cfg.val_pipeline = model.cfg.val_pipeline[1:]
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
# video (mp4) input
model = init_model(
'./configs/restorers/basicvsr/basicvsr_reds4.py',
None,
device='cuda')
img_dir = './tests/data/test_inference.mp4'
window_size = 0
start_idx = 1
filename_tmpl = 'im{}.png'
output = restoration_video_inference(model, img_dir, window_size,
start_idx, filename_tmpl)
assert output.shape == (1, 5, 3, 256, 256)
def test_video_interpolation_inference():
model = init_model(
'./configs/video_interpolators/cain/cain_b5_320k_vimeo-triplet.py',
None,
device='cpu')
model.cfg['demo_pipeline'] = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='inputs',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['inputs']),
dict(type='FramesToTensor', keys=['inputs']),
dict(
type='Collect', keys=['inputs'], meta_keys=['inputs_path', 'key'])
]
input_dir = './tests/data/vimeo90k/00001/0266'
output_dir = './tests/data/vimeo90k/00001/out'
os.mkdir(output_dir)
video_interpolation_inference(model, input_dir, output_dir, batch_size=10)
input_dir = './tests/data/test_inference.mp4'
output_dir = './tests/data/test_inference_out.mp4'
video_interpolation_inference(model, input_dir, output_dir)
with pytest.raises(AssertionError):
input_dir = './tests/data/test_inference.mp4'
output_dir = './tests/data/test_inference_out.mp4'
video_interpolation_inference(
model, input_dir, output_dir, fps_multiplier=-1)
if torch.cuda.is_available():
model = init_model(
'./configs/video_interpolators/cain/cain_b5_320k_vimeo-triplet.py',
None,
device='cuda')
model.cfg['demo_pipeline'] = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='inputs',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['inputs']),
dict(type='FramesToTensor', keys=['inputs']),
dict(
type='Collect',
keys=['inputs'],
meta_keys=['inputs_path', 'key'])
]
input_dir = './tests/data/vimeo90k/00001/0266'
output_dir = './tests/data/vimeo90k/00001'
video_interpolation_inference(
model, input_dir, output_dir, batch_size=10)
input_dir = './tests/data/test_inference.mp4'
output_dir = './tests/data/test_inference_out.mp4'
video_interpolation_inference(model, input_dir, output_dir)
with pytest.raises(AssertionError):
input_dir = './tests/data/test_inference.mp4'
output_dir = './tests/data/test_inference_out.mp4'
video_interpolation_inference(
model, input_dir, output_dir, fps_multiplier=-1)
shutil.rmtree('./tests/data/vimeo90k/00001/out')
os.remove('./tests/data/test_inference_out.mp4')
| 5,333 | 36.829787 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_optimizer.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmedit.core import build_optimizers
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.model1 = nn.Conv2d(3, 8, kernel_size=3)
self.model2 = nn.Conv2d(3, 4, kernel_size=3)
def forward(self, x):
return x
def test_build_optimizers():
base_lr = 0.0001
base_wd = 0.0002
momentum = 0.9
# basic config with ExampleModel
optimizer_cfg = dict(
model1=dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum),
model2=dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum))
model = ExampleModel()
optimizers = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
for i in range(2):
optimizer = optimizers[f'model{i+1}']
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 2
assert torch.equal(param_groups['params'][0],
param_dict[f'model{i+1}.weight'])
assert torch.equal(param_groups['params'][1],
param_dict[f'model{i+1}.bias'])
# basic config with Parallel model
model = torch.nn.DataParallel(ExampleModel())
optimizers = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
for i in range(2):
optimizer = optimizers[f'model{i+1}']
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 2
assert torch.equal(param_groups['params'][0],
param_dict[f'module.model{i+1}.weight'])
assert torch.equal(param_groups['params'][1],
param_dict[f'module.model{i+1}.bias'])
# basic config with ExampleModel (one optimizer)
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
model = ExampleModel()
optimizer = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 4
assert torch.equal(param_groups['params'][0], param_dict['model1.weight'])
assert torch.equal(param_groups['params'][1], param_dict['model1.bias'])
assert torch.equal(param_groups['params'][2], param_dict['model2.weight'])
assert torch.equal(param_groups['params'][3], param_dict['model2.bias'])
# basic config with Parallel model (one optimizer)
model = torch.nn.DataParallel(ExampleModel())
optimizer = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 4
assert torch.equal(param_groups['params'][0],
param_dict['module.model1.weight'])
assert torch.equal(param_groups['params'][1],
param_dict['module.model1.bias'])
assert torch.equal(param_groups['params'][2],
param_dict['module.model2.weight'])
assert torch.equal(param_groups['params'][3],
param_dict['module.model2.bias'])
| 4,279 | 40.960784 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_visual_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest.mock import MagicMock
import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from mmedit.core import VisualizationHook
from mmedit.utils import get_root_logger
class ExampleDataset(Dataset):
def __getitem__(self, idx):
img = torch.zeros((3, 10, 10))
img[:, 2:9, :] = 1.
results = dict(imgs=img)
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.test_cfg = None
def train_step(self, data_batch, optimizer):
output = dict(results=dict(img=data_batch['imgs']))
return output
def test_visual_hook():
with pytest.raises(
AssertionError), tempfile.TemporaryDirectory() as tmpdir:
VisualizationHook(tmpdir, [1, 2, 3])
test_dataset = ExampleDataset()
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
img = torch.zeros((1, 3, 10, 10))
img[:, :, 2:9, :] = 1.
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
with tempfile.TemporaryDirectory() as tmpdir:
visual_hook = VisualizationHook(
tmpdir, ['img'], interval=8, rerange=False)
runner = mmcv.runner.IterBasedRunner(
model=model,
optimizer=None,
work_dir=tmpdir,
logger=get_root_logger())
runner.register_hook(visual_hook)
runner.run([data_loader], [('train', 10)], 10)
img_saved = mmcv.imread(
osp.join(tmpdir, 'iter_8.png'), flag='unchanged')
np.testing.assert_almost_equal(img_saved,
img[0].permute(1, 2, 0) * 255)
with tempfile.TemporaryDirectory() as tmpdir:
visual_hook = VisualizationHook(
tmpdir, ['img'], interval=8, rerange=True)
runner = mmcv.runner.IterBasedRunner(
model=model,
optimizer=None,
work_dir=tmpdir,
logger=get_root_logger())
runner.register_hook(visual_hook)
runner.run([data_loader], [('train', 10)], 10)
img_saved = mmcv.imread(
osp.join(tmpdir, 'iter_8.png'), flag='unchanged')
assert osp.exists(osp.join(tmpdir, 'iter_8.png'))
| 2,480 | 28.891566 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_dataset_builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from torch.utils.data import ConcatDataset, RandomSampler, SequentialSampler
from mmedit.datasets import (DATASETS, RepeatDataset, build_dataloader,
build_dataset)
from mmedit.datasets.samplers import DistributedSampler
@DATASETS.register_module()
class ToyDataset:
def __init__(self, ann_file=None, cnt=0):
self.ann_file = ann_file
self.cnt = cnt
def __item__(self, idx):
return idx
def __len__(self):
return 100
@DATASETS.register_module()
class ToyDatasetWithAnnFile:
def __init__(self, ann_file):
self.ann_file = ann_file
def __item__(self, idx):
return idx
def __len__(self):
return 100
def test_build_dataset():
cfg = dict(type='ToyDataset')
dataset = build_dataset(cfg)
assert isinstance(dataset, ToyDataset)
assert dataset.cnt == 0
# test default_args
dataset = build_dataset(cfg, default_args=dict(cnt=1))
assert isinstance(dataset, ToyDataset)
assert dataset.cnt == 1
# test RepeatDataset
cfg = dict(type='RepeatDataset', dataset=dict(type='ToyDataset'), times=3)
dataset = build_dataset(cfg)
assert isinstance(dataset, RepeatDataset)
assert isinstance(dataset.dataset, ToyDataset)
assert dataset.times == 3
# test when ann_file is a list
cfg = dict(
type='ToyDatasetWithAnnFile', ann_file=['ann_file_a', 'ann_file_b'])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert isinstance(dataset.datasets, list)
assert isinstance(dataset.datasets[0], ToyDatasetWithAnnFile)
assert dataset.datasets[0].ann_file == 'ann_file_a'
assert isinstance(dataset.datasets[1], ToyDatasetWithAnnFile)
assert dataset.datasets[1].ann_file == 'ann_file_b'
# test concat dataset
cfg = (dict(type='ToyDataset'),
dict(type='ToyDatasetWithAnnFile', ann_file='ann_file'))
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert isinstance(dataset.datasets, list)
assert isinstance(dataset.datasets[0], ToyDataset)
assert isinstance(dataset.datasets[1], ToyDatasetWithAnnFile)
def test_build_dataloader():
dataset = ToyDataset()
samples_per_gpu = 3
# dist=True, shuffle=True, 1GPU
dataloader = build_dataloader(
dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, DistributedSampler)
assert dataloader.sampler.shuffle
# dist=True, shuffle=False, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
shuffle=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, DistributedSampler)
assert not dataloader.sampler.shuffle
# dist=True, shuffle=True, 8GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
num_gpus=8)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert dataloader.num_workers == 2
# dist=False, shuffle=True, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=2,
dist=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, RandomSampler)
assert dataloader.num_workers == 2
# dist=False, shuffle=False, 1GPU
dataloader = build_dataloader(
dataset,
samples_per_gpu=3,
workers_per_gpu=2,
shuffle=False,
dist=False)
assert dataloader.batch_size == samples_per_gpu
assert len(dataloader) == int(math.ceil(len(dataset) / samples_per_gpu))
assert isinstance(dataloader.sampler, SequentialSampler)
assert dataloader.num_workers == 2
# dist=False, shuffle=True, 8GPU
dataloader = build_dataloader(
dataset, samples_per_gpu=3, workers_per_gpu=2, num_gpus=8, dist=False)
assert dataloader.batch_size == samples_per_gpu * 8
assert len(dataloader) == int(
math.ceil(len(dataset) / samples_per_gpu / 8))
assert isinstance(dataloader.sampler, RandomSampler)
assert dataloader.num_workers == 16
| 4,660 | 32.056738 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_eval_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from unittest.mock import MagicMock
import mmcv.runner
import pytest
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from torch.utils.data import DataLoader, Dataset
from mmedit.core import EvalIterHook
class ExampleDataset(Dataset):
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.test_cfg = None
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, imgs, test_mode=False, **kwargs):
return imgs
def train_step(self, data_batch, optimizer):
rlt = self.forward(data_batch)
return dict(result=rlt)
def test_eval_hook():
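    # EvalIterHook only accepts a DataLoader, so passing a list raises TypeError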
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EvalIterHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(
test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
eval_hook = EvalIterHook(data_loader)
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=model.parameters()))
with tempfile.TemporaryDirectory() as tmpdir:
runner = mmcv.runner.IterBasedRunner(
model=model,
optimizer=optimizer,
work_dir=tmpdir,
logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with([torch.tensor([1])],
logger=runner.logger)
| 2,150 | 28.067568 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_apis.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.apis.train import init_random_seed, set_random_seed
def test_init_random_seed():
init_random_seed(0, device='cpu')
init_random_seed(device='cpu')
# test on gpu
if torch.cuda.is_available():
init_random_seed(0, device='cuda')
init_random_seed(device='cuda')
def test_set_random_seed():
set_random_seed(0, deterministic=False)
set_random_seed(0, deterministic=True)
| 482 | 24.421053 | 63 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_runtime/test_ema_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
import torch.nn as nn
from packaging import version
from torch.nn.parallel import DataParallel
from mmedit.core.hooks import ExponentialMovingAverageHook
class SimpleModule(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.tensor([1., 2.]))
if version.parse(torch.__version__) >= version.parse('1.7.0'):
self.register_buffer('b', torch.tensor([2., 3.]), persistent=True)
self.register_buffer('c', torch.tensor([0., 1.]), persistent=False)
else:
self.register_buffer('b', torch.tensor([2., 3.]))
self.c = torch.tensor([0., 1.])
class SimpleModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.module_a = SimpleModule()
self.module_b = SimpleModule()
self.module_a_ema = SimpleModule()
self.module_b_ema = SimpleModule()
class SimpleModelNoEMA(nn.Module):
def __init__(self) -> None:
super().__init__()
self.module_a = SimpleModule()
self.module_b = SimpleModule()
class SimpleRunner:
def __init__(self):
self.model = SimpleModel()
self.iter = 0
class TestEMA:
@classmethod
def setup_class(cls):
cls.default_config = dict(
module_keys=('module_a_ema', 'module_b_ema'),
interval=1,
interp_cfg=dict(momentum=0.5))
cls.runner = SimpleRunner()
@torch.no_grad()
def test_ema_hook(self):
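        # interval=-1 disables the EMA update, so EMA weights keep their initial values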
cfg_ = deepcopy(self.default_config)
cfg_['interval'] = -1
ema = ExponentialMovingAverageHook(**cfg_)
ema.before_run(self.runner)
ema.after_train_iter(self.runner)
module_a = self.runner.model.module_a
module_a_ema = self.runner.model.module_a_ema
ema_states = module_a_ema.state_dict()
assert torch.equal(ema_states['a'], torch.tensor([1., 2.]))
ema = ExponentialMovingAverageHook(**self.default_config)
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(ema_states['a'], torch.tensor([1., 2.]))
module_a.b /= 2.
module_a.a.data /= 2.
module_a.c /= 2.
self.runner.iter += 1
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(self.runner.model.module_a.a,
torch.tensor([0.5, 1.]))
assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]))
assert torch.equal(ema_states['b'], torch.tensor([1., 1.5]))
assert 'c' not in ema_states
# check for the validity of args
with pytest.raises(AssertionError):
_ = ExponentialMovingAverageHook(module_keys=['a'])
with pytest.raises(AssertionError):
_ = ExponentialMovingAverageHook(module_keys=('a'))
with pytest.raises(AssertionError):
_ = ExponentialMovingAverageHook(
module_keys=('module_a_ema'), interp_mode='xxx')
# test before run
ema = ExponentialMovingAverageHook(**self.default_config)
self.runner.model = SimpleModelNoEMA()
self.runner.iter = 0
ema.before_run(self.runner)
assert hasattr(self.runner.model, 'module_a_ema')
module_a = self.runner.model.module_a
module_a_ema = self.runner.model.module_a_ema
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(ema_states['a'], torch.tensor([1., 2.]))
module_a.b /= 2.
module_a.a.data /= 2.
module_a.c /= 2.
self.runner.iter += 1
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(self.runner.model.module_a.a,
torch.tensor([0.5, 1.]))
assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]))
assert torch.equal(ema_states['b'], torch.tensor([1., 1.5]))
assert 'c' not in ema_states
# test ema with simple warm up
runner = SimpleRunner()
cfg_ = deepcopy(self.default_config)
cfg_.update(dict(start_iter=3, interval=1))
ema = ExponentialMovingAverageHook(**cfg_)
ema.before_run(runner)
module_a = runner.model.module_a
module_a_ema = runner.model.module_a_ema
module_a.a.data /= 2.
runner.iter += 1
ema.after_train_iter(runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(runner.model.module_a.a, torch.tensor([0.5, 1.]))
assert torch.equal(ema_states['a'], torch.tensor([0.5, 1.]))
module_a.a.data /= 2
runner.iter += 2
ema.after_train_iter(runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(runner.model.module_a.a, torch.tensor([0.25, 0.5]))
assert torch.equal(ema_states['a'], torch.tensor([0.375, 0.75]))
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_ema_hook_cuda(self):
ema = ExponentialMovingAverageHook(**self.default_config)
cuda_runner = SimpleRunner()
cuda_runner.model = cuda_runner.model.cuda()
ema.after_train_iter(cuda_runner)
module_a = cuda_runner.model.module_a
module_a_ema = cuda_runner.model.module_a_ema
ema_states = module_a_ema.state_dict()
assert torch.equal(ema_states['a'], torch.tensor([1., 2.]).cuda())
module_a.b /= 2.
module_a.a.data /= 2.
module_a.c /= 2.
cuda_runner.iter += 1
ema.after_train_iter(cuda_runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(cuda_runner.model.module_a.a,
torch.tensor([0.5, 1.]).cuda())
assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]).cuda())
assert torch.equal(ema_states['b'], torch.tensor([1., 1.5]).cuda())
assert 'c' not in ema_states
# test before run
ema = ExponentialMovingAverageHook(**self.default_config)
self.runner.model = SimpleModelNoEMA().cuda()
self.runner.model = DataParallel(self.runner.model)
self.runner.iter = 0
ema.before_run(self.runner)
assert hasattr(self.runner.model.module, 'module_a_ema')
module_a = self.runner.model.module.module_a
module_a_ema = self.runner.model.module.module_a_ema
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(ema_states['a'], torch.tensor([1., 2.]).cuda())
module_a.b /= 2.
module_a.a.data /= 2.
module_a.c /= 2.
self.runner.iter += 1
ema.after_train_iter(self.runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(self.runner.model.module.module_a.a,
torch.tensor([0.5, 1.]).cuda())
assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]).cuda())
assert torch.equal(ema_states['b'], torch.tensor([1., 1.5]).cuda())
assert 'c' not in ema_states
# test ema with simple warm up
runner = SimpleRunner()
runner.model = runner.model.cuda()
cfg_ = deepcopy(self.default_config)
cfg_.update(dict(start_iter=3, interval=1))
ema = ExponentialMovingAverageHook(**cfg_)
ema.before_run(runner)
module_a = runner.model.module_a
module_a_ema = runner.model.module_a_ema
module_a.a.data /= 2.
runner.iter += 1
ema.after_train_iter(runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(runner.model.module_a.a,
torch.tensor([0.5, 1.]).cuda())
assert torch.equal(ema_states['a'], torch.tensor([0.5, 1.]).cuda())
module_a.a.data /= 2
runner.iter += 2
ema.after_train_iter(runner)
ema_states = module_a_ema.state_dict()
assert torch.equal(runner.model.module_a.a,
torch.tensor([0.25, 0.5]).cuda())
assert torch.equal(ema_states['a'], torch.tensor([0.375, 0.75]).cuda())
| 8,288 | 33.682008 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_base_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest.mock import patch
import pytest
import torch
from mmedit.models import BaseModel
class TestBaseModel(unittest.TestCase):
@patch.multiple(BaseModel, __abstractmethods__=set())
def test_parse_losses(self):
self.base_model = BaseModel()
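        # plain float losses are rejected: values must be tensors or lists of tensors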
with pytest.raises(TypeError):
losses = dict(loss=0.5)
self.base_model.parse_losses(losses)
a_loss = [torch.randn(5, 5), torch.randn(5, 5)]
b_loss = torch.randn(5, 5)
losses = dict(a_loss=a_loss, b_loss=b_loss)
r_a_loss = sum(_loss.mean() for _loss in a_loss)
r_b_loss = b_loss.mean()
r_loss = [r_a_loss, r_b_loss]
r_loss = sum(r_loss)
loss, log_vars = self.base_model.parse_losses(losses)
assert r_loss == loss
assert set(log_vars.keys()) == set(['a_loss', 'b_loss', 'loss'])
assert log_vars['a_loss'] == r_a_loss
assert log_vars['b_loss'] == r_b_loss
assert log_vars['loss'] == r_loss
| 1,059 | 29.285714 | 72 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_generation_backbones/test_generators.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
import torch
from mmedit.models import build_backbone
from mmedit.models.common import (ResidualBlockWithDropout,
UnetSkipConnectionBlock)
def test_unet_skip_connection_block():
_cfg = dict(
outer_channels=1,
inner_channels=1,
in_channels=None,
submodule=None,
is_outermost=False,
is_innermost=False,
norm_cfg=dict(type='BN'),
use_dropout=True)
feature_shape = (1, 1, 8, 8)
feature = _demo_inputs(feature_shape)
input_shape = (1, 3, 8, 8)
img = _demo_inputs(input_shape)
# innermost
cfg = copy.deepcopy(_cfg)
cfg['is_innermost'] = True
block = UnetSkipConnectionBlock(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 2, 8, 8)
# gpu
if torch.cuda.is_available():
block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 2, 8, 8)
block.cpu()
# intermediate
cfg = copy.deepcopy(_cfg)
cfg['submodule'] = block
block = UnetSkipConnectionBlock(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 2, 8, 8)
# gpu
if torch.cuda.is_available():
block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 2, 8, 8)
block.cpu()
# outermost
cfg = copy.deepcopy(_cfg)
cfg['submodule'] = block
cfg['is_outermost'] = True
cfg['in_channels'] = 3
cfg['outer_channels'] = 3
block = UnetSkipConnectionBlock(**cfg)
# cpu
output = block(img)
assert output.shape == (1, 3, 8, 8)
# gpu
if torch.cuda.is_available():
block.cuda()
output = block(img.cuda())
assert output.shape == (1, 3, 8, 8)
block.cpu()
# test cannot be both innermost and outermost
cfg = copy.deepcopy(_cfg)
cfg['is_innermost'] = True
cfg['is_outermost'] = True
with pytest.raises(AssertionError):
_ = UnetSkipConnectionBlock(**cfg)
# test norm_cfg assertions
bad_cfg = copy.deepcopy(_cfg)
bad_cfg['is_innermost'] = True
bad_cfg['norm_cfg'] = None
with pytest.raises(AssertionError):
_ = UnetSkipConnectionBlock(**bad_cfg)
bad_cfg['norm_cfg'] = dict(tp='BN')
with pytest.raises(AssertionError):
_ = UnetSkipConnectionBlock(**bad_cfg)
def test_unet_generator():
# color to color
cfg = dict(
type='UnetGenerator',
in_channels=3,
out_channels=3,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 256, 256)
# gray to color
cfg = dict(
type='UnetGenerator',
in_channels=1,
out_channels=3,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 1, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 256, 256)
# color to gray
cfg = dict(
type='UnetGenerator',
in_channels=3,
out_channels=1,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 256, 256)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
# test norm_cfg assertions
bad_cfg = copy.deepcopy(cfg)
bad_cfg['norm_cfg'] = None
with pytest.raises(AssertionError):
_ = build_backbone(bad_cfg)
bad_cfg['norm_cfg'] = dict(tp='BN')
with pytest.raises(AssertionError):
_ = build_backbone(bad_cfg)
def test_residual_block_with_dropout():
_cfg = dict(
channels=3,
padding_mode='reflect',
norm_cfg=dict(type='BN'),
use_dropout=True)
feature_shape = (1, 3, 32, 32)
feature = _demo_inputs(feature_shape)
# reflect padding, BN, use_dropout=True
block = ResidualBlockWithDropout(**_cfg)
# cpu
output = block(feature)
assert output.shape == (1, 3, 32, 32)
# gpu
if torch.cuda.is_available():
block = block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 3, 32, 32)
# test other padding types
# replicate padding
cfg = copy.deepcopy(_cfg)
cfg['padding_mode'] = 'replicate'
block = ResidualBlockWithDropout(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 3, 32, 32)
# gpu
if torch.cuda.is_available():
block = block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 3, 32, 32)
# zero padding
cfg = copy.deepcopy(_cfg)
cfg['padding_mode'] = 'zeros'
block = ResidualBlockWithDropout(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 3, 32, 32)
# gpu
if torch.cuda.is_available():
block = block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 3, 32, 32)
# not implemented padding
cfg = copy.deepcopy(_cfg)
cfg['padding_mode'] = 'abc'
with pytest.raises(KeyError):
block = ResidualBlockWithDropout(**cfg)
# test other norm
cfg = copy.deepcopy(_cfg)
cfg['norm_cfg'] = dict(type='IN')
block = ResidualBlockWithDropout(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 3, 32, 32)
# gpu
if torch.cuda.is_available():
block = block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 3, 32, 32)
# test use_dropout=False
cfg = copy.deepcopy(_cfg)
cfg['use_dropout'] = False
block = ResidualBlockWithDropout(**cfg)
# cpu
output = block(feature)
assert output.shape == (1, 3, 32, 32)
# gpu
if torch.cuda.is_available():
block = block.cuda()
output = block(feature.cuda())
assert output.shape == (1, 3, 32, 32)
# test norm_cfg assertions
bad_cfg = copy.deepcopy(_cfg)
bad_cfg['norm_cfg'] = None
with pytest.raises(AssertionError):
_ = ResidualBlockWithDropout(**bad_cfg)
bad_cfg['norm_cfg'] = dict(tp='BN')
with pytest.raises(AssertionError):
_ = ResidualBlockWithDropout(**bad_cfg)
def test_resnet_generator():
# color to color
cfg = dict(
type='ResnetGenerator',
in_channels=3,
out_channels=3,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 256, 256)
# gray to color
cfg = dict(
type='ResnetGenerator',
in_channels=1,
out_channels=3,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 1, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 256, 256)
# color to gray
cfg = dict(
type='ResnetGenerator',
in_channels=3,
out_channels=1,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02))
net = build_backbone(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 256, 256)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 256, 256)
# gpu
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 256, 256)
# test num_blocks non-negative
bad_cfg = copy.deepcopy(cfg)
bad_cfg['num_blocks'] = -1
with pytest.raises(AssertionError):
net = build_backbone(bad_cfg)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
# test norm_cfg assertions
bad_cfg = copy.deepcopy(cfg)
bad_cfg['norm_cfg'] = None
with pytest.raises(AssertionError):
_ = build_backbone(bad_cfg)
bad_cfg['norm_cfg'] = dict(tp='IN')
with pytest.raises(AssertionError):
_ = build_backbone(bad_cfg)
def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
Returns:
        imgs (Tensor): Images in FloatTensor with desired shapes.
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
| 10,368 | 27.17663 | 66 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_real_basicvsr_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.real_basicvsr_net import \
RealBasicVSRNet
def test_real_basicvsr_net():
"""Test RealBasicVSR."""
# cpu
# is_fix_cleaning = False
real_basicvsr = RealBasicVSRNet(is_fix_cleaning=False)
# is_sequential_cleaning = False
real_basicvsr = RealBasicVSRNet(
is_fix_cleaning=True, is_sequential_cleaning=False)
input_tensor = torch.rand(1, 5, 3, 64, 64)
real_basicvsr.init_weights(pretrained=None)
output = real_basicvsr(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
    # is_sequential_cleaning = True, return_lqs = True
real_basicvsr = RealBasicVSRNet(
is_fix_cleaning=True, is_sequential_cleaning=True)
output, lq = real_basicvsr(input_tensor, return_lqs=True)
assert output.shape == (1, 5, 3, 256, 256)
assert lq.shape == (1, 5, 3, 64, 64)
with pytest.raises(TypeError):
# pretrained should be str or None
real_basicvsr.init_weights(pretrained=[1])
# gpu
if torch.cuda.is_available():
# is_fix_cleaning = False
real_basicvsr = RealBasicVSRNet(is_fix_cleaning=False).cuda()
# is_sequential_cleaning = False
real_basicvsr = RealBasicVSRNet(
is_fix_cleaning=True, is_sequential_cleaning=False).cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
real_basicvsr.init_weights(pretrained=None)
output = real_basicvsr(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
        # is_sequential_cleaning = True, return_lqs = True
real_basicvsr = RealBasicVSRNet(
is_fix_cleaning=True, is_sequential_cleaning=True).cuda()
output, lq = real_basicvsr(input_tensor, return_lqs=True)
assert output.shape == (1, 5, 3, 256, 256)
assert lq.shape == (1, 5, 3, 64, 64)
with pytest.raises(TypeError):
# pretrained should be str or None
real_basicvsr.init_weights(pretrained=[1])
| 2,058 | 34.5 | 70 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_sr_backbones.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmedit.models.backbones import EDSR, SRCNN, MSRResNet, RRDBNet
from mmedit.models.components import ModifiedVGG
def test_srresnet_backbone():
"""Test SRResNet backbone."""
# x2 model
MSRResNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=2)
# x3 model, initialization and forward (cpu)
net = MSRResNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=3)
net.init_weights(pretrained=None)
input_shape = (1, 3, 12, 12)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 36, 36)
    # x4 model, initialization and forward (cpu)
net = MSRResNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=4)
net.init_weights(pretrained=None)
output = net(img)
assert output.shape == (1, 3, 48, 48)
# x4 model forward (gpu)
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 48, 48)
with pytest.raises(TypeError):
# pretrained should be str or None
net.init_weights(pretrained=[1])
with pytest.raises(ValueError):
# Currently supported upscale_factor is [2, 3, 4]
MSRResNet(
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16,
upscale_factor=16)
def test_edsr():
"""Test EDSR."""
# x2 model
EDSR(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=2)
# x3 model, initialization and forward (cpu)
net = EDSR(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=3)
net.init_weights(pretrained=None)
input_shape = (1, 3, 12, 12)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 36, 36)
    # x4 model, initialization and forward (cpu)
net = EDSR(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
upscale_factor=4)
net.init_weights(pretrained=None)
output = net(img)
assert output.shape == (1, 3, 48, 48)
    # gray x4 model, initialization and forward (cpu)
net = EDSR(
in_channels=1,
out_channels=1,
mid_channels=8,
num_blocks=2,
upscale_factor=4,
rgb_mean=[0],
rgb_std=[1])
net.init_weights(pretrained=None)
gray = _demo_inputs((1, 1, 12, 12))
output = net(gray)
assert output.shape == (1, 1, 48, 48)
# x4 model forward (gpu)
if torch.cuda.is_available():
net = net.cuda()
output = net(gray.cuda())
assert output.shape == (1, 1, 48, 48)
with pytest.raises(TypeError):
# pretrained should be str or None
net.init_weights(pretrained=[1])
with pytest.raises(ValueError):
# Currently supported upscale_factor is 2^n and 3
EDSR(
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16,
upscale_factor=5)
def test_discriminator():
"""Test discriminator backbone."""
# model, initialization and forward (cpu)
net = ModifiedVGG(in_channels=3, mid_channels=64)
net.init_weights(pretrained=None)
input_shape = (1, 3, 128, 128)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1)
# model, initialization and forward (gpu)
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1)
with pytest.raises(TypeError):
# pretrained should be str or None
net.init_weights(pretrained=[1])
with pytest.raises(AssertionError):
# input size must be 128 * 128
input_shape = (1, 3, 64, 64)
img = _demo_inputs(input_shape)
output = net(img)
def test_rrdbnet_backbone():
"""Test RRDBNet backbone."""
# model, initialization and forward (cpu)
# x4 model
net = RRDBNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
growth_channels=4,
upscale_factor=4)
net.init_weights(pretrained=None)
input_shape = (1, 3, 12, 12)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 48, 48)
# x3 model
with pytest.raises(ValueError):
net = RRDBNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
growth_channels=4,
upscale_factor=3)
# x2 model
net = RRDBNet(
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=2,
growth_channels=4,
upscale_factor=2)
net.init_weights(pretrained=None)
input_shape = (1, 3, 12, 12)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 24, 24)
# model forward (gpu)
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 3, 24, 24)
with pytest.raises(TypeError):
# pretrained should be str or None
net.init_weights(pretrained=[1])
def test_srcnn():
# model, initialization and forward (cpu)
net = SRCNN(
channels=(3, 4, 6, 3), kernel_sizes=(9, 1, 5), upscale_factor=4)
net.init_weights(pretrained=None)
input_shape = (1, 3, 4, 4)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 3, 16, 16)
net = SRCNN(
channels=(1, 4, 8, 1), kernel_sizes=(3, 3, 3), upscale_factor=2)
net.init_weights(pretrained=None)
input_shape = (1, 1, 4, 4)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 8, 8)
# model forward (gpu)
if torch.cuda.is_available():
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 8, 8)
with pytest.raises(AssertionError):
# The length of channel tuple should be 4
net = SRCNN(
channels=(3, 4, 3), kernel_sizes=(9, 1, 5), upscale_factor=4)
with pytest.raises(AssertionError):
# The length of kernel tuple should be 3
net = SRCNN(
channels=(3, 4, 4, 3), kernel_sizes=(9, 1, 1, 5), upscale_factor=4)
with pytest.raises(TypeError):
# pretrained should be str or None
net.init_weights(pretrained=[1])
def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
Returns:
        imgs (Tensor): Images in FloatTensor with desired shapes.
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
| 7,196 | 26.469466 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_duf.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.duf import DynamicUpsamplingFilter
def test_dynamic_upsampling_filter():
"""Test DynamicUpsamplingFilter."""
with pytest.raises(TypeError):
# The type of filter_size must be tuple
DynamicUpsamplingFilter(filter_size=3)
with pytest.raises(ValueError):
# The length of filter size must be 2
DynamicUpsamplingFilter(filter_size=(3, 3, 3))
duf = DynamicUpsamplingFilter(filter_size=(5, 5))
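    # filters: (n, filter_prod, upsampling_square, h, w); output has 3 * 16 = 48 channels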
x = torch.rand(1, 3, 4, 4)
filters = torch.rand(1, 25, 16, 4, 4)
output = duf(x, filters)
assert output.shape == (1, 48, 4, 4)
duf = DynamicUpsamplingFilter(filter_size=(3, 3))
x = torch.rand(1, 3, 4, 4)
filters = torch.rand(1, 9, 16, 4, 4)
output = duf(x, filters)
assert output.shape == (1, 48, 4, 4)
    # gpu
if torch.cuda.is_available():
duf = DynamicUpsamplingFilter(filter_size=(3, 3)).cuda()
x = torch.rand(1, 3, 4, 4).cuda()
filters = torch.rand(1, 9, 16, 4, 4).cuda()
output = duf(x, filters)
assert output.shape == (1, 48, 4, 4)
| 1,223 | 33 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_liif_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models import build_backbone
def test_liif_edsr():
model_cfg = dict(
type='LIIFEDSR',
encoder=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16),
imnet=dict(
type='MLPRefiner',
in_dim=64,
out_dim=3,
hidden_list=[256, 256, 256, 256]),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
eval_bsize=30000)
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'LIIFEDSR'
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
# test on cpu
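    # the 4th positional argument is test_mode, which evaluates in batches of eval_bsize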
output = model(inputs, coord, cell)
output = model(inputs, coord, cell, True)
assert torch.is_tensor(output)
assert output.shape == targets.shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
targets = targets.cuda()
coord = coord.cuda()
cell = cell.cuda()
output = model(inputs, coord, cell)
output = model(inputs, coord, cell, True)
assert torch.is_tensor(output)
assert output.shape == targets.shape
def test_liif_rdn():
model_cfg = dict(
type='LIIFRDN',
encoder=dict(
type='RDN',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16,
upscale_factor=4,
num_layers=8,
channel_growth=64),
imnet=dict(
type='MLPRefiner',
in_dim=64,
out_dim=3,
hidden_list=[256, 256, 256, 256]),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
eval_bsize=30000)
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'LIIFRDN'
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
# test on cpu
output = model(inputs, coord, cell)
output = model(inputs, coord, cell, True)
assert torch.is_tensor(output)
assert output.shape == targets.shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
targets = targets.cuda()
coord = coord.cuda()
cell = cell.cuda()
output = model(inputs, coord, cell)
output = model(inputs, coord, cell, True)
assert torch.is_tensor(output)
assert output.shape == targets.shape
| 2,884 | 25.227273 | 49 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_iconvsr.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.iconvsr import IconVSR
def test_iconvsr():
"""Test IconVSR."""
# gpu (since IconVSR contains DCN, only GPU mode is available)
if torch.cuda.is_available():
iconvsr = IconVSR(
mid_channels=64,
num_blocks=30,
keyframe_stride=5,
padding=2,
spynet_pretrained=None,
edvr_pretrained=None).cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
iconvsr.init_weights(pretrained=None)
output = iconvsr(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
with pytest.raises(AssertionError):
# The height and width of inputs should be at least 64
input_tensor = torch.rand(1, 5, 3, 61, 61)
iconvsr(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
iconvsr.init_weights(pretrained=[1])
# spynet_pretrained should be str or None
with pytest.raises(TypeError):
iconvsr = IconVSR(
mid_channels=64,
num_blocks=30,
keyframe_stride=5,
padding=2,
spynet_pretrained=123,
edvr_pretrained=None).cuda()
# edvr_pretrained should be str or None
with pytest.raises(TypeError):
iconvsr = IconVSR(
mid_channels=64,
num_blocks=30,
keyframe_stride=5,
padding=2,
spynet_pretrained=None,
edvr_pretrained=123).cuda()
| 1,695 | 30.407407 | 66 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_tdan_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.tdan_net import TDANNet
def test_tdan_net():
"""Test TDANNet."""
# gpu (DCN is available only on GPU)
if torch.cuda.is_available():
tdan = TDANNet().cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
tdan.init_weights(pretrained=None)
output = tdan(input_tensor)
assert len(output) == 2 # (1) HR center + (2) aligned LRs
assert output[0].shape == (1, 3, 256, 256) # HR center frame
assert output[1].shape == (1, 5, 3, 64, 64) # aligned LRs
with pytest.raises(TypeError):
# pretrained should be str or None
tdan.init_weights(pretrained=[1])
| 772 | 29.92 | 69 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_basicvsr_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.basicvsr_net import BasicVSRNet
def test_basicvsr_net():
"""Test BasicVSR."""
# cpu
basicvsr = BasicVSRNet(
mid_channels=64, num_blocks=30, spynet_pretrained=None)
input_tensor = torch.rand(1, 5, 3, 64, 64)
basicvsr.init_weights(pretrained=None)
output = basicvsr(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
# gpu
if torch.cuda.is_available():
basicvsr = BasicVSRNet(
mid_channels=64, num_blocks=30, spynet_pretrained=None).cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
basicvsr.init_weights(pretrained=None)
output = basicvsr(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
with pytest.raises(AssertionError):
# The height and width of inputs should be at least 64
input_tensor = torch.rand(1, 5, 3, 61, 61)
basicvsr(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
basicvsr.init_weights(pretrained=[1])
| 1,137 | 30.611111 | 74 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_dic_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmedit.models import build_backbone
from mmedit.models.backbones.sr_backbones.dic_net import (
FeedbackBlock, FeedbackBlockCustom, FeedbackBlockHeatmapAttention)
def test_feedback_block():
x1 = torch.rand(2, 16, 32, 32)
model = FeedbackBlock(16, 3, 8)
x2 = model(x1)
assert x2.shape == x1.shape
x3 = model(x2)
assert x3.shape == x2.shape
def test_feedback_block_custom():
x1 = torch.rand(2, 3, 32, 32)
model = FeedbackBlockCustom(3, 16, 3, 8)
x2 = model(x1)
assert x2.shape == (2, 16, 32, 32)
def test_feedback_block_heatmap_attention():
x1 = torch.rand(2, 16, 32, 32)
heatmap = torch.rand(2, 5, 32, 32)
model = FeedbackBlockHeatmapAttention(16, 2, 8, 5, 2)
x2 = model(x1, heatmap)
assert x2.shape == x1.shape
x3 = model(x2, heatmap)
assert x3.shape == x2.shape
def test_dic_net():
model_cfg = dict(
type='DICNet',
in_channels=3,
out_channels=3,
mid_channels=48,
num_blocks=6,
hg_mid_channels=256,
hg_num_keypoints=68,
num_steps=4,
upscale_factor=8,
detach_attention=False)
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'DICNet'
# prepare data
inputs = torch.rand(1, 3, 16, 16)
targets = torch.rand(1, 3, 128, 128)
# prepare loss
loss_function = nn.L1Loss()
# prepare optimizer
optimizer = torch.optim.Adam(model.parameters())
# test on cpu
output, _ = model(inputs)
optimizer.zero_grad()
loss = loss_function(output[-1], targets)
loss.backward()
optimizer.step()
assert len(output) == 4
assert torch.is_tensor(output[-1])
assert output[-1].shape == targets.shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters())
inputs = inputs.cuda()
targets = targets.cuda()
output, _ = model(inputs)
optimizer.zero_grad()
loss = loss_function(output[-1], targets)
loss.backward()
optimizer.step()
assert len(output) == 4
assert torch.is_tensor(output[-1])
assert output[-1].shape == targets.shape
with pytest.raises(OSError):
model.init_weights('')
with pytest.raises(TypeError):
model.init_weights(1)
| 2,496 | 24.222222 | 70 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_glean_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.glean_styleganv2 import \
GLEANStyleGANv2
class TestGLEANNet:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_size=16, out_size=256, style_channels=512)
cls.size_cfg = dict(in_size=16, out_size=16, style_channels=512)
def test_glean_styleganv2_cpu(self):
# test default config
glean = GLEANStyleGANv2(**self.default_cfg)
img = torch.randn(2, 3, 16, 16)
res = glean(img)
assert res.shape == (2, 3, 256, 256)
with pytest.raises(TypeError):
# pretrained should be str or None
glean.init_weights(pretrained=[1])
# input tensor size must equal self.in_size
with pytest.raises(AssertionError):
res = glean(torch.randn(2, 3, 17, 32))
# input size must be strictly smaller than output size
with pytest.raises(ValueError):
glean = GLEANStyleGANv2(**self.size_cfg)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_glean_styleganv2_cuda(self):
# test default config
glean = GLEANStyleGANv2(**self.default_cfg).cuda()
img = torch.randn(2, 3, 16, 16).cuda()
res = glean(img)
assert res.shape == (2, 3, 256, 256)
with pytest.raises(TypeError):
# pretrained should be str or None
glean.init_weights(pretrained=[1])
# input tensor size must equal self.in_size
with pytest.raises(AssertionError):
res = glean(torch.randn(2, 3, 32, 17).cuda())
# input size must be strictly smaller than output size
with pytest.raises(ValueError):
glean = GLEANStyleGANv2(**self.size_cfg).cuda()
| 1,834 | 32.981481 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.edvr_net import (EDVRNet,
PCDAlignment,
TSAFusion)
def test_pcd_alignment():
"""Test PCDAlignment."""
# cpu
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
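    # build a 3-level feature pyramid: 8x8, 4x4 and 2x2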
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
# gpu
if torch.cuda.is_available():
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
pcd_alignment = pcd_alignment.cuda()
input_list = [v.cuda() for v in input_list]
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
def test_tsa_fusion():
"""Test TSAFusion."""
# cpu
tsa_fusion = TSAFusion(mid_channels=4, num_frames=5, center_frame_idx=2)
input_tensor = torch.rand(1, 5, 4, 8, 8)
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
# gpu
if torch.cuda.is_available():
tsa_fusion = tsa_fusion.cuda()
input_tensor = input_tensor.cuda()
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
def test_edvrnet():
"""Test EDVRNet."""
# cpu
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True)
input_tensor = torch.rand(1, 5, 3, 8, 8)
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3)
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
# gpu
if torch.cuda.is_available():
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True).cuda()
input_tensor = torch.rand(1, 5, 3, 8, 8).cuda()
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False).cuda()
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3).cuda()
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
| 4,187 | 27.489796 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_tof.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones import TOFlow
def test_tof():
"""Test TOFlow."""
# cpu
tof = TOFlow(adapt_official_weights=True)
input_tensor = torch.rand(1, 7, 3, 32, 32)
tof.init_weights(pretrained=None)
output = tof(input_tensor)
assert output.shape == (1, 3, 32, 32)
tof = TOFlow(adapt_official_weights=False)
tof.init_weights(pretrained=None)
output = tof(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(TypeError):
# pretrained should be str or None
tof.init_weights(pretrained=[1])
# gpu
if torch.cuda.is_available():
tof = TOFlow(adapt_official_weights=True).cuda()
input_tensor = torch.rand(1, 7, 3, 32, 32).cuda()
tof.init_weights(pretrained=None)
output = tof(input_tensor)
assert output.shape == (1, 3, 32, 32)
| 937 | 26.588235 | 57 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_rdn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmedit.models import build_backbone
def test_rdn():
scale = 4
model_cfg = dict(
type='RDN',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16,
upscale_factor=scale)
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'RDN'
# prepare data
inputs = torch.rand(1, 3, 32, 16)
targets = torch.rand(1, 3, 128, 64)
# prepare loss
loss_function = nn.L1Loss()
# prepare optimizer
optimizer = torch.optim.Adam(model.parameters())
# test on cpu
output = model(inputs)
optimizer.zero_grad()
loss = loss_function(output, targets)
loss.backward()
optimizer.step()
assert torch.is_tensor(output)
assert output.shape == targets.shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters())
inputs = inputs.cuda()
targets = targets.cuda()
output = model(inputs)
optimizer.zero_grad()
loss = loss_function(output, targets)
loss.backward()
optimizer.step()
assert torch.is_tensor(output)
assert output.shape == targets.shape
| 1,353 | 22.344828 | 56 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_sr_backbones/test_basicvsr_plusplus.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.basicvsr_pp import BasicVSRPlusPlus
def test_basicvsr_plusplus():
"""Test BasicVSR++."""
# cpu
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=100)
input_tensor = torch.rand(1, 5, 3, 64, 64)
model.init_weights(pretrained=None)
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
# with cpu_cache (no effect on cpu)
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=3)
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
with pytest.raises(AssertionError):
# The height and width of inputs should be at least 64
input_tensor = torch.rand(1, 5, 3, 61, 61)
model(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
model.init_weights(pretrained=[1])
# output has the same size as input
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=False,
spynet_pretrained=None,
cpu_cache_length=100)
input_tensor = torch.rand(1, 5, 3, 256, 256)
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
# gpu
if torch.cuda.is_available():
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=100).cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
model.init_weights(pretrained=None)
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
# with cpu_cache
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=3).cuda()
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
with pytest.raises(AssertionError):
# The height and width of inputs should be at least 64
input_tensor = torch.rand(1, 5, 3, 61, 61).cuda()
model(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
            model.init_weights(pretrained=[1])
# output has the same size as input
model = BasicVSRPlusPlus(
mid_channels=64,
num_blocks=7,
is_low_res_input=False,
spynet_pretrained=None,
cpu_cache_length=100).cuda()
input_tensor = torch.rand(1, 5, 3, 256, 256).cuda()
output = model(input_tensor)
assert output.shape == (1, 5, 3, 256, 256)
| 2,987 | 30.452632 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_vfi_backbones/test_cain_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import build_backbone
def test_cain_net():
model_cfg = dict(type='CAINNet')
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'CAINNet'
# prepare data
inputs0 = torch.rand(1, 2, 3, 5, 5)
target0 = torch.rand(1, 3, 5, 5)
inputs = torch.rand(1, 2, 3, 256, 248)
target = torch.rand(1, 3, 256, 248)
# test on cpu
output = model(inputs)
output = model(inputs, padding_flag=True)
model(inputs0, padding_flag=True)
assert torch.is_tensor(output)
assert output.shape == target.shape
with pytest.raises(AssertionError):
output = model(inputs[:, :1])
with pytest.raises(OSError):
model.init_weights('')
with pytest.raises(TypeError):
model.init_weights(1)
model_cfg = dict(type='CAINNet', norm='in')
model = build_backbone(model_cfg)
model(inputs)
model_cfg = dict(type='CAINNet', norm='bn')
model = build_backbone(model_cfg)
model(inputs)
with pytest.raises(ValueError):
model_cfg = dict(type='CAINNet', norm='lys')
build_backbone(model_cfg)
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
target = target.cuda()
output = model(inputs)
output = model(inputs, True)
assert torch.is_tensor(output)
assert output.shape == target.shape
inputs0 = inputs0.cuda()
target0 = target0.cuda()
model(inputs0, padding_flag=True)
model_cfg = dict(type='CAINNet', norm='in')
model = build_backbone(model_cfg).cuda()
model(inputs)
model_cfg = dict(type='CAINNet', norm='bn')
model = build_backbone(model_cfg).cuda()
model(inputs)
with pytest.raises(ValueError):
model_cfg = dict(type='CAINNet', norm='lys')
build_backbone(model_cfg).cuda()
| 2,023 | 28.333333 | 56 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_vfi_backbones/test_tof_vfi_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import build_backbone
def test_tof_vfi_net():
model_cfg = dict(type='TOFlowVFINet')
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'TOFlowVFINet'
# prepare data
inputs = torch.rand(1, 2, 3, 256, 248)
# test on cpu
output = model(inputs)
assert torch.is_tensor(output)
assert output.shape == (1, 3, 256, 248)
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
output = model(inputs)
output = model(inputs, True)
assert torch.is_tensor(output)
assert output.shape == (1, 3, 256, 256)
        inputs = torch.rand(1, 2, 3, 256, 256).cuda()
output = model(inputs)
assert torch.is_tensor(output)
with pytest.raises(OSError):
model.init_weights('')
with pytest.raises(TypeError):
model.init_weights(1)
with pytest.raises(OSError):
model_cfg = dict(
type='TOFlowVFINet', flow_cfg=dict(norm_cfg=None, pretrained=''))
model = build_backbone(model_cfg)
with pytest.raises(TypeError):
model_cfg = dict(
type='TOFlowVFINet', flow_cfg=dict(norm_cfg=None, pretrained=1))
model = build_backbone(model_cfg)
| 1,371 | 25.901961 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_gl_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import build_backbone, build_component
from mmedit.models.backbones import GLDilationNeck
from mmedit.models.common import SimpleGatedConvModule
def test_gl_encdec():
input_x = torch.randn(1, 4, 256, 256)
template_cfg = dict(type='GLEncoderDecoder')
gl_encdec = build_backbone(template_cfg)
gl_encdec.init_weights()
output = gl_encdec(input_x)
assert output.shape == (1, 3, 256, 256)
cfg_ = template_cfg.copy()
cfg_['decoder'] = dict(type='GLDecoder', out_act='sigmoid')
gl_encdec = build_backbone(cfg_)
output = gl_encdec(input_x)
assert output.shape == (1, 3, 256, 256)
with pytest.raises(ValueError):
cfg_ = template_cfg.copy()
cfg_['decoder'] = dict(type='GLDecoder', out_act='igccc')
gl_encdec = build_backbone(cfg_)
with pytest.raises(TypeError):
gl_encdec.init_weights(pretrained=dict(igccc=4396))
if torch.cuda.is_available():
gl_encdec = build_backbone(template_cfg)
gl_encdec.init_weights()
gl_encdec = gl_encdec.cuda()
output = gl_encdec(input_x.cuda())
assert output.shape == (1, 3, 256, 256)
def test_gl_dilation_neck():
neck = GLDilationNeck(in_channels=8)
x = torch.rand((2, 8, 64, 64))
res = neck(x)
assert res.shape == (2, 8, 64, 64)
if torch.cuda.is_available():
neck = GLDilationNeck(in_channels=8).cuda()
x = torch.rand((2, 8, 64, 64)).cuda()
res = neck(x)
assert res.shape == (2, 8, 64, 64)
neck = GLDilationNeck(in_channels=8, conv_type='gated_conv').cuda()
res = neck(x)
assert isinstance(neck.dilation_convs[0], SimpleGatedConvModule)
assert res.shape == (2, 8, 64, 64)
def test_gl_discs():
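    # two-branch discriminator: the global branch sees the full 256x256 image,
    # the local branch a 128x128 patch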
global_disc_cfg = dict(
in_channels=3,
max_channels=512,
fc_in_channels=512 * 4 * 4,
fc_out_channels=1024,
num_convs=6,
norm_cfg=dict(type='BN'))
local_disc_cfg = dict(
in_channels=3,
max_channels=512,
fc_in_channels=512 * 4 * 4,
fc_out_channels=1024,
num_convs=5,
norm_cfg=dict(type='BN'))
gl_disc_cfg = dict(
type='GLDiscs',
global_disc_cfg=global_disc_cfg,
local_disc_cfg=local_disc_cfg)
gl_discs = build_component(gl_disc_cfg)
gl_discs.init_weights()
input_g = torch.randn(1, 3, 256, 256)
input_l = torch.randn(1, 3, 128, 128)
output = gl_discs((input_g, input_l))
assert output.shape == (1, 1)
with pytest.raises(TypeError):
gl_discs.init_weights(pretrained=dict(igccc=777))
if torch.cuda.is_available():
gl_discs = gl_discs.cuda()
input_g = torch.randn(1, 3, 256, 256).cuda()
input_l = torch.randn(1, 3, 128, 128).cuda()
output = gl_discs((input_g, input_l))
assert output.shape == (1, 1)
| 2,945 | 30.010526 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_deepfill_encoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.backbones import ContextualAttentionNeck, DeepFillEncoder
from mmedit.models.common import SimpleGatedConvModule
def test_deepfill_enc():
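    # default encoder maps a 5-channel input to 128 channels at 1/4 resolution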
encoder = DeepFillEncoder()
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
if torch.cuda.is_available():
encoder = DeepFillEncoder().cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
encoder = DeepFillEncoder(
conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 96, 64, 64)
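        # Why the wrapped conv reports 2x channels: a gated conv predicts
        # features and a gate with one convolution and combines them roughly
        # as
        #   feat, gate = conv(x).chunk(2, dim=1)
        #   out = feat_act(feat) * sigmoid(gate)
        # so the conv outputs 48 * 2 channels for 48 gated output features.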
assert isinstance(encoder.enc2, SimpleGatedConvModule)
assert encoder.enc2.conv.stride == (2, 2)
assert encoder.enc2.conv.out_channels == 48 * 2
def test_deepfill_contextual_attention_neck():
# TODO: add unittest for contextual attention module
neck = ContextualAttentionNeck(in_channels=128)
x = torch.rand((2, 128, 64, 64))
mask = torch.zeros((2, 1, 64, 64))
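    # mask == 1 marks the unknown region to be filled; contextual attention
    # borrows features for it from the known (mask == 0) area.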
mask[..., 20:100, 23:90] = 1.
res, offset = neck(x, mask)
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
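    # Roughly, the 5-D offset records an attention map over the 32x32
    # candidate positions for each of the 32x32 (stride-2) query positions,
    # hence the (N, 32, 32, 32, 32) shape asserted above.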
if torch.cuda.is_available():
neck.cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
neck = ContextualAttentionNeck(
in_channels=128, conv_type='gated_conv').cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
assert isinstance(neck.conv1, SimpleGatedConvModule)
| 3,966 | 34.738739 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_aot_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models import build_backbone
from mmedit.models.backbones.encoder_decoders.necks import AOTBlockNeck
def test_aot_encdec():
input_x = torch.randn(1, 4, 256, 256)
template_cfg = dict(type='AOTEncoderDecoder')
aot_encdec = build_backbone(template_cfg)
aot_encdec.init_weights()
output = aot_encdec(input_x)
assert output.shape == (1, 3, 256, 256)
cfg_ = template_cfg.copy()
cfg_['encoder'] = dict(type='AOTEncoder')
aot_encdec = build_backbone(cfg_)
output = aot_encdec(input_x)
assert output.shape == (1, 3, 256, 256)
cfg_ = template_cfg.copy()
cfg_['decoder'] = dict(type='AOTDecoder')
aot_encdec = build_backbone(cfg_)
output = aot_encdec(input_x)
assert output.shape == (1, 3, 256, 256)
if torch.cuda.is_available():
aot_encdec = build_backbone(template_cfg)
aot_encdec.init_weights()
aot_encdec = aot_encdec.cuda()
output = aot_encdec(input_x.cuda())
assert output.shape == (1, 3, 256, 256)
def test_aot_dilation_neck():
neck = AOTBlockNeck(
in_channels=256, dilation_rates=(1, 2, 4, 8), num_aotblock=8)
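    # Each AOT block splits its channels across parallel dilated convs (rates
    # 1/2/4/8 here) and re-aggregates them, preserving spatial size, which is
    # what the shape asserts below check.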
x = torch.rand((2, 256, 64, 64))
res = neck(x)
assert res.shape == (2, 256, 64, 64)
if torch.cuda.is_available():
neck = AOTBlockNeck(
in_channels=256, dilation_rates=(1, 2, 4, 8),
num_aotblock=8).cuda()
x = torch.rand((2, 256, 64, 64)).cuda()
res = neck(x)
assert res.shape == (2, 256, 64, 64)
| 1,576 | 29.921569 | 71 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_deepfill_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.backbones import DeepFillDecoder
def test_deepfill_dec():
decoder = DeepFillDecoder(128, out_act_cfg=None)
assert not decoder.with_out_activation
decoder = DeepFillDecoder(128)
x = torch.randn((2, 128, 64, 64))
input_dict = dict(out=x)
res = decoder(input_dict)
assert res.shape == (2, 3, 256, 256)
assert decoder.dec2.stride == (1, 1)
assert decoder.dec2.out_channels == 128
assert not decoder.dec7.with_activation
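    # The default output activation clips values to [-1, 1] (it was disabled
    # above via out_act_cfg=None), which the range check below relies on.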
assert res.min().item() >= -1. and res.max().item() <= 1
if torch.cuda.is_available():
decoder = DeepFillDecoder(128).cuda()
x = torch.randn((2, 128, 64, 64)).cuda()
input_dict = dict(out=x)
res = decoder(input_dict)
assert res.shape == (2, 3, 256, 256)
assert decoder.dec2.stride == (1, 1)
assert decoder.dec2.out_channels == 128
assert not decoder.dec7.with_activation
assert res.min().item() >= -1. and res.max().item() <= 1
decoder = DeepFillDecoder(
128, conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 128, 64, 64)).cuda()
input_dict = dict(out=x)
res = decoder(input_dict)
assert res.shape == (2, 3, 256, 256)
assert decoder.dec2.conv.stride == (1, 1)
assert decoder.dec2.conv.out_channels == 96 * 2
assert not decoder.dec7.with_feat_act
assert res.min().item() >= -1. and res.max().item() <= 1
| 1,533 | 34.674419 | 68 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_decoders.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmedit.models.backbones import (VGG16, FBADecoder, IndexedUpsample,
IndexNetDecoder, IndexNetEncoder,
PlainDecoder, ResGCADecoder,
ResGCAEncoder, ResNetDec, ResNetEnc,
ResShortcutDec, ResShortcutEnc)
def assert_tensor_with_shape(tensor, shape):
""""Check if the shape of the tensor is equal to the target shape."""
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == shape
def _demo_inputs(input_shape=(1, 4, 64, 64)):
"""
    Create a superset of inputs needed to run the encoder.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 4, 64, 64).
"""
img = np.random.random(input_shape).astype(np.float32)
img = torch.from_numpy(img)
return img
def test_plain_decoder():
"""Test PlainDecoder."""
model = PlainDecoder(512)
model.init_weights()
model.train()
# create max_pooling index for training
encoder = VGG16(4)
img = _demo_inputs()
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# test forward with gpu
if torch.cuda.is_available():
model = PlainDecoder(512)
model.init_weights()
model.train()
model.cuda()
encoder = VGG16(4)
encoder.cuda()
img = _demo_inputs().cuda()
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
def test_resnet_decoder():
"""Test resnet decoder."""
with pytest.raises(NotImplementedError):
        ResNetDec('UnknownBlock', [2, 3, 3, 2], 512)
model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, kernel_size=5)
model.init_weights()
model.train()
encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
img = _demo_inputs((1, 6, 64, 64))
feat = encoder(img)
prediction = model(feat)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
model = ResNetDec(
'BasicBlockDec', [2, 3, 3, 2], 512, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
img = _demo_inputs((1, 6, 64, 64))
feat = encoder(img)
prediction = model(feat)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# test forward with gpu
if torch.cuda.is_available():
model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, kernel_size=5)
model.init_weights()
model.train()
model.cuda()
encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
encoder.cuda()
img = _demo_inputs((1, 6, 64, 64)).cuda()
feat = encoder(img)
prediction = model(feat)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
model = ResNetDec(
'BasicBlockDec', [2, 3, 3, 2], 512, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
model.cuda()
encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
encoder.cuda()
img = _demo_inputs((1, 6, 64, 64)).cuda()
feat = encoder(img)
prediction = model(feat)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
def test_res_shortcut_decoder():
"""Test resnet decoder with shortcut."""
with pytest.raises(NotImplementedError):
        ResShortcutDec('UnknownBlock', [2, 3, 3, 2], 512)
model = ResShortcutDec('BasicBlockDec', [2, 3, 3, 2], 512)
model.init_weights()
model.train()
encoder = ResShortcutEnc('BasicBlock', [2, 4, 4, 2], 6)
img = _demo_inputs((1, 6, 64, 64))
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# test forward with gpu
if torch.cuda.is_available():
model = ResShortcutDec('BasicBlockDec', [2, 3, 3, 2], 512)
model.init_weights()
model.train()
model.cuda()
encoder = ResShortcutEnc('BasicBlock', [2, 4, 4, 2], 6)
encoder.cuda()
img = _demo_inputs((1, 6, 64, 64)).cuda()
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
def test_res_gca_decoder():
"""Test resnet decoder with shortcut and guided contextual attention."""
with pytest.raises(NotImplementedError):
        ResGCADecoder('UnknownBlock', [2, 3, 3, 2], 512)
model = ResGCADecoder('BasicBlockDec', [2, 3, 3, 2], 512)
model.init_weights()
model.train()
encoder = ResGCAEncoder('BasicBlock', [2, 4, 4, 2], 6)
img = _demo_inputs((2, 6, 32, 32))
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([2, 1, 32, 32]))
# test forward with gpu
if torch.cuda.is_available():
model = ResGCADecoder('BasicBlockDec', [2, 3, 3, 2], 512)
model.init_weights()
model.train()
model.cuda()
encoder = ResGCAEncoder('BasicBlock', [2, 4, 4, 2], 6)
encoder.cuda()
img = _demo_inputs((2, 6, 32, 32)).cuda()
outputs = encoder(img)
prediction = model(outputs)
assert_tensor_with_shape(prediction, torch.Size([2, 1, 32, 32]))
def test_indexed_upsample():
"""Test indexed upsample module for indexnet decoder."""
indexed_upsample = IndexedUpsample(12, 12)
# test indexed_upsample without dec_idx_feat (no upsample)
x = torch.rand(2, 6, 32, 32)
shortcut = torch.rand(2, 6, 32, 32)
output = indexed_upsample(x, shortcut)
assert_tensor_with_shape(output, (2, 12, 32, 32))
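    # Without dec_idx_feat the module just fuses x with the shortcut through
    # a conv (no upsampling), so the 32x32 size is preserved; with
    # dec_idx_feat (next case) x is roughly upsampled 2x and reweighted by
    # the index map before the fusion.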
    # test indexed_upsample with dec_idx_feat (with upsample)
x = torch.rand(2, 6, 32, 32)
dec_idx_feat = torch.rand(2, 6, 64, 64)
shortcut = torch.rand(2, 6, 64, 64)
output = indexed_upsample(x, shortcut, dec_idx_feat)
assert_tensor_with_shape(output, (2, 12, 64, 64))
def test_indexnet_decoder():
"""Test Indexnet decoder."""
# test indexnet decoder with default indexnet setting
with pytest.raises(AssertionError):
# shortcut must have four dimensions
indexnet_decoder = IndexNetDecoder(
160, kernel_size=5, separable_conv=False)
x = torch.rand(2, 256, 4, 4)
shortcut = torch.rand(2, 128, 8, 8, 8)
dec_idx_feat = torch.rand(2, 128, 8, 8, 8)
outputs_enc = dict(
out=x, shortcuts=[shortcut], dec_idx_feat_list=[dec_idx_feat])
indexnet_decoder(outputs_enc)
indexnet_decoder = IndexNetDecoder(
160, kernel_size=5, separable_conv=False)
indexnet_decoder.init_weights()
indexnet_encoder = IndexNetEncoder(4)
x = torch.rand(2, 4, 32, 32)
outputs_enc = indexnet_encoder(x)
out = indexnet_decoder(outputs_enc)
assert out.shape == (2, 1, 32, 32)
# test indexnet decoder with other setting
indexnet_decoder = IndexNetDecoder(160, kernel_size=3, separable_conv=True)
indexnet_decoder.init_weights()
out = indexnet_decoder(outputs_enc)
assert out.shape == (2, 1, 32, 32)
def test_fba_decoder():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
FBADecoder(pool_scales=1, in_channels=32, channels=16)
inputs = dict()
conv_out_1 = _demo_inputs((1, 11, 320, 320))
conv_out_2 = _demo_inputs((1, 64, 160, 160))
conv_out_3 = _demo_inputs((1, 256, 80, 80))
conv_out_4 = _demo_inputs((1, 512, 40, 40))
conv_out_5 = _demo_inputs((1, 1024, 40, 40))
conv_out_6 = _demo_inputs((1, 2048, 40, 40))
inputs['conv_out'] = [
conv_out_1, conv_out_2, conv_out_3, conv_out_4, conv_out_5, conv_out_6
]
inputs['merged'] = _demo_inputs((1, 3, 320, 320))
inputs['two_channel_trimap'] = _demo_inputs((1, 2, 320, 320))
model = FBADecoder(
pool_scales=(1, 2, 3, 6),
in_channels=2048,
channels=256,
norm_cfg=dict(type='GN', num_groups=32))
alpha, F, B = model(inputs)
assert_tensor_with_shape(alpha, torch.Size([1, 1, 320, 320]))
assert_tensor_with_shape(F, torch.Size([1, 3, 320, 320]))
assert_tensor_with_shape(B, torch.Size([1, 3, 320, 320]))
| 8,503 | 33.710204 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_encoder_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmedit.models.backbones import SimpleEncoderDecoder
def assert_dict_keys_equal(dictionary, target_keys):
"""Check if the keys of the dictionary is equal to the target key set."""
assert isinstance(dictionary, dict)
assert set(dictionary.keys()) == set(target_keys)
def assert_tensor_with_shape(tensor, shape):
""""Check if the shape of the tensor is equal to the target shape."""
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == shape
def test_encoder_decoder():
"""Test SimpleEncoderDecoder."""
# check DIM with only alpha loss
encoder = dict(type='VGG16', in_channels=4)
decoder = dict(type='PlainDecoder')
model = SimpleEncoderDecoder(encoder, decoder)
model.init_weights()
model.train()
fg, bg, merged, alpha, trimap = _demo_inputs_pair()
prediction = model(torch.cat([merged, trimap], 1))
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# check DIM with only composition loss
encoder = dict(type='VGG16', in_channels=4)
decoder = dict(type='PlainDecoder')
model = SimpleEncoderDecoder(encoder, decoder)
model.init_weights()
model.train()
fg, bg, merged, alpha, trimap = _demo_inputs_pair()
prediction = model(torch.cat([merged, trimap], 1))
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# check DIM with both alpha and composition loss
encoder = dict(type='VGG16', in_channels=4)
decoder = dict(type='PlainDecoder')
model = SimpleEncoderDecoder(encoder, decoder)
model.init_weights()
model.train()
fg, bg, merged, alpha, trimap = _demo_inputs_pair()
prediction = model(torch.cat([merged, trimap], 1))
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# test forward with gpu
if torch.cuda.is_available():
encoder = dict(type='VGG16', in_channels=4)
decoder = dict(type='PlainDecoder')
model = SimpleEncoderDecoder(encoder, decoder)
model.init_weights()
model.train()
fg, bg, merged, alpha, trimap = _demo_inputs_pair(cuda=True)
model.cuda()
prediction = model(torch.cat([merged, trimap], 1))
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
def _demo_inputs_pair(img_shape=(64, 64), batch_size=1, cuda=False):
"""
    Create a superset of inputs needed to run the backbone.
Args:
img_shape (tuple): shape of the input image.
batch_size (int): batch size of the input batch.
        cuda (bool): whether to transfer the inputs to GPU.
"""
color_shape = (batch_size, 3, img_shape[0], img_shape[1])
gray_shape = (batch_size, 1, img_shape[0], img_shape[1])
fg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
bg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
trimap = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
if cuda:
fg = fg.cuda()
bg = bg.cuda()
merged = merged.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
return fg, bg, merged, alpha, trimap
| 3,362 | 35.956044 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_encoders.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Iterable
import numpy as np
import pytest
import torch
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmedit.models.backbones import (VGG16, DepthwiseIndexBlock,
FBAResnetDilated, HolisticIndexBlock,
IndexNetEncoder, ResGCAEncoder, ResNetEnc,
ResShortcutEnc)
from mmedit.models.backbones.encoder_decoders.encoders.resnet import (
BasicBlock, Bottleneck)
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck)):
return True
return False
def assert_tensor_with_shape(tensor, shape):
""""Check if the shape of the tensor is equal to the target shape."""
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == shape
def assert_mid_feat_shape(mid_feat, target_shape):
assert len(mid_feat) == 5
for i in range(5):
assert_tensor_with_shape(mid_feat[i], torch.Size(target_shape[i]))
def _demo_inputs(input_shape=(2, 4, 64, 64)):
"""
    Create a superset of inputs needed to run the encoder.
Args:
input_shape (tuple): input batch dimensions.
            Default: (2, 4, 64, 64).
"""
img = np.random.random(input_shape).astype(np.float32)
img = torch.from_numpy(img)
return img
def test_vgg16_encoder():
"""Test VGG16 encoder."""
target_shape = [(2, 64, 32, 32), (2, 128, 16, 16), (2, 256, 8, 8),
(2, 512, 4, 4), (2, 512, 2, 2)]
model = VGG16(4)
model.init_weights()
model.train()
img = _demo_inputs()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
model = VGG16(4, batch_norm=True)
model.init_weights()
model.train()
img = _demo_inputs()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
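    # With aspp=True the deepest features pass through an ASPP head (parallel
    # dilated convs, rates 6/12/18 here), which is why 'out' has 256 channels
    # below instead of 512.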
model = VGG16(4, aspp=True, dilations=[6, 12, 18])
model.init_weights()
model.train()
img = _demo_inputs()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 256, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
assert check_norm_state(model.modules(), True)
# test forward with gpu
if torch.cuda.is_available():
model = VGG16(4)
model.init_weights()
model.train()
model.cuda()
img = _demo_inputs().cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
model = VGG16(4, batch_norm=True)
model.init_weights()
model.train()
model.cuda()
img = _demo_inputs().cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
model = VGG16(4, aspp=True, dilations=[6, 12, 18])
model.init_weights()
model.train()
model.cuda()
img = _demo_inputs().cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 256, 2, 2))
assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
assert check_norm_state(model.modules(), True)
def test_resnet_encoder():
"""Test resnet encoder."""
with pytest.raises(NotImplementedError):
ResNetEnc('UnknownBlock', [3, 4, 4, 2], 3)
with pytest.raises(TypeError):
model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 3)
model.init_weights(list())
model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
    # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64))
feat = model(img)
assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))
# test resnet encoder with late downsample
model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
    # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64))
feat = model(img)
assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))
if torch.cuda.is_available():
        # repeat the above checks on GPU
model = ResNetEnc(
'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
model.cuda()
        # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64)).cuda()
feat = model(img)
assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))
# test resnet encoder with late downsample
model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
model.cuda()
        # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64)).cuda()
feat = model(img)
assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))
def test_res_shortcut_encoder():
"""Test resnet encoder with shortcut."""
with pytest.raises(NotImplementedError):
ResShortcutEnc('UnknownBlock', [3, 4, 4, 2], 3)
target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
(2, 128, 8, 8), (2, 256, 4, 4)]
# target shape for model with late downsample
target_late_ds_shape = [(2, 32, 64, 64), (2, 64, 32, 32), (2, 64, 16, 16),
(2, 128, 8, 8), (2, 256, 4, 4)]
model = ResShortcutEnc(
'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
    # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_shape[4])
model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6)
model.init_weights()
model.train()
    # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_shape[4])
# test resnet shortcut encoder with late downsample
model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
    # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_late_ds_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_late_ds_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_late_ds_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_late_ds_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_late_ds_shape[4])
if torch.cuda.is_available():
        # repeat the above checks on GPU
model = ResShortcutEnc(
'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
assert hasattr(model.conv1.conv, 'weight_orig')
model.init_weights()
model.train()
model.cuda()
        # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_shape[4])
model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6)
model.init_weights()
model.train()
model.cuda()
        # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_shape[4])
# test resnet shortcut encoder with late downsample
model = ResShortcutEnc(
'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
model.cuda()
        # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['feat1'], target_late_ds_shape[0])
assert_tensor_with_shape(outputs['feat2'], target_late_ds_shape[1])
assert_tensor_with_shape(outputs['feat3'], target_late_ds_shape[2])
assert_tensor_with_shape(outputs['feat4'], target_late_ds_shape[3])
assert_tensor_with_shape(outputs['feat5'], target_late_ds_shape[4])
def test_res_gca_encoder():
"""Test resnet encoder with shortcut and guided contextual attention."""
with pytest.raises(NotImplementedError):
ResGCAEncoder('UnknownBlock', [3, 4, 4, 2], 3)
target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
(2, 128, 8, 8), (2, 256, 4, 4)]
# target shape for model with late downsample
target_late_ds = [(2, 32, 64, 64), (2, 64, 32, 32), (2, 64, 16, 16),
(2, 128, 8, 8), (2, 256, 4, 4)]
model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 4)
model.init_weights()
model.train()
    # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_shape[i])
model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 6)
model.init_weights()
model.train()
    # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_shape[i])
# test resnet shortcut encoder with late downsample
model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
    # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64))
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_late_ds[i])
if torch.cuda.is_available():
        # repeat the above checks on GPU
model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 4)
model.init_weights()
model.train()
model.cuda()
        # trimap has 1 channel
img = _demo_inputs((2, 4, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_shape[i])
model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 6)
model.init_weights()
model.train()
model.cuda()
        # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_shape[i])
# test resnet shortcut encoder with late downsample
model = ResGCAEncoder(
'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
model.init_weights()
model.train()
model.cuda()
        # both image and trimap have 3 channels
img = _demo_inputs((2, 6, 64, 64)).cuda()
outputs = model(img)
assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
for i in range(5):
assert_tensor_with_shape(outputs[f'feat{i+1}'], target_late_ds[i])
def test_index_blocks():
"""Test index blocks for indexnet encoder."""
# test holistic index block
    # test holistic index block without context and nonlinearity
block = HolisticIndexBlock(128, use_context=False, use_nonlinear=False)
assert not isinstance(block.index_block, Iterable)
x = torch.rand(2, 128, 8, 8)
enc_idx_feat, dec_idx_feat = block(x)
assert enc_idx_feat.shape == (2, 1, 8, 8)
assert dec_idx_feat.shape == (2, 1, 8, 8)
    # test holistic index block with context and nonlinearity
block = HolisticIndexBlock(128, use_context=True, use_nonlinear=True)
assert len(block.index_block) == 2 # nonlinear mode has two blocks
x = torch.rand(2, 128, 8, 8)
enc_idx_feat, dec_idx_feat = block(x)
assert enc_idx_feat.shape == (2, 1, 8, 8)
assert dec_idx_feat.shape == (2, 1, 8, 8)
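    # Contrast with the depthwise variants below: a holistic block predicts
    # one index map shared by all channels ((N, 1, H, W) above), while a
    # depthwise block predicts a map per channel ((N, C, H, W)).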
# test depthwise index block
    # test depthwise index block without context and nonlinearity in o2o mode
block = DepthwiseIndexBlock(
        128, use_context=False, mode='o2o', use_nonlinear=False)
assert not isinstance(block.index_blocks[0], Iterable)
x = torch.rand(2, 128, 8, 8)
enc_idx_feat, dec_idx_feat = block(x)
assert enc_idx_feat.shape == (2, 128, 8, 8)
assert dec_idx_feat.shape == (2, 128, 8, 8)
    # test depthwise index block with context and nonlinearity in m2o mode
block = DepthwiseIndexBlock(
128, use_context=True, mode='m2o', use_nonlinear=True)
assert len(block.index_blocks[0]) == 2 # nonlinear mode has two blocks
x = torch.rand(2, 128, 8, 8)
enc_idx_feat, dec_idx_feat = block(x)
assert enc_idx_feat.shape == (2, 128, 8, 8)
assert dec_idx_feat.shape == (2, 128, 8, 8)
def test_indexnet_encoder():
"""Test Indexnet encoder."""
with pytest.raises(ValueError):
# out_stride must be 16 or 32
IndexNetEncoder(4, out_stride=8)
with pytest.raises(NameError):
# index_mode must be 'holistic', 'o2o' or 'm2o'
IndexNetEncoder(4, index_mode='unknown_mode')
# test indexnet encoder with default indexnet setting
indexnet_encoder = IndexNetEncoder(
4,
out_stride=32,
width_mult=1,
index_mode='m2o',
aspp=True,
use_nonlinear=True,
use_context=True)
indexnet_encoder.init_weights()
x = torch.rand(2, 4, 32, 32)
outputs = indexnet_encoder(x)
assert outputs['out'].shape == (2, 160, 1, 1)
assert len(outputs['shortcuts']) == 7
target_shapes = [(2, 32, 32, 32), (2, 16, 16, 16), (2, 24, 16, 16),
(2, 32, 8, 8), (2, 64, 4, 4), (2, 96, 2, 2),
(2, 160, 2, 2)]
for shortcut, target_shape in zip(outputs['shortcuts'], target_shapes):
assert shortcut.shape == target_shape
assert len(outputs['dec_idx_feat_list']) == 7
target_shapes = [(2, 32, 32, 32), None, (2, 24, 16, 16), (2, 32, 8, 8),
(2, 64, 4, 4), None, (2, 160, 2, 2)]
for dec_idx_feat, target_shape in zip(outputs['dec_idx_feat_list'],
target_shapes):
if dec_idx_feat is not None:
assert dec_idx_feat.shape == target_shape
# test indexnet encoder with other config
indexnet_encoder = IndexNetEncoder(
4,
out_stride=16,
width_mult=2,
index_mode='o2o',
aspp=False,
use_nonlinear=False,
use_context=False)
indexnet_encoder.init_weights()
x = torch.rand(2, 4, 32, 32)
outputs = indexnet_encoder(x)
assert outputs['out'].shape == (2, 160, 2, 2)
assert len(outputs['shortcuts']) == 7
target_shapes = [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16),
(2, 64, 8, 8), (2, 128, 4, 4), (2, 192, 2, 2),
(2, 320, 2, 2)]
for shortcut, target_shape in zip(outputs['shortcuts'], target_shapes):
assert shortcut.shape == target_shape
assert len(outputs['dec_idx_feat_list']) == 7
target_shapes = [(2, 64, 32, 32), None, (2, 48, 16, 16), (2, 64, 8, 8),
(2, 128, 4, 4), None, None]
for dec_idx_feat, target_shape in zip(outputs['dec_idx_feat_list'],
target_shapes):
if dec_idx_feat is not None:
assert dec_idx_feat.shape == target_shape
# test indexnet encoder with holistic index block
indexnet_encoder = IndexNetEncoder(
4,
out_stride=16,
width_mult=2,
index_mode='holistic',
aspp=False,
freeze_bn=True,
use_nonlinear=False,
use_context=False)
indexnet_encoder.init_weights()
x = torch.rand(2, 4, 32, 32)
outputs = indexnet_encoder(x)
assert outputs['out'].shape == (2, 160, 2, 2)
assert len(outputs['shortcuts']) == 7
target_shapes = [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16),
(2, 64, 8, 8), (2, 128, 4, 4), (2, 192, 2, 2),
(2, 320, 2, 2)]
for shortcut, target_shape in zip(outputs['shortcuts'], target_shapes):
assert shortcut.shape == target_shape
assert len(outputs['dec_idx_feat_list']) == 7
target_shapes = [(2, 1, 32, 32), None, (2, 1, 16, 16), (2, 1, 8, 8),
(2, 1, 4, 4), None, None]
for dec_idx_feat, target_shape in zip(outputs['dec_idx_feat_list'],
target_shapes):
if dec_idx_feat is not None:
assert dec_idx_feat.shape == target_shape
def test_fba_encoder():
"""Test FBA encoder."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
FBAResnetDilated(
20,
in_channels=11,
stem_channels=64,
base_channels=64,
)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
FBAResnetDilated(
50,
in_channels=11,
stem_channels=64,
base_channels=64,
num_stages=0)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
FBAResnetDilated(
50,
in_channels=11,
stem_channels=64,
base_channels=64,
num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
FBAResnetDilated(
50,
in_channels=11,
stem_channels=64,
base_channels=64,
strides=(1, ),
dilations=(1, 1),
num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = FBAResnetDilated(
50,
in_channels=11,
stem_channels=64,
base_channels=64,
)
model.init_weights(pretrained=233)
model = FBAResnetDilated(
depth=50,
in_channels=11,
stem_channels=64,
base_channels=64,
conv_cfg=dict(type='ConvWS'),
norm_cfg=dict(type='GN', num_groups=32))
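    # FBA matting trains with very small batch sizes, so it uses
    # weight-standardised convs ('ConvWS') with GroupNorm instead of BN, as
    # configured above.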
model.init_weights()
model.train()
input = _demo_inputs((1, 14, 320, 320))
output = model(input)
assert 'conv_out' in output.keys()
assert 'merged' in output.keys()
assert 'two_channel_trimap' in output.keys()
assert isinstance(output['conv_out'], list)
assert len(output['conv_out']) == 6
assert isinstance(output['merged'], torch.Tensor)
assert_tensor_with_shape(output['merged'], torch.Size([1, 3, 320, 320]))
assert isinstance(output['two_channel_trimap'], torch.Tensor)
assert_tensor_with_shape(output['two_channel_trimap'],
torch.Size([1, 2, 320, 320]))
if torch.cuda.is_available():
model = FBAResnetDilated(
depth=50,
in_channels=11,
stem_channels=64,
base_channels=64,
conv_cfg=dict(type='ConvWS'),
norm_cfg=dict(type='GN', num_groups=32))
model.init_weights()
model.train()
model.cuda()
input = _demo_inputs((1, 14, 320, 320)).cuda()
output = model(input)
assert 'conv_out' in output.keys()
assert 'merged' in output.keys()
assert 'two_channel_trimap' in output.keys()
assert isinstance(output['conv_out'], list)
assert len(output['conv_out']) == 6
assert isinstance(output['merged'], torch.Tensor)
assert_tensor_with_shape(output['merged'], torch.Size([1, 3, 320,
320]))
assert isinstance(output['two_channel_trimap'], torch.Tensor)
assert_tensor_with_shape(output['two_channel_trimap'],
torch.Size([1, 2, 320, 320]))
| 24,631 | 38.22293 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_pconv_encdec.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmedit.models.backbones import PConvEncoder, PConvEncoderDecoder
def test_pconv_encdec():
pconv_enc_cfg = dict(type='PConvEncoder')
pconv_dec_cfg = dict(type='PConvDecoder')
if torch.cuda.is_available():
pconv_encdec = PConvEncoderDecoder(pconv_enc_cfg, pconv_dec_cfg)
pconv_encdec.init_weights()
pconv_encdec.cuda()
x = torch.randn((1, 3, 256, 256)).cuda()
mask = torch.ones_like(x)
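        # partial-conv mask convention: 1 = valid pixel, 0 = hole; zero out
        # a region so the forward pass actually exercises the hole path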
        mask[..., 50:150, 100:250] = 0.
res, updated_mask = pconv_encdec(x, mask)
assert res.shape == (1, 3, 256, 256)
        assert updated_mask.shape == (1, 3, 256, 256)
with pytest.raises(TypeError):
pconv_encdec.init_weights(pretrained=dict(igccc=8989))
def test_pconv_enc():
pconv_enc = PConvEncoder(norm_eval=False)
pconv_enc.train()
for name, module in pconv_enc.named_modules():
if isinstance(module, _BatchNorm):
assert module.training
pconv_enc = PConvEncoder(norm_eval=True)
pconv_enc.train()
for name, module in pconv_enc.named_modules():
if isinstance(module, _BatchNorm):
assert not module.training
| 1,280 | 31.025 | 72 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_backbones/test_encoder_decoders/test_deepfill_encdec.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.backbones import DeepFillEncoderDecoder, GLEncoderDecoder
from mmedit.models.components import DeepFillRefiner
def test_deepfill_encdec():
encdec = DeepFillEncoderDecoder()
assert isinstance(encdec.stage1, GLEncoderDecoder)
assert isinstance(encdec.stage2, DeepFillRefiner)
if torch.cuda.is_available():
img = torch.rand((2, 3, 256, 256)).cuda()
mask = img.new_zeros((2, 1, 256, 256))
mask[..., 20:100, 30:120] = 1.
input_x = torch.cat([img, torch.ones_like(mask), mask], dim=1)
encdec.cuda()
stage1_res, stage2_res = encdec(input_x)
assert stage1_res.shape == (2, 3, 256, 256)
assert stage2_res.shape == (2, 3, 256, 256)
encdec = DeepFillEncoderDecoder(return_offset=True).cuda()
stage1_res, stage2_res, offset = encdec(input_x)
assert offset.shape == (2, 32, 32, 32, 32)
| 961 | 37.48 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_extractors/test_feedback_hour_glass.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import build_component
from mmedit.models.extractors import Hourglass
from mmedit.models.extractors.feedback_hour_glass import (
ResBlock, reduce_to_five_heatmaps)
def test_res_block():
res_block = ResBlock(16, 32)
x = torch.rand(2, 16, 64, 64)
y = res_block(x)
assert y.shape == (2, 32, 64, 64)
res_block = ResBlock(16, 16)
x = torch.rand(2, 16, 64, 64)
y = res_block(x)
assert y.shape == (2, 16, 64, 64)
def test_hour_glass():
hour_glass = Hourglass(2, 16)
x = torch.rand(2, 16, 64, 64)
y = hour_glass(x)
assert y.shape == x.shape
def test_feedback_hour_glass():
model_cfg = dict(
type='FeedbackHourglass', mid_channels=16, num_keypoints=20)
fhg = build_component(model_cfg)
assert fhg.__class__.__name__ == 'FeedbackHourglass'
x = torch.rand(2, 3, 64, 64)
heatmap, last_hidden = fhg.forward(x)
assert heatmap.shape == (2, 20, 16, 16)
assert last_hidden.shape == (2, 16, 16, 16)
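    # Feeding last_hidden back into the next call realises the "feedback"
    # recurrence across steps; the shapes stay fixed, as checked below.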
heatmap, last_hidden = fhg.forward(x, last_hidden)
assert heatmap.shape == (2, 20, 16, 16)
assert last_hidden.shape == (2, 16, 16, 16)
def test_reduce_to_five_heatmaps():
heatmap = torch.rand((2, 5, 64, 64))
new_heatmap = reduce_to_five_heatmaps(heatmap, False)
assert new_heatmap.shape == (2, 5, 64, 64)
new_heatmap = reduce_to_five_heatmaps(heatmap, True)
assert new_heatmap.shape == (2, 5, 64, 64)
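    # The supported channel counts (5, 68 and 194 below) match common facial
    # landmark layouts (e.g. 300-W's 68 points, Helen's 194); each gets
    # collapsed into 5 semantic keypoint groups, and any other count should
    # raise NotImplementedError (last case).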
heatmap = torch.rand((2, 68, 64, 64))
new_heatmap = reduce_to_five_heatmaps(heatmap, False)
assert new_heatmap.shape == (2, 5, 64, 64)
new_heatmap = reduce_to_five_heatmaps(heatmap, True)
assert new_heatmap.shape == (2, 5, 64, 64)
heatmap = torch.rand((2, 194, 64, 64))
new_heatmap = reduce_to_five_heatmaps(heatmap, False)
assert new_heatmap.shape == (2, 5, 64, 64)
new_heatmap = reduce_to_five_heatmaps(heatmap, True)
assert new_heatmap.shape == (2, 5, 64, 64)
with pytest.raises(NotImplementedError):
heatmap = torch.rand((2, 12, 64, 64))
reduce_to_five_heatmaps(heatmap, False)
| 2,154 | 30.231884 | 68 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_extractors/test_lte.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import build_component
def test_lte():
model_cfg = dict(
type='LTE',
requires_grad=False,
pixel_range=1.,
pretrained=None,
load_pretrained_vgg=False)
lte = build_component(model_cfg)
assert lte.__class__.__name__ == 'LTE'
x = torch.rand(2, 3, 64, 64)
x_level3, x_level2, x_level1 = lte(x)
assert x_level1.shape == (2, 64, 64, 64)
assert x_level2.shape == (2, 128, 32, 32)
assert x_level3.shape == (2, 256, 16, 16)
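    # LTE (the learnable texture extractor used by TTSR) returns VGG-style
    # features at three scales - full, 1/2 and 1/4 resolution with 64/128/256
    # channels - matching the asserts above.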
lte.init_weights(None)
with pytest.raises(IOError):
model_cfg['pretrained'] = ''
lte = build_component(model_cfg)
x_level3, x_level2, x_level1 = lte(x)
lte.init_weights('')
with pytest.raises(TypeError):
lte.init_weights(1)
| 863 | 24.411765 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_synthesizers/test_cyclegan.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest.mock import patch
import mmcv
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import ResnetGenerator
from mmedit.models.components import PatchDiscriminator
from mmedit.models.losses import GANLoss, L1Loss
def test_cyclegan():
model_cfg = dict(
type='CycleGAN',
generator=dict(
type='ResnetGenerator',
in_channels=3,
out_channels=3,
base_channels=64,
norm_cfg=dict(type='IN'),
use_dropout=False,
num_blocks=9,
padding_mode='reflect',
init_cfg=dict(type='normal', gain=0.02)),
discriminator=dict(
type='PatchDiscriminator',
in_channels=3,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='IN'),
init_cfg=dict(type='normal', gain=0.02)),
gan_loss=dict(
type='GANLoss',
gan_type='lsgan',
real_label_val=1.0,
fake_label_val=0,
loss_weight=1.0),
cycle_loss=dict(type='L1Loss', loss_weight=10.0, reduction='mean'),
id_loss=dict(type='L1Loss', loss_weight=0.5, reduction='mean'))
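    # Standard CycleGAN recipe (roughly): LSGAN adversarial loss, L1
    # cycle-consistency loss weighted 10x, plus an optional identity loss,
    # which only makes sense when in_channels == out_channels (checked below).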
train_cfg = None
test_cfg = None
# build synthesizer
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # check that identity loss requires in_channels == out_channels
with pytest.raises(AssertionError):
bad_model_cfg = copy.deepcopy(model_cfg)
bad_model_cfg['generator']['out_channels'] = 1
_ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # check that gan loss cannot be None
with pytest.raises(AssertionError):
bad_model_cfg = copy.deepcopy(model_cfg)
bad_model_cfg['gan_loss'] = None
_ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # check that cycle loss cannot be None
with pytest.raises(AssertionError):
bad_model_cfg = copy.deepcopy(model_cfg)
bad_model_cfg['cycle_loss'] = None
_ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert synthesizer.__class__.__name__ == 'CycleGAN'
assert isinstance(synthesizer.generators['a'], ResnetGenerator)
assert isinstance(synthesizer.generators['b'], ResnetGenerator)
assert isinstance(synthesizer.discriminators['a'], PatchDiscriminator)
assert isinstance(synthesizer.discriminators['b'], PatchDiscriminator)
assert isinstance(synthesizer.gan_loss, GANLoss)
assert isinstance(synthesizer.cycle_loss, L1Loss)
assert isinstance(synthesizer.id_loss, L1Loss)
assert synthesizer.train_cfg is None
assert synthesizer.test_cfg is None
# prepare data
inputs = torch.rand(1, 3, 64, 64)
targets = torch.rand(1, 3, 64, 64)
data_batch = {'img_a': inputs, 'img_b': targets}
img_meta = {}
img_meta['img_a_path'] = 'img_a_path'
img_meta['img_b_path'] = 'img_b_path'
data_batch['meta'] = [img_meta]
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.5, 0.999))
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
# test forward_dummy
with torch.no_grad():
output = synthesizer.forward_dummy(data_batch['img_a'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 64, 64)
# test forward_test
with torch.no_grad():
outputs = synthesizer(inputs, targets, [img_meta], test_mode=True)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
# val_step
with torch.no_grad():
outputs = synthesizer.val_step(data_batch)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
# test forward_train
outputs = synthesizer(inputs, targets, [img_meta], test_mode=False)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert torch.is_tensor(outputs['rec_a'])
assert torch.is_tensor(outputs['rec_b'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['rec_a'].size() == (1, 3, 64, 64)
assert outputs['rec_b'].size() == (1, 3, 64, 64)
# test train_step
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b',
'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
synthesizer = synthesizer.cuda()
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(
params=getattr(synthesizer,
'discriminators').parameters()))
}
data_batch_cuda = copy.deepcopy(data_batch)
data_batch_cuda['img_a'] = inputs.cuda()
data_batch_cuda['img_b'] = targets.cuda()
data_batch_cuda['meta'] = [DC(img_meta, cpu_only=True).data]
# forward_test
with torch.no_grad():
outputs = synthesizer(
data_batch_cuda['img_a'],
data_batch_cuda['img_b'],
data_batch_cuda['meta'],
test_mode=True)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
# val_step
with torch.no_grad():
outputs = synthesizer.val_step(data_batch_cuda)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
# test forward_train
outputs = synthesizer(
data_batch_cuda['img_a'],
data_batch_cuda['img_b'],
data_batch_cuda['meta'],
test_mode=False)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'])
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert torch.is_tensor(outputs['rec_a'])
assert torch.is_tensor(outputs['rec_b'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['rec_a'].size() == (1, 3, 64, 64)
assert outputs['rec_b'].size() == (1, 3, 64, 64)
# train_step
outputs = synthesizer.train_step(data_batch_cuda, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b',
'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'],
data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['results']['real_b'],
data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
# test disc_steps and disc_init_steps
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
train_cfg = dict(disc_steps=2, disc_init_steps=2)
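    # With disc_init_steps=2 only the discriminators are updated during the
    # first two iterations; after that, disc_steps=2 means the generators are
    # updated every second step. The log_vars checks below verify both.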
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
# iter 0, 1
for i in range(2):
assert synthesizer.step_counter == i
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b',
'loss_cycle_a', 'loss_cycle_b'
]:
assert outputs['log_vars'].get(v) is None
assert isinstance(outputs['log_vars']['loss_gan_d_a'], float)
assert isinstance(outputs['log_vars']['loss_gan_d_b'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
assert synthesizer.step_counter == i + 1
# iter 2, 3, 4, 5
for i in range(2, 6):
assert synthesizer.step_counter == i
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
log_check_list = [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b',
'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'
]
if i % 2 == 1:
log_None_list = [
'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b',
'loss_cycle_a', 'loss_cycle_b'
]
for v in log_None_list:
assert outputs['log_vars'].get(v) is None
log_check_list.remove(v)
for v in log_check_list:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
assert synthesizer.step_counter == i + 1
# test without id loss
model_cfg_ = copy.deepcopy(model_cfg)
model_cfg_.pop('id_loss')
synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
assert outputs['log_vars'].get('loss_id_a') is None
assert outputs['log_vars'].get('loss_id_b') is None
log_check_list = [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_gan_g_a', 'loss_gan_g_b',
'loss_cycle_a', 'loss_cycle_b'
]
for v in log_check_list:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
# test b2a translation
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
train_cfg = dict(direction='b2a')
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
assert synthesizer.step_counter == 0
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b',
'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_b'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_a'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
assert synthesizer.step_counter == 1
# test GAN image buffer size = 0
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
train_cfg = dict(buffer_size=0)
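    # The GAN image buffer replays a history of previously generated images
    # to the discriminators (as in SimGAN/CycleGAN training); buffer_size=0
    # disables it, so the discriminators always see the newest fakes.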
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
assert synthesizer.step_counter == 0
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b',
'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert torch.is_tensor(outputs['results']['fake_a'])
assert outputs['results']['fake_b'].size() == (1, 3, 64, 64)
assert outputs['results']['fake_a'].size() == (1, 3, 64, 64)
assert synthesizer.step_counter == 1
# test save image
# show input
train_cfg = None
test_cfg = dict(show_input=True)
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(mmcv, 'imwrite', return_value=True):
# test save path not None Assertion
with pytest.raises(AssertionError):
with torch.no_grad():
_ = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True)
# iteration is None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path')
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
# iteration is not None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path',
iteration=1000)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
# not show input, test_direction a2b
train_cfg = None
test_cfg = dict(show_input=False, test_direction='a2b')
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(mmcv, 'imwrite', return_value=True):
# test save path not None Assertion
with pytest.raises(AssertionError):
with torch.no_grad():
_ = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True)
# iteration is None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path')
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
# iteration is not None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path',
iteration=1000)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
# not show input, test_direction b2a
train_cfg = None
test_cfg = dict(show_input=False, test_direction='b2a')
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(mmcv, 'imwrite', return_value=True):
# test save path not None Assertion
with pytest.raises(AssertionError):
with torch.no_grad():
_ = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True)
# iteration is None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path')
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
# iteration is not None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path',
iteration=1000)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert torch.is_tensor(outputs['fake_a'])
assert outputs['fake_b'].size() == (1, 3, 64, 64)
assert outputs['fake_a'].size() == (1, 3, 64, 64)
assert outputs['saved_flag']
| 23,294 | 40.747312 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_basicvsr_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones.sr_backbones import BasicVSRNet
from mmedit.models.losses import MSELoss
def test_basicvsr_model():
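    """Smoke-test BasicVSR: attribute wiring, train_step with SPyNet first
    frozen then updated (fix_iter), forward_dummy/forward_test shapes,
    metric evaluation with image saving, and the ensemble variants."""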
model_cfg = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained=None),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
)
train_cfg = dict(fix_iter=1)
train_cfg = mmcv.Config(train_cfg)
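    # In mmedit's BasicVSR, train_cfg.fix_iter freezes the pretrained SPyNet
    # for the first `fix_iter` iterations; the second train_step call below
    # should therefore also update the flow network.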
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'BasicVSR'
assert isinstance(restorer.generator, BasicVSRNet)
assert isinstance(restorer.pixel_loss, MSELoss)
# prepare data
inputs = torch.rand(1, 5, 3, 64, 64)
targets = torch.rand(1, 5, 3, 256, 256)
if torch.cuda.is_available():
inputs = inputs.cuda()
targets = targets.cuda()
restorer = restorer.cuda()
# prepare data and optimizer
data_batch = {'lq': inputs, 'gt': targets}
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train_step (without updating spynet)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)
# train with spynet updated
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert torch.is_tensor(output)
assert output.size() == (1, 5, 3, 256, 256)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 5, 3, 256, 256)
with torch.no_grad():
outputs = restorer(inputs, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 5, 3, 256, 256)
# test with metric and save image
train_cfg = mmcv.ConfigDict(fix_iter=1)
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs,
'gt': targets,
'meta': [{
'gt_path': 'fake_path/fake_name.png',
'key': '000'
}]
}
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
if torch.cuda.is_available():
restorer = restorer.cuda()
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs, test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
# forward_test (with ensemble)
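    # SpatialTemporalEnsemble is a self-ensemble wrapper: inputs are
    # flipped/transposed, restored, inverse-transformed and the results
    # averaged; is_temporal_ensemble=False applies spatial transforms only.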
model_cfg = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained=None),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
ensemble=dict(
type='SpatialTemporalEnsemble', is_temporal_ensemble=False),
)
train_cfg = dict(fix_iter=1)
train_cfg = mmcv.Config(train_cfg)
test_cfg = None
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
inputs = torch.rand(1, 5, 3, 64, 64)
targets = torch.rand(1, 5, 3, 256, 256)
if torch.cuda.is_available():
inputs = inputs.cuda()
targets = targets.cuda()
restorer = restorer.cuda()
data_batch = {'lq': inputs, 'gt': targets}
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 5, 3, 256, 256)
# forward_test (with unsupported ensemble)
model_cfg = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained=None),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
ensemble=dict(type='abc', is_temporal_ensemble=False),
)
with pytest.raises(NotImplementedError):
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
| 6,854 | 32.602941 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_real_basicvsr.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import RealBasicVSRNet
from mmedit.models.components import UNetDiscriminatorWithSpectralNorm
from mmedit.models.losses import GANLoss, L1Loss
def test_real_basicvsr():
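    """Smoke-test RealBasicVSR: generator/discriminator wiring, train_step
    with pixel, cleaning, perceptual and GAN losses on CPU and GPU,
    discriminator scheduling, and variants with loss terms removed."""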
model_cfg = dict(
type='RealBasicVSR',
generator=dict(type='RealBasicVSRNet'),
discriminator=dict(
type='UNetDiscriminatorWithSpectralNorm',
in_channels=3,
mid_channels=64,
skip_connection=True),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
cleaning_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=1e-1,
real_label_val=1.0,
fake_label_val=0),
is_use_sharpened_gt_in_pixel=True,
is_use_sharpened_gt_in_percep=True,
is_use_sharpened_gt_in_gan=True,
is_use_ema=True,
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'RealBasicVSR'
assert isinstance(restorer.generator, RealBasicVSRNet)
assert isinstance(restorer.discriminator,
UNetDiscriminatorWithSpectralNorm)
assert isinstance(restorer.pixel_loss, L1Loss)
assert isinstance(restorer.gan_loss, GANLoss)
# prepare data
inputs = torch.rand(1, 5, 3, 64, 64)
targets = torch.rand(1, 5, 3, 256, 256)
data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(
params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
    # GAN models have no forward-train path; calling the model with
    # test_mode=False should raise ValueError
with pytest.raises(ValueError):
restorer(**data_batch, test_mode=False)
# test train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix', 'loss_clean'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
# test train_step (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'gt_unsharp': targets.cuda()
}
# train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0).cuda(),
torch.tensor(2.0).cuda())):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real',
'loss_d_fake', 'loss_pix', 'loss_clean'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'],
data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'],
data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
# test disc_steps and disc_init_steps and start_iter
data_batch = {
'lq': inputs.cpu(),
'gt': targets.cpu(),
'gt_unsharp': targets.cpu()
}
train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
# test without pixel loss and perceptual loss
model_cfg_ = model_cfg.copy()
model_cfg_.pop('pixel_loss')
restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
# test train_step w/o loss_percep
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(None, torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix', 'loss_clean'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
# test train_step w/o loss_style
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(torch.tensor(2.0), None)):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix', 'loss_clean'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (5, 3, 256, 256)
| 8,475 | 39.361905 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_tdan.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import TDANNet
from mmedit.models.losses import MSELoss
def test_tdan_model():
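    """Smoke-test TDAN: train_step, forward_dummy (which also returns the
    aligned LR frames), forward_test, and metric evaluation with image
    saving. Everything runs under torch.cuda.is_available(), presumably
    because TDAN's deformable alignment has been CUDA-only."""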
model_cfg = dict(
type='TDAN',
generator=dict(
type='TDANNet',
in_channels=3,
mid_channels=64,
out_channels=3,
num_blocks_before_align=5,
num_blocks_after_align=10),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
lq_pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'TDAN'
assert isinstance(restorer.generator, TDANNet)
assert isinstance(restorer.pixel_loss, MSELoss)
# prepare data
inputs = torch.rand(1, 5, 3, 8, 8)
targets = torch.rand(1, 3, 32, 32)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 32, 32)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert isinstance(output, tuple)
assert torch.is_tensor(output[0])
assert output[0].size() == (1, 3, 32, 32)
assert torch.is_tensor(output[1])
assert output[1].size() == (1, 5, 3, 8, 8)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
with torch.no_grad():
outputs = restorer(inputs.cuda(), test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
# test with metric and save image
if torch.cuda.is_available():
train_cfg = mmcv.ConfigDict(tsa_iter=1)
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'meta': [{
'gt_path': 'fake_path/fake_name.png',
'key': '000/00000000'
}]
}
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs.cuda(), test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
| 5,090 | 34.110345 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_real_esrgan.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import MSRResNet
from mmedit.models.components import ModifiedVGG
from mmedit.models.losses import GANLoss, L1Loss
def test_real_esrgan():
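    """Smoke-test RealESRGAN: forward_test/forward_dummy/val_step shapes,
    train_step with pixel, perceptual and GAN losses on CPU and GPU,
    discriminator scheduling, and variants with individual losses
    removed."""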
model_cfg = dict(
type='RealESRGAN',
generator=dict(
type='MSRResNet',
in_channels=3,
out_channels=3,
mid_channels=4,
num_blocks=1,
upscale_factor=4),
discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=1e-1,
real_label_val=1.0,
fake_label_val=0),
is_use_sharpened_gt_in_pixel=True,
is_use_sharpened_gt_in_percep=True,
is_use_sharpened_gt_in_gan=True,
is_use_ema=True,
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'RealESRGAN'
assert isinstance(restorer.generator, MSRResNet)
assert isinstance(restorer.discriminator, ModifiedVGG)
assert isinstance(restorer.pixel_loss, L1Loss)
assert isinstance(restorer.gan_loss, GANLoss)
# prepare data
inputs = torch.rand(1, 3, 32, 32)
targets = torch.rand(1, 3, 128, 128)
data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(
params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
    # GAN models have no forward-train path; calling the model with
    # test_mode=False should raise ValueError
with pytest.raises(ValueError):
restorer(**data_batch, test_mode=False)
# test forward_test
data_batch.pop('gt_unsharp')
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 128, 128)
# val_step
with torch.no_grad():
outputs = restorer.val_step(data_batch)
data_batch['gt_unsharp'] = targets
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'gt_unsharp': targets.cuda()
}
# forward_test
data_batch.pop('gt_unsharp')
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# val_step
with torch.no_grad():
outputs = restorer.val_step(data_batch)
data_batch['gt_unsharp'] = targets.cuda()
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0).cuda(),
torch.tensor(2.0).cuda())):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real',
'loss_d_fake', 'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'],
data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'],
data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test disc_steps and disc_init_steps and start_iter
data_batch = {
'lq': inputs.cpu(),
'gt': targets.cpu(),
'gt_unsharp': targets.cpu()
}
train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test no discriminator (testing mode)
model_cfg_ = model_cfg.copy()
model_cfg_.pop('discriminator')
restorer = build_model(model_cfg_, train_cfg=train_cfg, test_cfg=test_cfg)
data_batch.pop('gt_unsharp')
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
data_batch['gt_unsharp'] = targets.cpu()
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test without pixel loss and perceptual loss
model_cfg_ = model_cfg.copy()
model_cfg_.pop('pixel_loss')
restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_percep
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(None, torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_style
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(torch.tensor(2.0), None)):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
| 10,264 | 38.480769 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_glean.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import pytest
import torch
from mmedit.models import build_model
def test_glean():
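    """Smoke-test GLEAN's error paths: a non-numeric ``iteration`` must
    raise ValueError, and evaluating metrics without ground truth must
    raise AssertionError, on both CPU and GPU."""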
model_cfg = dict(
type='GLEAN',
generator=dict(
type='GLEANStyleGANv2',
in_size=16,
out_size=64,
style_channels=512),
discriminator=dict(type='StyleGAN2Discriminator', in_size=64),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0,
loss_weight=5e-3))
train_cfg = None
test_cfg = mmcv.Config(dict(metrics=['PSNR'], crop_border=0))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# prepare data
inputs = torch.rand(1, 3, 16, 16)
targets = torch.rand(1, 3, 64, 64)
data_batch = {'lq': inputs, 'gt': targets}
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
meta = [{'lq_path': ''}]
# test forward_test (cpu)
    with pytest.raises(ValueError):  # iteration must be a number or None
with torch.no_grad():
restorer(
**data_batch,
test_mode=True,
save_image=True,
meta=meta,
iteration='1')
with pytest.raises(AssertionError): # test with metric but gt is None
with torch.no_grad():
data_batch.pop('gt')
restorer(**data_batch, test_mode=True)
# test forward_test (gpu)
if torch.cuda.is_available():
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
restorer = restorer.cuda()
        with pytest.raises(ValueError):  # iteration must be a number or None
with torch.no_grad():
restorer(
**data_batch,
test_mode=True,
save_image=True,
meta=meta,
iteration='1')
with pytest.raises(AssertionError): # test with metric but gt is None
with torch.no_grad():
data_batch.pop('gt')
restorer(**data_batch, test_mode=True)
| 2,269 | 30.971831 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_liif.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
def test_liif():
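    """Smoke-test LIIF: coordinate/cell conditioned train_step on CPU and
    GPU, and PSNR/SSIM evaluation via val_step."""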
model_cfg = dict(
type='LIIF',
generator=dict(
type='LIIFEDSR',
encoder=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=16),
imnet=dict(
type='MLPRefiner',
in_dim=64,
out_dim=3,
hidden_list=[256, 256, 256, 256]),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
eval_bsize=30000),
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| 4,114 | 33.579832 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_dic_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models.builder import build_model
def test_dic_model():
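    """Smoke-test DIC: build the pretraining and GAN variants, run
    train_step with heatmap inputs on CPU and GPU, evaluate PSNR/SSIM via
    val_step, and check the init_weights error paths."""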
pretrained = 'https://download.openmmlab.com/mmediting/' + \
'restorers/dic/light_cnn_feature.pth'
model_cfg_pre = dict(
type='DIC',
generator=dict(
type='DICNet', in_channels=3, out_channels=3, mid_channels=48),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
align_loss=dict(type='MSELoss', loss_weight=0.1, reduction='mean'))
model_cfg = dict(
type='DIC',
generator=dict(
type='DICNet', in_channels=3, out_channels=3, mid_channels=48),
discriminator=dict(type='LightCNN', in_channels=3),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
align_loss=dict(type='MSELoss', loss_weight=0.1, reduction='mean'),
feature_loss=dict(
type='LightCNNFeatureLoss',
pretrained=pretrained,
loss_weight=0.1,
criterion='l1'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=0.005,
real_label_val=1.0,
fake_label_val=0))
scale = 8
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))
# build restorer
build_model(model_cfg_pre, train_cfg=train_cfg, test_cfg=test_cfg)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'DIC'
# prepare data
inputs = torch.rand(1, 3, 16, 16)
targets = torch.rand(1, 3, 128, 128)
heatmap = torch.rand(1, 68, 32, 32)
data_batch = {'lq': inputs, 'gt': targets, 'heatmap': heatmap}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
generator = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
discriminator = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
optimizer = dict(generator=generator, discriminator=discriminator)
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pixel_v3'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'heatmap': heatmap.cuda()
}
# train_step
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
generator = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
discriminator = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
optimizer = dict(generator=generator, discriminator=discriminator)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pixel_v3'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# val_step
data_batch.pop('heatmap')
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs.cuda(), test_mode=True)
with pytest.raises(TypeError):
restorer.init_weights(pretrained=1)
with pytest.raises(OSError):
restorer.init_weights(pretrained='')
| 4,844 | 39.375 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_ttsr.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_backbone, build_model
from mmedit.models.backbones.sr_backbones.ttsr_net import (CSFI2, CSFI3, SFE,
MergeFeatures)
def test_sfe():
inputs = torch.rand(2, 3, 48, 48)
sfe = SFE(3, 64, 16, 1.)
outputs = sfe(inputs)
assert outputs.shape == (2, 64, 48, 48)
def test_csfi():
inputs1 = torch.rand(2, 16, 24, 24)
inputs2 = torch.rand(2, 16, 48, 48)
inputs4 = torch.rand(2, 16, 96, 96)
csfi2 = CSFI2(mid_channels=16)
out1, out2 = csfi2(inputs1, inputs2)
assert out1.shape == (2, 16, 24, 24)
assert out2.shape == (2, 16, 48, 48)
csfi3 = CSFI3(mid_channels=16)
out1, out2, out4 = csfi3(inputs1, inputs2, inputs4)
assert out1.shape == (2, 16, 24, 24)
assert out2.shape == (2, 16, 48, 48)
assert out4.shape == (2, 16, 96, 96)
def test_merge_features():
inputs1 = torch.rand(2, 16, 24, 24)
inputs2 = torch.rand(2, 16, 48, 48)
inputs4 = torch.rand(2, 16, 96, 96)
merge_features = MergeFeatures(mid_channels=16, out_channels=3)
out = merge_features(inputs1, inputs2, inputs4)
assert out.shape == (2, 3, 96, 96)
def test_ttsr_net():
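    """TTSRNet forward: fuse the LR input, soft attention map and
    multi-scale texture features into a 4x upscaled output."""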
inputs = torch.rand(2, 3, 24, 24)
soft_attention = torch.rand(2, 1, 24, 24)
t_level3 = torch.rand(2, 64, 24, 24)
t_level2 = torch.rand(2, 32, 48, 48)
t_level1 = torch.rand(2, 16, 96, 96)
ttsr_cfg = dict(
type='TTSRNet',
in_channels=3,
out_channels=3,
mid_channels=16,
texture_channels=16)
ttsr = build_backbone(ttsr_cfg)
outputs = ttsr(inputs, soft_attention, (t_level3, t_level2, t_level1))
assert outputs.shape == (2, 3, 96, 96)
def test_ttsr():
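    """Smoke-test TTSR: build the model with and without pretrained VGG in
    the LTE extractor, run the reference-based train_step on CPU and GPU,
    and evaluate PSNR/SSIM via val_step."""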
model_cfg = dict(
type='TTSR',
generator=dict(
type='TTSRNet',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=(16, 16, 8, 4)),
extractor=dict(type='LTE', load_pretrained_vgg=False),
transformer=dict(type='SearchTransformer'),
discriminator=dict(type='TTSRDiscriminator', in_size=64),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
perceptual_loss=dict(
type='PerceptualLoss',
layer_weights={'29': 1.0},
vgg_type='vgg19',
perceptual_weight=1e-2,
style_weight=0.001,
criterion='mse'),
transferal_perceptual_loss=dict(
type='TransferalPerceptualLoss',
loss_weight=1e-2,
use_attention=False,
criterion='mse'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=1e-3,
real_label_val=1.0,
fake_label_val=0))
scale = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
model_cfg = dict(
type='TTSR',
generator=dict(
type='TTSRNet',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=(16, 16, 8, 4)),
extractor=dict(type='LTE'),
transformer=dict(type='SearchTransformer'),
discriminator=dict(type='TTSRDiscriminator', in_size=64),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
perceptual_loss=dict(
type='PerceptualLoss',
layer_weights={'29': 1.0},
vgg_type='vgg19',
perceptual_weight=1e-2,
style_weight=0.001,
criterion='mse'),
transferal_perceptual_loss=dict(
type='TransferalPerceptualLoss',
loss_weight=1e-2,
use_attention=False,
criterion='mse'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=1e-3,
real_label_val=1.0,
fake_label_val=0))
scale = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'TTSR'
# prepare data
inputs = torch.rand(1, 3, 16, 16)
targets = torch.rand(1, 3, 64, 64)
ref = torch.rand(1, 3, 64, 64)
data_batch = {
'lq': inputs,
'gt': targets,
'ref': ref,
'lq_up': ref,
'ref_downup': ref
}
# prepare optimizer
optim_cfg_g = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optim_cfg_d = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = dict(
generator=obj_from_dict(optim_cfg_g, torch.optim,
dict(params=restorer.parameters())),
discriminator=obj_from_dict(optim_cfg_d, torch.optim,
dict(params=restorer.parameters())))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 64, 64)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'ref': ref.cuda(),
'lq_up': ref.cuda(),
'ref_downup': ref.cuda()
}
# train_step
optimizer = dict(
generator=obj_from_dict(optim_cfg_g, torch.optim,
dict(params=restorer.parameters())),
discriminator=obj_from_dict(optim_cfg_d, torch.optim,
dict(params=restorer.parameters())))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 64, 64)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| 7,308 | 33.63981 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_esrgan.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import MSRResNet
from mmedit.models.components import ModifiedVGG
from mmedit.models.losses import GANLoss, L1Loss
def test_esrgan():
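    """Smoke-test ESRGAN: train_step with pixel, perceptual and GAN losses
    on CPU and GPU, discriminator step scheduling, and variants with
    individual perceptual/style terms removed."""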
model_cfg = dict(
type='ESRGAN',
generator=dict(
type='MSRResNet',
in_channels=3,
out_channels=3,
mid_channels=4,
num_blocks=1,
upscale_factor=4),
discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0,
loss_weight=5e-3))
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'ESRGAN'
assert isinstance(restorer.generator, MSRResNet)
assert isinstance(restorer.discriminator, ModifiedVGG)
assert isinstance(restorer.pixel_loss, L1Loss)
assert isinstance(restorer.gan_loss, GANLoss)
# prepare data
inputs = torch.rand(1, 3, 32, 32)
targets = torch.rand(1, 3, 128, 128)
data_batch = {'lq': inputs, 'gt': targets}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(
params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
# test train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0).cuda(),
torch.tensor(2.0).cuda())):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real',
'loss_d_fake', 'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'],
data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'],
data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test disc_steps and disc_init_steps
data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}
train_cfg = dict(disc_steps=2, disc_init_steps=2)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test without pixel loss and perceptual loss
model_cfg_ = model_cfg.copy()
model_cfg_.pop('pixel_loss')
restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_percep
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(None, torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_style
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(torch.tensor(2.0), None)):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
| 7,807 | 39.666667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_edvr_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import EDVRNet
from mmedit.models.losses import L1Loss
def test_edvr_model():
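    """Smoke-test EDVR: train_step with and without the TSA fusion module,
    the tsa_iter requirement, forward_dummy/forward_test shapes, and metric
    evaluation with image saving. Runs only when CUDA is available."""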
model_cfg = dict(
type='EDVR',
generator=dict(
type='EDVRNet',
in_channels=3,
out_channels=3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='sum'),
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'EDVR'
assert isinstance(restorer.generator, EDVRNet)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 5, 3, 8, 8)
targets = torch.rand(1, 3, 32, 32)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 32, 32)
# with TSA
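        # With with_tsa=True, train_cfg must supply tsa_iter; in mmedit's
        # EDVR only the TSA fusion module is optimised for the first
        # tsa_iter iterations before the whole generator is trained.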
model_cfg['generator']['with_tsa'] = True
with pytest.raises(KeyError):
# In TSA mode, train_cfg must contain "tsa_iter"
            train_cfg = dict(other_content='xxx')  # any dict missing tsa_iter
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = restorer.train_step(data_batch, optimizer)
train_cfg = None
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = restorer.train_step(data_batch, optimizer)
train_cfg = mmcv.ConfigDict(tsa_iter=1)
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train without updating tsa module
outputs = restorer.train_step(data_batch, optimizer)
# train with updating tsa module
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 32, 32)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 32, 32)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
with torch.no_grad():
outputs = restorer(inputs.cuda(), test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
# test with metric and save image
if torch.cuda.is_available():
train_cfg = mmcv.ConfigDict(tsa_iter=1)
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'meta': [{
'gt_path': 'fake_path/fake_name.png',
'key': '000/00000000'
}]
}
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs.cuda(), test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
| 6,625 | 35.406593 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_srgan.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import MSRResNet
from mmedit.models.components import ModifiedVGG
from mmedit.models.losses import GANLoss, L1Loss
def test_srgan():
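    """Smoke-test SRGAN: attribute wiring, forward_test/forward_dummy/
    val_step shapes, train_step with pixel, perceptual and GAN losses on
    CPU and GPU, and discriminator step scheduling."""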
model_cfg = dict(
type='SRGAN',
generator=dict(
type='MSRResNet',
in_channels=3,
out_channels=3,
mid_channels=4,
num_blocks=1,
upscale_factor=4),
discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0,
loss_weight=5e-3))
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'SRGAN'
assert isinstance(restorer.generator, MSRResNet)
assert isinstance(restorer.discriminator, ModifiedVGG)
assert isinstance(restorer.pixel_loss, L1Loss)
assert isinstance(restorer.gan_loss, GANLoss)
# prepare data
inputs = torch.rand(1, 3, 32, 32)
targets = torch.rand(1, 3, 128, 128)
data_batch = {'lq': inputs, 'gt': targets}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(
params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
# no forward train in GAN models, raise ValueError
with pytest.raises(ValueError):
restorer(**data_batch, test_mode=False)
# test forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 128, 128)
# val_step
with torch.no_grad():
outputs = restorer.val_step(data_batch)
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'discriminator').parameters()))
}
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# val_step
with torch.no_grad():
outputs = restorer.val_step(data_batch)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# train_step
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0).cuda(),
torch.tensor(2.0).cuda())):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real',
'loss_d_fake', 'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'],
data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'],
data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test disc_steps and disc_init_steps
data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}
train_cfg = dict(disc_steps=2, disc_init_steps=2)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(
restorer,
'perceptual_loss',
return_value=(torch.tensor(1.0), torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
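    # With disc_steps=2 and disc_init_steps=2 the first iterations train only
    # the discriminator (the generator update is skipped), which is presumably
    # why log_vars above contains only loss_d_real / loss_d_fake.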
# test no discriminator (testing mode)
model_cfg_ = model_cfg.copy()
model_cfg_.pop('discriminator')
restorer = build_model(model_cfg_, train_cfg=train_cfg, test_cfg=test_cfg)
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test without pixel loss and perceptual loss
model_cfg_ = model_cfg.copy()
model_cfg_.pop('pixel_loss')
restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_percep
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(None, torch.tensor(2.0))):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step w/o loss_style
restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
with patch.object(
restorer, 'perceptual_loss',
return_value=(torch.tensor(2.0), None)):
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
for v in [
'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
'loss_pix'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
| 9,665 | 39.107884 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_restorers/test_basic_restorer.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import MSRResNet
from mmedit.models.losses import L1Loss
def test_basic_restorer():
model_cfg = dict(
type='BasicRestorer',
generator=dict(
type='MSRResNet',
in_channels=3,
out_channels=3,
mid_channels=4,
num_blocks=1,
upscale_factor=4),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'BasicRestorer'
assert isinstance(restorer.generator, MSRResNet)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 20, 20)
targets = torch.rand(1, 3, 80, 80)
data_batch = {'lq': inputs, 'gt': targets}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
}
# test forward train
outputs = restorer(**data_batch, test_mode=False)
assert isinstance(outputs, dict)
assert isinstance(outputs['losses'], dict)
assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 80, 80)
# test forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 80, 80)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 80, 80)
# test train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'])
assert torch.equal(outputs['results']['gt'], data_batch['gt'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 80, 80)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer['generator'] = obj_from_dict(
optim_cfg, torch.optim, dict(params=restorer.parameters()))
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# test forward train
outputs = restorer(**data_batch, test_mode=False)
assert isinstance(outputs, dict)
assert isinstance(outputs['losses'], dict)
assert isinstance(outputs['losses']['loss_pix'],
torch.cuda.FloatTensor)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 80, 80)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 80, 80)
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 80, 80)
# test with metric and save image
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs,
'gt': targets,
'meta': [{
'lq_path': 'fake_path/fake_name.png'
}]
}
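    # 'meta' carries the source path used to name saved images; the
    # 'iteration' argument exercised below appends a per-checkpoint suffix
    # (an assumption about the saving convention, not asserted here).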
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs, test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
| 6,226 | 35.415205 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_video_interpolator/test_basic_interpolator.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class InterpolateExample(nn.Module):
"""An example of interpolate network for testing BasicInterpolator.
"""
def __init__(self):
super().__init__()
self.layer = nn.Conv2d(3, 3, 3, 1, 1)
def forward(self, x):
return self.layer(x[:, 0])
def init_weights(self, pretrained=None):
pass
@COMPONENTS.register_module()
class InterpolateExample2(nn.Module):
"""An example of interpolate network for testing BasicInterpolator.
"""
def __init__(self):
super().__init__()
self.layer = nn.Conv2d(3, 3, 3, 1, 1)
def forward(self, x):
return self.layer(x[:, 0]).unsqueeze(1)
def init_weights(self, pretrained=None):
pass
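# Note: InterpolateExample produces a 4-D (N, C, H, W) output while
# InterpolateExample2 keeps the time axis (N, 1, C, H, W); the latter is used
# below to exercise BasicInterpolator's 5-D output path.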
def test_basic_interpolator():
model_cfg = dict(
type='BasicInterpolator',
generator=dict(type='InterpolateExample'),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'BasicInterpolator'
assert isinstance(restorer.generator, InterpolateExample)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 2, 3, 20, 20)
target = torch.rand(1, 3, 20, 20)
data_batch = {'inputs': inputs, 'target': target}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
}
# test forward train
outputs = restorer(**data_batch, test_mode=False)
assert isinstance(outputs, dict)
assert isinstance(outputs['losses'], dict)
assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
assert torch.equal(outputs['results']['target'], data_batch['target'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 20, 20)
# test forward_test
with torch.no_grad():
restorer.val_step(data_batch)
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['inputs'], data_batch['inputs'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 20, 20)
assert outputs['output'].max() <= 1.
assert outputs['output'].min() >= 0.
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['inputs'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 20, 20)
# test train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
assert torch.equal(outputs['results']['target'], data_batch['target'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 20, 20)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer['generator'] = obj_from_dict(
optim_cfg, torch.optim, dict(params=restorer.parameters()))
data_batch = {'inputs': inputs.cuda(), 'target': target.cuda()}
# test forward train
outputs = restorer(**data_batch, test_mode=False)
assert isinstance(outputs, dict)
assert isinstance(outputs['losses'], dict)
assert isinstance(outputs['losses']['loss_pix'],
torch.cuda.FloatTensor)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'],
data_batch['inputs'].cpu())
assert torch.equal(outputs['results']['target'],
data_batch['target'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 20, 20)
# forward_test
with torch.no_grad():
restorer.val_step(data_batch)
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 20, 20)
assert outputs['output'].max() <= 1.
assert outputs['output'].min() >= 0.
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'],
data_batch['inputs'].cpu())
assert torch.equal(outputs['results']['target'],
data_batch['target'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 20, 20)
# test with metric and save image
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'inputs': inputs,
'target': target,
'meta': [{
'key': '000001/0000',
'target_path': 'fake_path/fake_name.png'
}]
}
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with pytest.raises(AssertionError):
# evaluation with metrics must have target images
restorer(inputs=inputs, test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
inputs=inputs,
target=target,
meta=[{
'key':
'000001/0000',
'inputs_path':
['fake_path/fake_name.png', 'fake_path/fake_name.png']
}],
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
    # test forward_test when the output is a 5-D tensor (ndim == 5)
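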
model_cfg = dict(
type='BasicInterpolator',
generator=dict(type='InterpolateExample2'),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
train_cfg = None
test_cfg = None
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
outputs = restorer(
inputs=inputs,
target=target.unsqueeze(1),
meta=[{
'key':
'000001/0000',
'inputs_path':
['fake_path/fake_name.png', 'fake_path/fake_name.png']
}],
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
outputs = restorer(
inputs=inputs,
target=target.unsqueeze(1),
meta=[{
'key':
'000001/0000',
'inputs_path':
['fake_path/fake_name.png', 'fake_path/fake_name.png']
}],
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
# test merge_frames
input_tensors = torch.rand(2, 2, 3, 256, 256)
output_tensors = torch.rand(2, 1, 3, 256, 256)
result = restorer.merge_frames(input_tensors, output_tensors)
assert isinstance(result, list)
assert len(result) == 5
assert result[0].shape == (256, 256, 3)
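    # merge_frames appears to interleave inputs and predictions: the first
    # frame of each pair, its predicted middle frame, then the trailing frame
    # of the last pair, giving 2 + 2 + 1 = 5 HWC numpy images.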
# test split_frames
tensors = torch.rand(1, 10, 3, 256, 256)
result = restorer.split_frames(tensors)
assert isinstance(result, torch.Tensor)
assert result.shape == (9, 2, 3, 256, 256)
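    # split_frames slides a length-2 window over the 10 frames, producing
    # 10 - 1 = 9 overlapping input pairs.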
# test evaluate 5d output
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
output = torch.rand(1, 2, 3, 256, 256)
target = torch.rand(1, 2, 3, 256, 256)
restorer.evaluate(output, target)
| 10,311 | 34.07483 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_video_interpolator/test_cain.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import CAINNet
from mmedit.models.losses import L1Loss
def test_cain():
model_cfg = dict(
type='CAIN',
generator=dict(type='CAINNet'),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'CAIN'
assert isinstance(restorer.generator, CAINNet)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 2, 3, 128, 128)
target = torch.rand(1, 3, 128, 128)
data_batch = {'inputs': inputs, 'target': target, 'meta': [{'key': '001'}]}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
}
# test forward_test
with torch.no_grad():
outputs = restorer.forward_test(**data_batch)
assert torch.equal(outputs['inputs'], data_batch['inputs'])
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# test train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
assert torch.equal(outputs['results']['target'], data_batch['target'])
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
optimizer['generator'] = obj_from_dict(
optim_cfg, torch.optim, dict(params=restorer.parameters()))
data_batch = {
'inputs': inputs.cuda(),
'target': target.cuda(),
'meta': [{
'key': '001'
}]
}
# forward_test
with torch.no_grad():
outputs = restorer.forward_test(**data_batch)
assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 128, 128)
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['inputs'],
data_batch['inputs'].cpu())
assert torch.equal(outputs['results']['target'],
data_batch['target'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 128, 128)
# test with metric and save image
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'inputs': inputs,
'target': target,
'meta': [{
'key': 'fake_path/fake_name'
}]
}
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with pytest.raises(AssertionError):
# evaluation with metrics must have target images
restorer(inputs=inputs, test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
| 4,692 | 33.762963 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_mattors/test_mattors.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from unittest.mock import patch
import mmcv
import numpy as np
import pytest
import torch
from mmedit.models import BaseMattor, build_model
def _get_model_cfg(fname):
"""
Grab configs necessary to create a model. These are deep copied to allow
for safe modification of parameters without influencing other tests.
"""
config_dpath = 'configs/mattors'
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config.model, config.train_cfg, config.test_cfg
def assert_dict_keys_equal(dictionary, target_keys):
"""Check if the keys of the dictionary is equal to the target key set."""
assert isinstance(dictionary, dict)
assert set(dictionary.keys()) == set(target_keys)
@patch.multiple(BaseMattor, __abstractmethods__=set())
def test_base_mattor():
backbone = dict(
type='SimpleEncoderDecoder',
encoder=dict(type='VGG16', in_channels=4),
decoder=dict(type='PlainDecoder'))
refiner = dict(type='PlainRefiner')
train_cfg = mmcv.ConfigDict(train_backbone=True, train_refiner=True)
test_cfg = mmcv.ConfigDict(
refine=True, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])
with pytest.raises(KeyError):
# metrics should be specified in test_cfg
BaseMattor(
backbone,
refiner,
train_cfg.copy(),
test_cfg=mmcv.ConfigDict(refine=True))
with pytest.raises(KeyError):
        # each metric should be one of {'SAD', 'MSE', 'GRAD', 'CONN'}
BaseMattor(
backbone,
refiner,
train_cfg.copy(),
test_cfg=mmcv.ConfigDict(
refine=True, metrics=['UnsupportedMetric']))
with pytest.raises(TypeError):
# metrics must be None or a list of str
BaseMattor(
backbone,
refiner,
train_cfg.copy(),
test_cfg=mmcv.ConfigDict(refine=True, metrics='SAD'))
# build mattor without refiner
mattor = BaseMattor(
backbone, refiner=None, train_cfg=None, test_cfg=test_cfg.copy())
assert not mattor.with_refiner
# only train the refiner, this will freeze the backbone
mattor = BaseMattor(
backbone,
refiner,
train_cfg=mmcv.ConfigDict(train_backbone=False, train_refiner=True),
test_cfg=test_cfg.copy())
assert not mattor.train_cfg.train_backbone
assert mattor.train_cfg.train_refiner
assert mattor.test_cfg.refine
# only train the backbone while the refiner is used for inference but not
# trained, this behavior is allowed currently but will cause a warning.
mattor = BaseMattor(
backbone,
refiner,
train_cfg=mmcv.ConfigDict(train_backbone=True, train_refiner=False),
test_cfg=test_cfg.copy())
assert mattor.train_cfg.train_backbone
assert not mattor.train_cfg.train_refiner
assert mattor.test_cfg.refine
def test_dim():
model_cfg, train_cfg, test_cfg = _get_model_cfg(
'dim/dim_stage3_v16_pln_1x1_1000k_comp1k.py')
model_cfg['pretrained'] = None
# 1. test dim model with refiner
train_cfg.train_refiner = True
test_cfg.refine = True
# test model forward in train mode
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
input_train = _demo_input_train((64, 64))
output_train = model(**input_train)
assert output_train['num_samples'] == 1
assert_dict_keys_equal(output_train['losses'],
['loss_alpha', 'loss_comp', 'loss_refine'])
# test model forward in train mode with gpu
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
model.cuda()
input_train = _demo_input_train((64, 64), cuda=True)
output_train = model(**input_train)
assert output_train['num_samples'] == 1
assert_dict_keys_equal(output_train['losses'],
['loss_alpha', 'loss_comp', 'loss_refine'])
# test model forward in test mode
with torch.no_grad():
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
input_test = _demo_input_test((64, 64))
output_test = model(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert_dict_keys_equal(output_test['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
# test model forward in test mode with gpu
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
model.cuda()
input_test = _demo_input_test((64, 64), cuda=True)
output_test = model(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert_dict_keys_equal(output_test['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
# 2. test dim model without refiner
model_cfg['refiner'] = None
test_cfg['metrics'] = None
# test model forward in train mode
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
input_train = _demo_input_train((64, 64))
output_train = model(**input_train)
assert output_train['num_samples'] == 1
assert_dict_keys_equal(output_train['losses'], ['loss_alpha', 'loss_comp'])
# test model forward in train mode with gpu
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
model.cuda()
input_train = _demo_input_train((64, 64), cuda=True)
output_train = model(**input_train)
assert output_train['num_samples'] == 1
assert_dict_keys_equal(output_train['losses'],
['loss_alpha', 'loss_comp'])
# test model forward in test mode
with torch.no_grad():
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
input_test = _demo_input_test((64, 64))
output_test = model(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert output_test['eval_result'] is None
# check test with gpu
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
model.cuda()
input_test = _demo_input_test((64, 64), cuda=True)
output_test = model(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert output_test['eval_result'] is None
# test forward_dummy
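    # (forward_dummy runs a bare generator pass on a 4-channel RGB + trimap
    # input; presumably used for FLOPs / complexity estimation)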
model.cpu().eval()
inputs = torch.ones((1, 4, 32, 32))
model.forward_dummy(inputs)
def test_indexnet():
model_cfg, _, test_cfg = _get_model_cfg(
'indexnet/indexnet_mobv2_1x16_78k_comp1k.py')
model_cfg['pretrained'] = None
# test indexnet inference
with torch.no_grad():
indexnet = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
indexnet.eval()
input_test = _demo_input_test((64, 64))
output_test = indexnet(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert output_test['pred_alpha'].shape == (64, 64)
assert_dict_keys_equal(output_test['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
# test inference with gpu
if torch.cuda.is_available():
indexnet = build_model(
model_cfg, train_cfg=None, test_cfg=test_cfg).cuda()
indexnet.eval()
input_test = _demo_input_test((64, 64), cuda=True)
output_test = indexnet(**input_test, test_mode=True)
assert isinstance(output_test['pred_alpha'], np.ndarray)
assert output_test['pred_alpha'].shape == (64, 64)
assert_dict_keys_equal(output_test['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
    # test forward train, though training is not guaranteed at present
model_cfg.loss_alpha = None
model_cfg.loss_comp = dict(type='L1CompositionLoss')
indexnet = build_model(
model_cfg,
train_cfg=mmcv.ConfigDict(train_backbone=True),
test_cfg=test_cfg)
input_train = _demo_input_train((64, 64), batch_size=2)
output_train = indexnet(**input_train)
assert output_train['num_samples'] == 2
assert_dict_keys_equal(output_train['losses'], ['loss_comp'])
if torch.cuda.is_available():
model_cfg.loss_alpha = dict(type='L1Loss')
model_cfg.loss_comp = None
indexnet = build_model(
model_cfg,
train_cfg=mmcv.ConfigDict(train_backbone=True),
test_cfg=test_cfg).cuda()
input_train = _demo_input_train((64, 64), batch_size=2, cuda=True)
output_train = indexnet(**input_train)
assert output_train['num_samples'] == 2
assert_dict_keys_equal(output_train['losses'], ['loss_alpha'])
# test forward_dummy
indexnet.cpu().eval()
inputs = torch.ones((1, 4, 32, 32))
indexnet.forward_dummy(inputs)
def test_gca():
model_cfg, train_cfg, test_cfg = _get_model_cfg(
'gca/gca_r34_4x10_200k_comp1k.py')
model_cfg['pretrained'] = None
# test model forward in train mode
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
inputs = _demo_input_train((64, 64), batch_size=2)
inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
inputs['meta'][0]['to_onehot'] = True
outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
inputs['alpha'])
assert outputs['num_samples'] == 2
assert_dict_keys_equal(outputs['losses'], ['loss'])
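    # GCA is trained with a 3-channel one-hot trimap (hence expand_as and the
    # to_onehot flag); the test branch below instead rebuilds the encoder with
    # in_channels=4 for merged RGB plus a single-channel trimap.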
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
model.cuda()
inputs = _demo_input_train((64, 64), batch_size=2, cuda=True)
inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
inputs['meta'][0]['to_onehot'] = True
outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
inputs['alpha'])
assert outputs['num_samples'] == 2
assert_dict_keys_equal(outputs['losses'], ['loss'])
# test model forward in test mode
with torch.no_grad():
model_cfg.backbone.encoder.in_channels = 4
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
inputs = _demo_input_test((64, 64))
outputs = model(**inputs, test_mode=True)
assert_dict_keys_equal(outputs['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
if torch.cuda.is_available():
model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
model.cuda()
inputs = _demo_input_test((64, 64), cuda=True)
outputs = model(**inputs, test_mode=True)
assert_dict_keys_equal(outputs['eval_result'],
['SAD', 'MSE', 'GRAD', 'CONN'])
# test forward_dummy
model.cpu().eval()
inputs = torch.ones((1, 4, 32, 32))
model.forward_dummy(inputs)
def _demo_input_train(img_shape, batch_size=1, cuda=False):
"""
Create a superset of inputs needed to run backbone.
Args:
img_shape (tuple): shape of the input image.
batch_size (int): batch size of the input batch.
cuda (bool): whether transfer input into gpu.
"""
color_shape = (batch_size, 3, img_shape[0], img_shape[1])
gray_shape = (batch_size, 1, img_shape[0], img_shape[1])
merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
trimap = torch.from_numpy(
np.random.randint(255, size=gray_shape).astype(np.float32))
meta = [{}] * batch_size
alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
ori_merged = torch.from_numpy(
np.random.random(color_shape).astype(np.float32))
fg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
bg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
if cuda:
merged = merged.cuda()
trimap = trimap.cuda()
alpha = alpha.cuda()
ori_merged = ori_merged.cuda()
fg = fg.cuda()
bg = bg.cuda()
return dict(
merged=merged,
trimap=trimap,
meta=meta,
alpha=alpha,
ori_merged=ori_merged,
fg=fg,
bg=bg)
def _demo_input_test(img_shape, batch_size=1, cuda=False, test_trans='resize'):
"""
Create a superset of inputs needed to run backbone.
Args:
img_shape (tuple): shape of the input image.
batch_size (int): batch size of the input batch.
cuda (bool): whether transfer input into gpu.
test_trans (str): what test transformation is used in data pipeline.
"""
color_shape = (batch_size, 3, img_shape[0], img_shape[1])
gray_shape = (batch_size, 1, img_shape[0], img_shape[1])
merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
trimap = torch.from_numpy(
np.random.randint(255, size=gray_shape).astype(np.float32))
ori_alpha = np.random.random(img_shape).astype(np.float32)
ori_trimap = np.random.randint(256, size=img_shape).astype(np.float32)
if cuda:
merged = merged.cuda()
trimap = trimap.cuda()
meta = [
dict(
ori_alpha=ori_alpha,
ori_trimap=ori_trimap,
merged_ori_shape=img_shape)
] * batch_size
if test_trans == 'pad':
meta[0]['pad'] = (0, 0)
elif test_trans == 'resize':
# we just test bilinear as the interpolation method
meta[0]['interpolation'] = 'bilinear'
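    # 'pad' records the padding to crop away after inference, while
    # 'interpolation' lets predictions be resized back to the original shape
    # (assumptions about how the test-time transforms are inverted).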
return dict(merged=merged, trimap=trimap, meta=meta)
| 14,057 | 37.620879 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_losses/test_losses.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from unittest.mock import patch
import numpy
import numpy.testing as npt
import pytest
import torch
from mmedit.models import (CharbonnierCompLoss, CharbonnierLoss, DiscShiftLoss,
GANLoss, GaussianBlur, GradientLoss,
GradientPenaltyLoss, L1CompositionLoss, L1Loss,
MaskedTVLoss, MSECompositionLoss, MSELoss,
PerceptualLoss, PerceptualVGG,
TransferalPerceptualLoss, mask_reduce_loss,
reduce_loss)
def test_utils():
loss = torch.rand(1, 3, 4, 4)
weight = torch.zeros(1, 3, 4, 4)
weight[:, :, :2, :2] = 1
# test reduce_loss()
reduced = reduce_loss(loss, 'none')
assert reduced is loss
reduced = reduce_loss(loss, 'mean')
npt.assert_almost_equal(reduced.numpy(), loss.mean())
reduced = reduce_loss(loss, 'sum')
npt.assert_almost_equal(reduced.numpy(), loss.sum())
# test mask_reduce_loss()
reduced = mask_reduce_loss(loss, weight=None, reduction='none')
assert reduced is loss
reduced = mask_reduce_loss(loss, weight=weight, reduction='mean')
target = (loss *
weight).sum(dim=[1, 2, 3]) / weight.sum(dim=[1, 2, 3]).mean()
npt.assert_almost_equal(reduced.numpy(), target)
reduced = mask_reduce_loss(loss, weight=weight, reduction='sum')
npt.assert_almost_equal(reduced.numpy(), (loss * weight).sum())
weight_single_channel = weight[:, 0:1, ...]
reduced = mask_reduce_loss(
loss, weight=weight_single_channel, reduction='mean')
target = (loss *
weight).sum(dim=[1, 2, 3]) / weight.sum(dim=[1, 2, 3]).mean()
npt.assert_almost_equal(reduced.numpy(), target)
loss_b = torch.rand(2, 3, 4, 4)
weight_b = torch.zeros(2, 1, 4, 4)
weight_b[0, :, :3, :3] = 1
weight_b[1, :, :2, :2] = 1
reduced = mask_reduce_loss(loss_b, weight=weight_b, reduction='mean')
target = (loss_b * weight_b).sum() / weight_b.sum() / 3.
npt.assert_almost_equal(reduced.numpy(), target)
with pytest.raises(AssertionError):
weight_wrong = weight[0, 0, ...]
reduced = mask_reduce_loss(loss, weight=weight_wrong, reduction='mean')
with pytest.raises(AssertionError):
weight_wrong = weight[:, 0:2, ...]
reduced = mask_reduce_loss(loss, weight=weight_wrong, reduction='mean')
def test_pixelwise_losses():
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
L1Loss(reduction='InvalidValue')
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
MSELoss(reduction='InvalidValue')
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
CharbonnierLoss(reduction='InvalidValue')
unknown_h, unknown_w = (32, 32)
weight = torch.zeros(1, 1, 64, 64)
weight[0, 0, :unknown_h, :unknown_w] = 1
pred = weight.clone()
target = weight.clone() * 2
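    # pred is 1 and target is 2 on the 32x32 unknown quadrant, so |diff| = 1
    # there and 0 elsewhere: mean = 1024 / 4096 = 0.25, and the weighted sum
    # at loss_weight 0.5 is 0.5 * 1024 = 512.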
# test l1 loss
l1_loss = L1Loss(loss_weight=1.0, reduction='mean')
loss = l1_loss(pred, target)
assert loss.shape == ()
assert loss.item() == 0.25
l1_loss = L1Loss(loss_weight=0.5, reduction='none')
loss = l1_loss(pred, target, weight)
assert loss.shape == (1, 1, 64, 64)
assert (loss == torch.ones(1, 1, 64, 64) * weight * 0.5).all()
l1_loss = L1Loss(loss_weight=0.5, reduction='sum')
loss = l1_loss(pred, target, weight)
assert loss.shape == ()
assert loss.item() == 512
# test mse loss
mse_loss = MSELoss(loss_weight=1.0, reduction='mean')
loss = mse_loss(pred, target)
assert loss.shape == ()
assert loss.item() == 0.25
mse_loss = MSELoss(loss_weight=0.5, reduction='none')
loss = mse_loss(pred, target, weight)
assert loss.shape == (1, 1, 64, 64)
assert (loss == torch.ones(1, 1, 64, 64) * weight * 0.5).all()
mse_loss = MSELoss(loss_weight=0.5, reduction='sum')
loss = mse_loss(pred, target, weight)
assert loss.shape == ()
assert loss.item() == 512
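    # Charbonnier is sqrt(diff^2 + eps); with |diff| = 1 and a tiny eps it
    # reduces to the L1 values above (0.25 mean, 512 weighted sum).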
# test charbonnier loss
charbonnier_loss = CharbonnierLoss(
loss_weight=1.0, reduction='mean', eps=1e-12)
loss = charbonnier_loss(pred, target)
assert loss.shape == ()
assert math.isclose(loss.item(), 0.25, rel_tol=1e-5)
charbonnier_loss = CharbonnierLoss(
loss_weight=0.5, reduction='none', eps=1e-6)
loss = charbonnier_loss(pred, target, weight)
assert loss.shape == (1, 1, 64, 64)
npt.assert_almost_equal(
loss.numpy(), torch.ones(1, 1, 64, 64) * weight * 0.5, decimal=6)
charbonnier_loss = CharbonnierLoss(
loss_weight=0.5, reduction='sum', eps=1e-12)
loss = charbonnier_loss(pred, target)
assert loss.shape == ()
assert math.isclose(loss.item(), 512, rel_tol=1e-5)
# test samplewise option, use L1Loss as an example
unknown_h, unknown_w = (32, 32)
weight = torch.zeros(2, 1, 64, 64)
weight[0, 0, :unknown_h, :unknown_w] = 1
# weight[1, 0, :unknown_h // 2, :unknown_w // 2] = 1
pred = weight.clone()
target = weight.clone()
    # make the mean l1_loss of the first sample differ from the second
target[0, ...] *= 2
l1_loss = L1Loss(loss_weight=1.0, reduction='mean', sample_wise=True)
loss = l1_loss(pred, target, weight)
assert loss.shape == ()
assert loss.item() == 0.5
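    # With sample_wise=True the masked loss is presumably normalized per
    # sample before averaging over the batch: sample 0 has mean 1 over its
    # mask, sample 1 has an all-zero weight and contributes 0, so (1 + 0) / 2.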
masked_tv_loss = MaskedTVLoss(loss_weight=1.0)
pred = torch.zeros((1, 1, 6, 6))
mask = torch.zeros_like(pred)
mask[..., 2:4, 2:4] = 1.
pred[..., 3, :] = 1.
loss = masked_tv_loss(pred, mask)
assert loss.shape == ()
npt.assert_almost_equal(loss.item(), 1.)
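    # Sketch of the expected value: only the vertical transitions into and out
    # of the all-ones row 3 fall inside the 2:4 mask window, and those masked
    # |diff| values are all 1 while every horizontal diff is 0, so the masked
    # TV loss is 1.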
def test_composition_losses():
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
L1CompositionLoss(reduction='InvalidValue')
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
MSECompositionLoss(reduction='InvalidValue')
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
CharbonnierCompLoss(reduction='InvalidValue')
unknown_h, unknown_w = (32, 32)
weight = torch.zeros(1, 1, 64, 64)
weight[0, 0, :unknown_h, :unknown_w] = 1
pred_alpha = weight.clone() * 0.5
ori_merged = torch.ones(1, 3, 64, 64)
fg = torch.zeros(1, 3, 64, 64)
bg = torch.ones(1, 3, 64, 64) * 4
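    # Composited merge = alpha * fg + (1 - alpha) * bg, i.e. 2 on the unknown
    # quadrant (alpha = 0.5, bg = 4) and 4 elsewhere, so |comp - ori_merged|
    # is 1 or 3 per pixel: mean L1 = (1024 * 1 + 3072 * 3) / 4096 = 2.5,
    # mean MSE = (1024 * 1 + 3072 * 9) / 4096 = 7.0, and the masked sums are
    # 0.5 * 3 * 1024 = 1536.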
l1_comp_loss = L1CompositionLoss(loss_weight=1.0, reduction='mean')
loss = l1_comp_loss(pred_alpha, fg, bg, ori_merged)
assert loss.shape == ()
assert loss.item() == 2.5
l1_comp_loss = L1CompositionLoss(loss_weight=0.5, reduction='none')
loss = l1_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == (1, 3, 64, 64)
assert (loss == torch.ones(1, 3, 64, 64) * weight * 0.5).all()
l1_comp_loss = L1CompositionLoss(loss_weight=0.5, reduction='sum')
loss = l1_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == ()
assert loss.item() == 1536
mse_comp_loss = MSECompositionLoss(loss_weight=1.0, reduction='mean')
loss = mse_comp_loss(pred_alpha, fg, bg, ori_merged)
assert loss.shape == ()
assert loss.item() == 7.0
mse_comp_loss = MSECompositionLoss(loss_weight=0.5, reduction='none')
loss = mse_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == (1, 3, 64, 64)
assert (loss == torch.ones(1, 3, 64, 64) * weight * 0.5).all()
mse_comp_loss = MSECompositionLoss(loss_weight=0.5, reduction='sum')
loss = mse_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == ()
assert loss.item() == 1536
cb_comp_loss = CharbonnierCompLoss(
loss_weight=1.0, reduction='mean', eps=1e-12)
loss = cb_comp_loss(pred_alpha, fg, bg, ori_merged)
assert loss.shape == ()
assert loss.item() == 2.5
cb_comp_loss = CharbonnierCompLoss(
loss_weight=0.5, reduction='none', eps=1e-6)
loss = cb_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == (1, 3, 64, 64)
npt.assert_almost_equal(
loss.numpy(), torch.ones(1, 3, 64, 64) * weight * 0.5, decimal=6)
cb_comp_loss = CharbonnierCompLoss(
loss_weight=0.5, reduction='sum', eps=1e-6)
loss = cb_comp_loss(pred_alpha, fg, bg, ori_merged, weight)
assert loss.shape == ()
assert math.isclose(loss.item(), 1536, rel_tol=1e-6)
@patch.object(PerceptualVGG, 'init_weights')
def test_perceptual_loss(init_weights):
if torch.cuda.is_available():
loss_percep = PerceptualLoss(layer_weights={'0': 1.}).cuda()
x = torch.randn(1, 3, 16, 16).cuda()
x.requires_grad = True
gt = torch.randn(1, 3, 16, 16).cuda()
percep, style = loss_percep(x, gt)
assert percep.item() > 0
assert style.item() > 0
optim = torch.optim.SGD(params=[x], lr=10)
optim.zero_grad()
percep.backward()
optim.step()
percep_new, _ = loss_percep(x, gt)
assert percep_new < percep
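        # One SGD step on x should reduce the perceptual distance to gt,
        # confirming that gradients flow back through the frozen VGG features.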
loss_percep = PerceptualLoss(
layer_weights={
'0': 1.
}, perceptual_weight=0.).cuda()
x = torch.randn(1, 3, 16, 16).cuda()
gt = torch.randn(1, 3, 16, 16).cuda()
percep, style = loss_percep(x, gt)
assert percep is None and style > 0
loss_percep = PerceptualLoss(
layer_weights={
'0': 1.
}, style_weight=0., criterion='mse').cuda()
x = torch.randn(1, 3, 16, 16).cuda()
gt = torch.randn(1, 3, 16, 16).cuda()
percep, style = loss_percep(x, gt)
assert style is None and percep > 0
loss_percep = PerceptualLoss(
layer_weights={
'0': 1.
}, layer_weights_style={
'1': 1.
}).cuda()
x = torch.randn(1, 3, 16, 16).cuda()
gt = torch.randn(1, 3, 16, 16).cuda()
percep, style = loss_percep(x, gt)
assert percep > 0 and style > 0
# test whether vgg type is valid
with pytest.raises(AssertionError):
loss_percep = PerceptualLoss(layer_weights={'0': 1.}, vgg_type='igccc')
# test whether criterion is valid
with pytest.raises(NotImplementedError):
loss_percep = PerceptualLoss(
layer_weights={'0': 1.}, criterion='igccc')
layer_name_list = ['2', '10', '30']
vgg_model = PerceptualVGG(
layer_name_list,
use_input_norm=False,
vgg_type='vgg16',
pretrained='torchvision://vgg16')
x = torch.rand((1, 3, 32, 32))
output = vgg_model(x)
assert isinstance(output, dict)
assert len(output) == len(layer_name_list)
assert set(output.keys()) == set(layer_name_list)
# test whether the layer name is valid
with pytest.raises(AssertionError):
layer_name_list = ['2', '10', '30', '100']
vgg_model = PerceptualVGG(
layer_name_list,
use_input_norm=False,
vgg_type='vgg16',
pretrained='torchvision://vgg16')
# reset mock to clear some memory usage
init_weights.reset_mock()
def test_t_perceptual_loss():
maps = [
torch.rand((2, 8, 8, 8), requires_grad=True),
torch.rand((2, 4, 16, 16), requires_grad=True)
]
textures = [torch.rand((2, 8, 8, 8)), torch.rand((2, 4, 16, 16))]
soft = torch.rand((2, 1, 8, 8))
loss_t_percep = TransferalPerceptualLoss()
t_percep = loss_t_percep(maps, soft, textures)
assert t_percep.item() > 0
loss_t_percep = TransferalPerceptualLoss(
use_attention=False, criterion='l1')
t_percep = loss_t_percep(maps, soft, textures)
assert t_percep.item() > 0
if torch.cuda.is_available():
maps = [
torch.rand((2, 8, 8, 8)).cuda(),
torch.rand((2, 4, 16, 16)).cuda()
]
textures = [
torch.rand((2, 8, 8, 8)).cuda(),
torch.rand((2, 4, 16, 16)).cuda()
]
soft = torch.rand((2, 1, 8, 8)).cuda()
loss_t_percep = TransferalPerceptualLoss().cuda()
maps[0].requires_grad = True
maps[1].requires_grad = True
t_percep = loss_t_percep(maps, soft, textures)
assert t_percep.item() > 0
optim = torch.optim.SGD(params=maps, lr=10)
optim.zero_grad()
t_percep.backward()
optim.step()
t_percep_new = loss_t_percep(maps, soft, textures)
assert t_percep_new < t_percep
loss_t_percep = TransferalPerceptualLoss(
use_attention=False, criterion='l1').cuda()
t_percep = loss_t_percep(maps, soft, textures)
assert t_percep.item() > 0
    # test whether criterion is valid
with pytest.raises(ValueError):
TransferalPerceptualLoss(criterion='l2')
def test_gan_losses():
"""Test gan losses."""
with pytest.raises(NotImplementedError):
GANLoss(
'xixihaha',
loss_weight=1.0,
real_label_val=1.0,
fake_label_val=0.0)
input_1 = torch.ones(1, 1)
input_2 = torch.ones(1, 3, 6, 6) * 2
# vanilla
gan_loss = GANLoss(
'vanilla', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_1, True, is_disc=False)
npt.assert_almost_equal(loss.item(), 0.6265233)
loss = gan_loss(input_1, False, is_disc=False)
npt.assert_almost_equal(loss.item(), 2.6265232)
loss = gan_loss(input_1, True, is_disc=True)
npt.assert_almost_equal(loss.item(), 0.3132616)
loss = gan_loss(input_1, False, is_disc=True)
npt.assert_almost_equal(loss.item(), 1.3132616)
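    # vanilla GANLoss is BCE-with-logits: for a logit of 1 the loss is
    # softplus(-1) ~= 0.3133 against the real label and softplus(1) ~= 1.3133
    # against the fake label; loss_weight (2.0) is applied only on the
    # generator side (is_disc=False).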
# lsgan
gan_loss = GANLoss(
'lsgan', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_2, True, is_disc=False)
npt.assert_almost_equal(loss.item(), 2.0)
loss = gan_loss(input_2, False, is_disc=False)
npt.assert_almost_equal(loss.item(), 8.0)
loss = gan_loss(input_2, True, is_disc=True)
npt.assert_almost_equal(loss.item(), 1.0)
loss = gan_loss(input_2, False, is_disc=True)
npt.assert_almost_equal(loss.item(), 4.0)
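    # lsgan is an MSE to the label value: (2 - 1)^2 = 1 and (2 - 0)^2 = 4,
    # doubled by loss_weight for the generator.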
# wgan
gan_loss = GANLoss(
'wgan', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_2, True, is_disc=False)
npt.assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
npt.assert_almost_equal(loss.item(), 4)
loss = gan_loss(input_2, True, is_disc=True)
npt.assert_almost_equal(loss.item(), -2.0)
loss = gan_loss(input_2, False, is_disc=True)
npt.assert_almost_equal(loss.item(), 2.0)
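    # wgan uses -mean(x) for real and mean(x) for fake, so +/-2 for a
    # constant input of 2, again doubled only for the generator.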
# hinge
gan_loss = GANLoss(
'hinge', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_2, True, is_disc=False)
npt.assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
npt.assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, True, is_disc=True)
npt.assert_almost_equal(loss.item(), 0.0)
loss = gan_loss(input_2, False, is_disc=True)
npt.assert_almost_equal(loss.item(), 3.0)
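    # hinge: the generator loss is -mean(x) regardless of the target, while
    # the discriminator uses relu(1 -/+ x): relu(1 - 2) = 0 for real and
    # relu(1 + 2) = 3 for fake.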
# smgan
mask = torch.ones(1, 3, 6, 6)
gan_loss = GANLoss(
'smgan', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_2, True, is_disc=False, mask=mask)
npt.assert_almost_equal(loss.item(), 2.0)
loss = gan_loss(input_2, False, is_disc=False, mask=mask)
npt.assert_almost_equal(loss.item(), 8.0)
loss = gan_loss(input_2, True, is_disc=True, mask=mask)
npt.assert_almost_equal(loss.item(), 1.0)
loss = gan_loss(input_2, False, is_disc=True, mask=mask)
npt.assert_almost_equal(loss.item(), 3.786323, decimal=6)
mask = torch.ones(1, 3, 6, 5)
loss = gan_loss(input_2, True, is_disc=False, mask=mask)
npt.assert_almost_equal(loss.item(), 2.0)
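    # smgan appears to weight an lsgan-style loss by a Gaussian-blurred mask;
    # with an all-ones mask the generator and real-discriminator values match
    # lsgan, while the fake-discriminator value (3.786...) reflects the
    # blurred-mask weighting.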
if torch.cuda.is_available():
input_2 = input_2.cuda()
mask = torch.ones(1, 3, 6, 6).cuda()
gan_loss = GANLoss(
'smgan', loss_weight=2.0, real_label_val=1.0, fake_label_val=0.0)
loss = gan_loss(input_2, True, is_disc=False, mask=mask)
npt.assert_almost_equal(loss.item(), 2.0)
loss = gan_loss(input_2, False, is_disc=False, mask=mask)
npt.assert_almost_equal(loss.item(), 8.0)
loss = gan_loss(input_2, True, is_disc=True, mask=mask)
npt.assert_almost_equal(loss.item(), 1.0)
loss = gan_loss(input_2, False, is_disc=True, mask=mask)
npt.assert_almost_equal(loss.item(), 3.786323, decimal=6)
# test GaussianBlur for smgan
    with pytest.raises(TypeError):
        gaussian_blur = GaussianBlur(kernel_size=71, sigma=2)
        gaussian_blur(mask).detach().cpu()
    with pytest.raises(TypeError):
        gaussian_blur = GaussianBlur(kernel_size=(70, 70))
        gaussian_blur(mask).detach().cpu()
    with pytest.raises(TypeError):
        mask = numpy.ones((1, 3, 6, 6))
        gaussian_blur = GaussianBlur()
        gaussian_blur(mask).detach().cpu()
    with pytest.raises(ValueError):
        mask = torch.ones(1, 3)
        gaussian_blur = GaussianBlur()
        gaussian_blur(mask).detach().cpu()
def test_gradient_penalty_losses():
"""Test gradient penalty losses."""
input = torch.ones(1, 3, 6, 6) * 2
gan_loss = GradientPenaltyLoss(loss_weight=10.0)
loss = gan_loss(lambda x: x, input, input, mask=None)
assert loss.item() > 0
mask = torch.ones(1, 3, 6, 6)
mask[:, :, 2:4, 2:4] = 0
loss = gan_loss(lambda x: x, input, input, mask=mask)
assert loss.item() > 0
def test_disc_shift_loss():
loss_disc_shift = DiscShiftLoss()
x = torch.Tensor([0.1])
loss = loss_disc_shift(x)
npt.assert_almost_equal(loss.item(), 0.001)
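    # DiscShiftLoss is presumably loss_weight * mean(x^2) with a default
    # loss_weight of 0.1, hence 0.1 * 0.1^2 = 0.001.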
def test_gradient_loss():
with pytest.raises(ValueError):
# only 'none', 'mean' and 'sum' are supported
GradientLoss(reduction='InvalidValue')
unknown_h, unknown_w = (32, 32)
weight = torch.zeros(1, 1, 64, 64)
weight[0, 0, :unknown_h, :unknown_w] = 1
pred = weight.clone()
target = weight.clone() * 2
gradient_loss = GradientLoss(loss_weight=1.0, reduction='mean')
loss = gradient_loss(pred, target)
assert loss.shape == ()
npt.assert_almost_equal(loss.item(), 0.1860352)
gradient_loss = GradientLoss(loss_weight=0.5, reduction='none')
loss = gradient_loss(pred, target, weight)
assert loss.shape == (1, 1, 64, 64)
npt.assert_almost_equal(torch.sum(loss).item(), 252)
gradient_loss = GradientLoss(loss_weight=0.5, reduction='sum')
loss = gradient_loss(pred, target, weight)
assert loss.shape == ()
npt.assert_almost_equal(loss.item(), 252)
| 18,730 | 34.542694 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_losses/test_feature_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.losses import LightCNNFeatureLoss
def test_light_cnn_feature_loss():
pretrained = 'https://download.openmmlab.com/mmediting/' + \
'restorers/dic/light_cnn_feature.pth'
pred = torch.rand((3, 3, 128, 128))
gt = torch.rand((3, 3, 128, 128))
feature_loss = LightCNNFeatureLoss(pretrained=pretrained)
loss = feature_loss(pred, gt)
assert loss.item() > 0
feature_loss = LightCNNFeatureLoss(pretrained=pretrained, criterion='mse')
loss = feature_loss(pred, gt)
assert loss.item() > 0
if torch.cuda.is_available():
pred = pred.cuda()
gt = gt.cuda()
feature_loss = feature_loss.cuda()
pred.requires_grad = True
loss = feature_loss(pred, gt)
assert loss.item() > 0
optim = torch.optim.SGD(params=[pred], lr=10)
optim.zero_grad()
loss.backward()
optim.step()
loss_new = feature_loss(pred, gt)
assert loss_new < loss
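        # As with the perceptual loss, one SGD step on pred should lower the
        # LightCNN feature distance, verifying the gradient path.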
feature_loss = LightCNNFeatureLoss(
pretrained=pretrained, criterion='mse').cuda()
loss = feature_loss(pred, gt)
assert loss.item() > 0
with pytest.raises(AssertionError):
feature_loss.model.train()
feature_loss(pred, gt)
# test criterion value error
with pytest.raises(ValueError):
LightCNNFeatureLoss(pretrained=pretrained, criterion='l2')
# test assert isinstance(pretrained, str)
with pytest.raises(AssertionError):
LightCNNFeatureLoss(pretrained=None)
| 1,600 | 28.648148 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_common_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmedit.models.common import (ASPP, DepthwiseSeparableConvModule,
GCAModule, LinearModule, MaskConvModule,
PartialConv2d, SimpleGatedConvModule)
def test_mask_conv_module():
with pytest.raises(KeyError):
        # 'conv' is not a registered conv layer type, so this raises KeyError
conv_cfg = dict(type='conv')
MaskConvModule(3, 8, 2, conv_cfg=conv_cfg)
with pytest.raises(AssertionError):
# norm_cfg must be a dict or None
norm_cfg = ['norm']
MaskConvModule(3, 8, 2, norm_cfg=norm_cfg)
with pytest.raises(AssertionError):
# order elements must be ('conv', 'norm', 'act')
order = ['conv', 'norm', 'act']
MaskConvModule(3, 8, 2, order=order)
with pytest.raises(AssertionError):
# order elements must be ('conv', 'norm', 'act')
order = ('conv', 'norm')
MaskConvModule(3, 8, 2, order=order)
with pytest.raises(KeyError):
# softmax is not supported
act_cfg = dict(type='softmax')
MaskConvModule(3, 8, 2, act_cfg=act_cfg)
conv_cfg = dict(type='PConv', multi_channel=True)
conv = MaskConvModule(3, 8, 2, conv_cfg=conv_cfg)
x = torch.rand(1, 3, 256, 256)
mask_in = torch.ones_like(x)
mask_in[..., 20:130, 120:150] = 0.
output, mask_update = conv(x, mask_in)
assert output.shape == (1, 8, 255, 255)
assert mask_update.shape == (1, 8, 255, 255)
# add test for ['norm', 'conv', 'act']
conv = MaskConvModule(
3, 8, 2, order=('norm', 'conv', 'act'), conv_cfg=conv_cfg)
x = torch.rand(1, 3, 256, 256)
output = conv(x, mask_in, return_mask=False)
assert output.shape == (1, 8, 255, 255)
conv = MaskConvModule(
3, 8, 3, padding=1, conv_cfg=conv_cfg, with_spectral_norm=True)
assert hasattr(conv.conv, 'weight_orig')
output = conv(x, return_mask=False)
assert output.shape == (1, 8, 256, 256)
conv = MaskConvModule(
3,
8,
3,
padding=1,
norm_cfg=dict(type='BN'),
padding_mode='reflect',
conv_cfg=conv_cfg)
assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
output = conv(x, mask_in, return_mask=False)
assert output.shape == (1, 8, 256, 256)
conv = MaskConvModule(
3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'), conv_cfg=conv_cfg)
output = conv(x, mask_in, return_mask=False)
assert output.shape == (1, 8, 256, 256)
with pytest.raises(KeyError):
conv = MaskConvModule(3, 8, 3, padding=1, padding_mode='igccc')
def test_pconv2d():
pconv2d = PartialConv2d(
3, 2, kernel_size=1, stride=1, multi_channel=True, eps=1e-8)
x = torch.rand(1, 3, 6, 6)
mask = torch.ones_like(x)
mask[..., 2, 2] = 0.
output, updated_mask = pconv2d(x, mask=mask)
assert output.shape == (1, 2, 6, 6)
assert updated_mask.shape == (1, 2, 6, 6)
output = pconv2d(x, mask=None)
assert output.shape == (1, 2, 6, 6)
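    # PartialConv2d presumably renormalizes each window by the ratio of valid
    # mask elements and propagates an updated mask that is 1 wherever the
    # window saw any valid input; with mask=None it degenerates to a plain
    # convolution.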
pconv2d = PartialConv2d(
3, 2, kernel_size=1, stride=1, multi_channel=True, eps=1e-8)
output = pconv2d(x, mask=None)
assert output.shape == (1, 2, 6, 6)
pconv2d = PartialConv2d(
3, 2, kernel_size=1, stride=1, multi_channel=False, eps=1e-8)
output = pconv2d(x, mask=None)
assert output.shape == (1, 2, 6, 6)
pconv2d = PartialConv2d(
3,
2,
kernel_size=1,
stride=1,
bias=False,
multi_channel=True,
eps=1e-8)
output = pconv2d(x, mask=mask, return_mask=False)
assert output.shape == (1, 2, 6, 6)
with pytest.raises(AssertionError):
pconv2d(x, mask=torch.ones(1, 1, 6, 6))
pconv2d = PartialConv2d(
3,
2,
kernel_size=1,
stride=1,
bias=False,
multi_channel=False,
eps=1e-8)
output = pconv2d(x, mask=None)
assert output.shape == (1, 2, 6, 6)
with pytest.raises(AssertionError):
output = pconv2d(x, mask=mask[0])
with pytest.raises(AssertionError):
output = pconv2d(x, mask=torch.ones(1, 3, 6, 6))
if torch.cuda.is_available():
pconv2d = PartialConv2d(
3,
2,
kernel_size=1,
stride=1,
bias=False,
multi_channel=False,
eps=1e-8).cuda().half()
output = pconv2d(x.cuda().half(), mask=None)
assert output.shape == (1, 2, 6, 6)
def test_depthwise_separable_conv():
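    # A depthwise separable conv factorises a convolution into a depthwise
    # conv (groups == in_channels) followed by a 1x1 pointwise conv.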
with pytest.raises(AssertionError):
# conv_cfg must be a dict or None
DepthwiseSeparableConvModule(4, 8, 2, groups=2)
# test default config
conv = DepthwiseSeparableConvModule(3, 8, 2)
assert conv.depthwise_conv.conv.groups == 3
assert conv.pointwise_conv.conv.kernel_size == (1, 1)
assert not conv.depthwise_conv.with_norm
assert not conv.pointwise_conv.with_norm
assert conv.depthwise_conv.activate.__class__.__name__ == 'ReLU'
assert conv.pointwise_conv.activate.__class__.__name__ == 'ReLU'
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert output.shape == (1, 8, 255, 255)
    # test dw_norm_cfg: the norm is applied to the depthwise conv only
conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
assert conv.depthwise_conv.norm_name == 'bn'
assert not conv.pointwise_conv.with_norm
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert output.shape == (1, 8, 255, 255)
conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
assert not conv.depthwise_conv.with_norm
assert conv.pointwise_conv.norm_name == 'bn'
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert output.shape == (1, 8, 255, 255)
    # add test for order ('norm', 'conv', 'act')
conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert output.shape == (1, 8, 255, 255)
conv = DepthwiseSeparableConvModule(
3, 8, 3, padding=1, with_spectral_norm=True)
assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
output = conv(x)
assert output.shape == (1, 8, 256, 256)
conv = DepthwiseSeparableConvModule(
3, 8, 3, padding=1, padding_mode='reflect')
assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
output = conv(x)
assert output.shape == (1, 8, 256, 256)
conv = DepthwiseSeparableConvModule(
3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
assert conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU'
assert conv.pointwise_conv.activate.__class__.__name__ == 'ReLU'
output = conv(x)
assert output.shape == (1, 8, 256, 256)
conv = DepthwiseSeparableConvModule(
3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
assert conv.depthwise_conv.activate.__class__.__name__ == 'ReLU'
assert conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU'
output = conv(x)
assert output.shape == (1, 8, 256, 256)
def test_aspp():
# test aspp with normal conv
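    # ASPP applies a 1x1 conv (convs[0]) and three 3x3 convs whose dilation
    # rates follow the `dilations` argument (convs[1:4]), then fuses them.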
aspp = ASPP(128, out_channels=512, mid_channels=128, dilations=(6, 12, 18))
assert aspp.convs[0].activate.__class__.__name__ == 'ReLU'
assert aspp.convs[0].conv.out_channels == 128
assert aspp.convs[1].__class__.__name__ == 'ConvModule'
for conv_idx in range(1, 4):
assert aspp.convs[conv_idx].conv.dilation[0] == 6 * conv_idx
x = torch.rand(2, 128, 8, 8)
output = aspp(x)
assert output.shape == (2, 512, 8, 8)
# test aspp with separable conv
aspp = ASPP(128, separable_conv=True)
assert aspp.convs[1].__class__.__name__ == 'DepthwiseSeparableConvModule'
x = torch.rand(2, 128, 8, 8)
output = aspp(x)
assert output.shape == (2, 256, 8, 8)
# test aspp with ReLU6
aspp = ASPP(128, dilations=(12, 24, 36), act_cfg=dict(type='ReLU6'))
assert aspp.convs[0].activate.__class__.__name__ == 'ReLU6'
for conv_idx in range(1, 4):
assert aspp.convs[conv_idx].conv.dilation[0] == 12 * conv_idx
x = torch.rand(2, 128, 8, 8)
output = aspp(x)
assert output.shape == (2, 256, 8, 8)
def test_gca_module():
img_feat = torch.rand(1, 128, 64, 64)
alpha_feat = torch.rand(1, 128, 64, 64)
unknown = None
gca = GCAModule(128, 128, rate=1)
output = gca(img_feat, alpha_feat, unknown)
assert output.shape == (1, 128, 64, 64)
img_feat = torch.rand(1, 128, 64, 64)
alpha_feat = torch.rand(1, 128, 64, 64)
unknown = torch.rand(1, 1, 64, 64)
gca = GCAModule(128, 128, rate=2)
output = gca(img_feat, alpha_feat, unknown)
assert output.shape == (1, 128, 64, 64)
def test_gated_conv():
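    # SimpleGatedConvModule doubles the conv output channels: one half is the
    # feature map and the other half the gate, combined as
    # feat_act(feat) * gate_act(gate), hence out_channels=10 -> conv out 20.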
conv = SimpleGatedConvModule(3, 10, 3, padding=1)
x = torch.rand((2, 3, 10, 10))
assert not conv.conv.with_activation
assert conv.with_feat_act
assert conv.with_gate_act
assert isinstance(conv.feat_act, nn.ELU)
assert isinstance(conv.gate_act, nn.Sigmoid)
assert conv.conv.out_channels == 20
out = conv(x)
assert out.shape == (2, 10, 10, 10)
conv = SimpleGatedConvModule(
3, 10, 3, padding=1, feat_act_cfg=None, gate_act_cfg=None)
assert not conv.with_gate_act
out = conv(x)
assert out.shape == (2, 10, 10, 10)
with pytest.raises(AssertionError):
conv = SimpleGatedConvModule(
3, 1, 3, padding=1, order=('linear', 'act', 'norm'))
conv = SimpleGatedConvModule(3, out_channels=10, kernel_size=3, padding=1)
assert conv.conv.out_channels == 20
out = conv(x)
assert out.shape == (2, 10, 10, 10)
def test_linear_module():
linear = LinearModule(10, 20)
linear.init_weights()
x = torch.rand((3, 10))
assert linear.with_bias
assert not linear.with_spectral_norm
assert linear.out_features == 20
assert linear.in_features == 10
assert isinstance(linear.activate, nn.ReLU)
y = linear(x)
assert y.shape == (3, 20)
linear = LinearModule(10, 20, act_cfg=None, with_spectral_norm=True)
assert hasattr(linear.linear, 'weight_orig')
assert not linear.with_activation
y = linear(x)
assert y.shape == (3, 20)
linear = LinearModule(
10, 20, act_cfg=dict(type='LeakyReLU'), with_spectral_norm=True)
y = linear(x)
assert y.shape == (3, 20)
assert isinstance(linear.activate, nn.LeakyReLU)
linear = LinearModule(
10, 20, bias=False, act_cfg=None, with_spectral_norm=True)
y = linear(x)
assert y.shape == (3, 20)
assert not linear.with_bias
linear = LinearModule(
10,
20,
bias=False,
act_cfg=None,
with_spectral_norm=True,
order=('act', 'linear'))
assert linear.order == ('act', 'linear')
| 10,851 | 31.984802 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_img_normalize.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.common import ImgNormalize
def test_normalize_layer():
rgb_mean = (1, 2, 3)
rgb_std = (1, 0.5, 0.25)
layer = ImgNormalize(1, rgb_mean, rgb_std)
x = torch.randn((2, 3, 64, 64))
y = layer(x)
x = x.permute((1, 0, 2, 3)).reshape((3, -1))
y = y.permute((1, 0, 2, 3)).reshape((3, -1))
rgb_mean = torch.tensor(rgb_mean)
rgb_std = torch.tensor(rgb_std)
mean_x = x.mean(dim=1)
mean_y = y.mean(dim=1)
std_x = x.std(dim=1)
std_y = y.std(dim=1)
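    # If y = (x - mean) / std, then std_x / std_y == std and
    # (mean_x - mean) / std == mean_y, which the checks below verify.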
assert sum(torch.div(std_x, std_y) - rgb_std) < 1e-5
assert sum(torch.div(mean_x - rgb_mean, rgb_std) - mean_y) < 1e-5
| 695 | 29.26087 | 69 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_sampling.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.common import PixelShufflePack, pixel_unshuffle
def test_pixel_shuffle():
# test on cpu
model = PixelShufflePack(3, 3, 2, 3)
model.init_weights()
x = torch.rand(1, 3, 16, 16)
y = model(x)
assert y.shape == (1, 3, 32, 32)
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
x = x.cuda()
y = model(x)
assert y.shape == (1, 3, 32, 32)
def test_pixel_unshuffle():
# test on cpu
x = torch.rand(1, 3, 20, 20)
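    # pixel_unshuffle trades spatial resolution for channels:
    # channels go 3 -> 3 * 2**2 = 12, spatial dims go 20 -> 20 / 2 = 10.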
y = pixel_unshuffle(x, scale=2)
assert y.shape == (1, 12, 10, 10)
with pytest.raises(AssertionError):
y = pixel_unshuffle(x, scale=3)
# test on gpu
if torch.cuda.is_available():
x = x.cuda()
y = pixel_unshuffle(x, scale=2)
assert y.shape == (1, 12, 10, 10)
with pytest.raises(AssertionError):
y = pixel_unshuffle(x, scale=3)
| 988 | 23.121951 | 66 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_flow_warp.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models import flow_warp
def tensor_shift(x, shift=(1, 1), fill_val=0):
"""Shift tensor for testing flow_warp.
Args:
        x (Tensor): the input tensor. The shape is (b, c, h, w).
shift (tuple): shift pixel.
fill_val (float): fill value.
Returns:
Tensor: the shifted tensor.
"""
_, _, h, w = x.size()
shift_h, shift_w = shift
new = torch.ones_like(x) * fill_val
len_h = h - shift_h
len_w = w - shift_w
new[:, :, shift_h:shift_h + len_h,
shift_w:shift_w + len_w] = x.narrow(2, 0, len_h).narrow(3, 0, len_w)
return new
def test_flow_warp():
x = torch.rand(1, 3, 10, 10)
flow = torch.rand(1, 4, 4, 2)
with pytest.raises(ValueError):
# The spatial sizes of input and flow are not the same.
flow_warp(x, flow)
# cpu
x = torch.rand(1, 3, 10, 10)
flow = -torch.ones(1, 10, 10, 2)
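    # A constant flow of -1 samples each output pixel from one pixel up-left,
    # which equals shifting the input down-right by (1, 1).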
result = flow_warp(x, flow)
assert result.size() == (1, 3, 10, 10)
error = torch.sum(torch.abs(result - tensor_shift(x, (1, 1))))
assert error < 1e-5
# gpu
if torch.cuda.is_available():
x = torch.rand(1, 3, 10, 10).cuda()
flow = -torch.ones(1, 10, 10, 2).cuda()
result = flow_warp(x, flow)
assert result.size() == (1, 3, 10, 10)
error = torch.sum(torch.abs(result - tensor_shift(x, (1, 1))))
assert error < 1e-5
| 1,471 | 26.773585 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_model_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmedit.models.common import (GANImageBuffer, extract_around_bbox,
extract_bbox_patch, generation_init_weights,
set_requires_grad)
def test_set_requires_grad():
model = torch.nn.Conv2d(1, 3, 1, 1)
set_requires_grad(model, False)
for param in model.parameters():
assert not param.requires_grad
def test_gan_image_buffer():
# test buffer size = 0
buffer = GANImageBuffer(buffer_size=0)
img_np = np.random.randn(1, 3, 256, 256)
img_tensor = torch.from_numpy(img_np)
img_tensor_return = buffer.query(img_tensor)
assert torch.equal(img_tensor_return, img_tensor)
# test buffer size > 0
buffer = GANImageBuffer(buffer_size=1)
img_np = np.random.randn(2, 3, 256, 256)
img_tensor = torch.from_numpy(img_np)
img_tensor_0 = torch.unsqueeze(img_tensor[0], 0)
img_tensor_1 = torch.unsqueeze(img_tensor[1], 0)
img_tensor_00 = torch.cat([img_tensor_0, img_tensor_0], 0)
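    # query() randomly either passes the incoming image through (storing it)
    # or swaps it with a buffered one, so both outcomes below are valid.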
img_tensor_return = buffer.query(img_tensor)
assert (torch.equal(img_tensor_return, img_tensor)
and torch.equal(buffer.image_buffer[0], img_tensor_0)) or \
(torch.equal(img_tensor_return, img_tensor_00)
and torch.equal(buffer.image_buffer[0], img_tensor_1))
    # test buffer size > 0 with an explicit buffer_ratio
buffer = GANImageBuffer(buffer_size=1, buffer_ratio=0.3)
img_np = np.random.randn(2, 3, 256, 256)
img_tensor = torch.from_numpy(img_np)
img_tensor_0 = torch.unsqueeze(img_tensor[0], 0)
img_tensor_1 = torch.unsqueeze(img_tensor[1], 0)
img_tensor_00 = torch.cat([img_tensor_0, img_tensor_0], 0)
img_tensor_return = buffer.query(img_tensor)
assert (torch.equal(img_tensor_return, img_tensor)
and torch.equal(buffer.image_buffer[0], img_tensor_0)) or \
(torch.equal(img_tensor_return, img_tensor_00)
and torch.equal(buffer.image_buffer[0], img_tensor_1))
def test_generation_init_weights():
# Conv
module = nn.Conv2d(3, 3, 1)
module_tmp = copy.deepcopy(module)
generation_init_weights(module, init_type='normal', init_gain=0.02)
generation_init_weights(module, init_type='xavier', init_gain=0.02)
generation_init_weights(module, init_type='kaiming')
generation_init_weights(module, init_type='orthogonal', init_gain=0.02)
with pytest.raises(NotImplementedError):
generation_init_weights(module, init_type='abc')
assert not torch.equal(module.weight.data, module_tmp.weight.data)
# Linear
module = nn.Linear(3, 1)
module_tmp = copy.deepcopy(module)
generation_init_weights(module, init_type='normal', init_gain=0.02)
generation_init_weights(module, init_type='xavier', init_gain=0.02)
generation_init_weights(module, init_type='kaiming')
generation_init_weights(module, init_type='orthogonal', init_gain=0.02)
with pytest.raises(NotImplementedError):
generation_init_weights(module, init_type='abc')
assert not torch.equal(module.weight.data, module_tmp.weight.data)
# BatchNorm2d
module = nn.BatchNorm2d(3)
module_tmp = copy.deepcopy(module)
generation_init_weights(module, init_type='normal', init_gain=0.02)
assert not torch.equal(module.weight.data, module_tmp.weight.data)
def test_extract_bbox_patch():
img_np = np.random.randn(100, 100, 3)
bbox = np.asarray([10, 10, 10, 10])
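    # The bbox format here is (top, left, height, width), so the patch is
    # img_np[10:20, 10:20, ...].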
img_patch = extract_bbox_patch(bbox, img_np, channel_first=False)
assert np.array_equal(img_patch, img_np[10:20, 10:20, ...])
img_np = np.random.randn(1, 3, 100, 100)
bbox = np.asarray([[10, 10, 10, 10]])
img_patch = extract_bbox_patch(bbox, img_np)
assert np.array_equal(img_patch, img_np[..., 10:20, 10:20])
img_tensor = torch.from_numpy(img_np)
bbox = np.asarray([[10, 10, 10, 10]])
img_patch = extract_bbox_patch(bbox, img_tensor)
assert np.array_equal(img_patch.numpy(), img_np[..., 10:20, 10:20])
with pytest.raises(AssertionError):
img_np = np.random.randn(100, 100)
bbox = np.asarray([[10, 10, 10, 10]])
img_patch = extract_bbox_patch(bbox, img_np)
with pytest.raises(AssertionError):
img_np = np.random.randn(2, 3, 100, 100)
bbox = np.asarray([[10, 10, 10, 10]])
img_patch = extract_bbox_patch(bbox, img_np)
with pytest.raises(AssertionError):
img_np = np.random.randn(3, 100, 100)
bbox = np.asarray([[10, 10, 10, 10]])
img_patch = extract_bbox_patch(bbox, img_np)
def test_extract_around_bbox():
with pytest.raises(AssertionError):
img_np = np.random.randn(100, 100, 3)
bbox = np.asarray([10, 10, 10, 10])
extract_around_bbox(img_np, bbox, (4, 4))
with pytest.raises(TypeError):
bbox = dict(test='fail')
img_np = np.random.randn(100, 100, 3)
extract_around_bbox(img_np, bbox, (15, 15))
img_np = np.random.randn(100, 100, 3)
bbox = np.asarray([10, 10, 10, 10])
img_new, bbox_new = extract_around_bbox(
img_np, bbox, (14, 14), channel_first=False)
assert np.array_equal(img_np[8:22, 8:22, ...], img_new)
assert np.array_equal(bbox_new, np.asarray([8, 8, 14, 14]))
img_np = np.random.randn(1, 3, 100, 100)
bbox = np.asarray([[10, 10, 10, 10]])
img_tensor = torch.from_numpy(img_np)
bbox_tensor = torch.from_numpy(bbox)
img_new, bbox_new = extract_around_bbox(
img_tensor, bbox_tensor, target_size=[14, 14])
assert np.array_equal(img_np[..., 8:22, 8:22], img_new.numpy())
assert np.array_equal(bbox_new.numpy(), np.asarray([[8, 8, 14, 14]]))
| 5,756 | 38.979167 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_common/test_ensemble.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmedit.models.common import SpatialTemporalEnsemble
def test_ensemble_cpu():
model = nn.Identity()
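    # Self-ensemble averages the model outputs over flipped/transposed copies
    # of the input; with an identity model the average equals the input.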
# spatial ensemble of an image
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=False)
inputs = torch.rand(1, 3, 4, 4)
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.numpy(), outputs.numpy())
# spatial ensemble of a sequence
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=False)
inputs = torch.rand(1, 2, 3, 4, 4)
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.numpy(), outputs.numpy())
# spatial and temporal ensemble of a sequence
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
inputs = torch.rand(1, 2, 3, 4, 4)
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.numpy(), outputs.numpy())
# spatial and temporal ensemble of an image
with pytest.raises(ValueError):
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
inputs = torch.rand(1, 3, 4, 4)
outputs = ensemble(inputs, model)
def test_ensemble_cuda():
if torch.cuda.is_available():
model = nn.Identity().cuda()
# spatial ensemble of an image
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=False)
inputs = torch.rand(1, 3, 4, 4).cuda()
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.cpu().numpy(),
outputs.cpu().numpy())
# spatial ensemble of a sequence
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=False)
inputs = torch.rand(1, 2, 3, 4, 4).cuda()
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.cpu().numpy(),
outputs.cpu().numpy())
# spatial and temporal ensemble of a sequence
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
inputs = torch.rand(1, 2, 3, 4, 4).cuda()
outputs = ensemble(inputs, model)
np.testing.assert_almost_equal(inputs.cpu().numpy(),
outputs.cpu().numpy())
# spatial and temporal ensemble of an image
with pytest.raises(ValueError):
ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
inputs = torch.rand(1, 3, 4, 4).cuda()
outputs = ensemble(inputs, model)
| 2,575 | 36.882353 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_transformer/test_search_transformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.builder import build_component
def test_search_transformer():
model_cfg = dict(type='SearchTransformer')
model = build_component(model_cfg)
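    # The search transformer (as in TTSR) matches LR features against the
    # reference and returns a soft attention map plus transferred textures at
    # three scales; the shape checks below reflect that behaviour.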
lr_pad_level3 = torch.randn((2, 32, 32, 32))
ref_pad_level3 = torch.randn((2, 32, 32, 32))
ref_level3 = torch.randn((2, 32, 32, 32))
ref_level2 = torch.randn((2, 16, 64, 64))
ref_level1 = torch.randn((2, 8, 128, 128))
s, textures = model(lr_pad_level3, ref_pad_level3,
(ref_level3, ref_level2, ref_level1))
t_level3, t_level2, t_level1 = textures
assert s.shape == (2, 1, 32, 32)
assert t_level3.shape == (2, 32, 32, 32)
assert t_level2.shape == (2, 16, 64, 64)
assert t_level1.shape == (2, 8, 128, 128)
| 806 | 31.28 | 61 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_components/test_stylegan2.py | # Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
import pytest
import torch
import torch.nn as nn
from mmedit.models.components.stylegan2.common import get_module_device
from mmedit.models.components.stylegan2.generator_discriminator import (
StyleGAN2Discriminator, StyleGANv2Generator)
from mmedit.models.components.stylegan2.modules import (Blur,
ModulatedStyleConv,
ModulatedToRGB)
class TestBlur:
@classmethod
def setup_class(cls):
cls.kernel = [1, 3, 3, 1]
cls.pad = (1, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_blur_cuda(self):
blur = Blur(self.kernel, self.pad)
x = torch.randn((2, 3, 8, 8))
res = blur(x)
assert res.shape == (2, 3, 7, 7)
class TestModStyleConv:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
in_channels=3,
out_channels=1,
kernel_size=3,
style_channels=5,
upsample=True)
def test_mod_styleconv_cpu(self):
conv = ModulatedStyleConv(**self.default_cfg)
input_x = torch.randn((2, 3, 4, 4))
input_style = torch.randn((2, 5))
res = conv(input_x, input_style)
assert res.shape == (2, 1, 8, 8)
_cfg = deepcopy(self.default_cfg)
_cfg['upsample'] = False
conv = ModulatedStyleConv(**_cfg)
input_x = torch.randn((2, 3, 4, 4))
input_style = torch.randn((2, 5))
res = conv(input_x, input_style)
assert res.shape == (2, 1, 4, 4)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_mod_styleconv_cuda(self):
conv = ModulatedStyleConv(**self.default_cfg).cuda()
input_x = torch.randn((2, 3, 4, 4)).cuda()
input_style = torch.randn((2, 5)).cuda()
res = conv(input_x, input_style)
assert res.shape == (2, 1, 8, 8)
_cfg = deepcopy(self.default_cfg)
_cfg['upsample'] = False
conv = ModulatedStyleConv(**_cfg).cuda()
input_x = torch.randn((2, 3, 4, 4)).cuda()
input_style = torch.randn((2, 5)).cuda()
res = conv(input_x, input_style)
assert res.shape == (2, 1, 4, 4)
class TestToRGB:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_channels=5, style_channels=5, out_channels=3)
def test_torgb_cpu(self):
model = ModulatedToRGB(**self.default_cfg)
input_x = torch.randn((2, 5, 4, 4))
style = torch.randn((2, 5))
res = model(input_x, style)
assert res.shape == (2, 3, 4, 4)
input_x = torch.randn((2, 5, 8, 8))
style = torch.randn((2, 5))
skip = torch.randn(2, 3, 4, 4)
res = model(input_x, style, skip)
assert res.shape == (2, 3, 8, 8)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_torgb_cuda(self):
model = ModulatedToRGB(**self.default_cfg).cuda()
input_x = torch.randn((2, 5, 4, 4)).cuda()
style = torch.randn((2, 5)).cuda()
res = model(input_x, style)
assert res.shape == (2, 3, 4, 4)
input_x = torch.randn((2, 5, 8, 8)).cuda()
style = torch.randn((2, 5)).cuda()
skip = torch.randn(2, 3, 4, 4).cuda()
res = model(input_x, style, skip)
assert res.shape == (2, 3, 8, 8)
class TestStyleGAN2Generator:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(
out_size=64, style_channels=16, num_mlps=4, channel_multiplier=1)
def test_stylegan2_g_cpu(self):
# test default config
g = StyleGANv2Generator(**self.default_cfg)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
truncation_mean = g.get_mean_latent()
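        # The truncation trick interpolates each latent toward the mean latent
        # (w = w_mean + truncation * (w - w_mean)), trading diversity for quality.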
res = g(
None,
num_batches=2,
randomize_noise=False,
truncation=0.7,
truncation_latent=truncation_mean)
assert res.shape == (2, 3, 64, 64)
res = g.style_mixing(2, 2, truncation_latent=truncation_mean)
assert res.shape[2] == 64
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)) for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)) for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_)
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_g_cuda(self):
# test default config
g = StyleGANv2Generator(**self.default_cfg).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None,
num_batches=1,
injected_noise=random_noise,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
random_noise = g.make_injected_noise()
res = g(
None, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
styles = [torch.randn((1, 16)).cuda() for _ in range(2)]
res = g(
styles, num_batches=1, injected_noise=None, randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
res = g(
torch.randn,
num_batches=1,
injected_noise=None,
randomize_noise=False)
assert res.shape == (1, 3, 64, 64)
g.eval()
assert g.default_style_mode == 'single'
g.train()
assert g.default_style_mode == 'mix'
with pytest.raises(AssertionError):
styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
_ = g(styles, injected_noise=None, randomize_noise=False)
cfg_ = deepcopy(self.default_cfg)
cfg_['out_size'] = 256
g = StyleGANv2Generator(**cfg_).cuda()
res = g(None, num_batches=2)
assert res.shape == (2, 3, 256, 256)
class TestStyleGANv2Disc:
@classmethod
def setup_class(cls):
cls.default_cfg = dict(in_size=64, channel_multiplier=1)
def test_stylegan2_disc_cpu(self):
d = StyleGAN2Discriminator(**self.default_cfg)
img = torch.randn((2, 3, 64, 64))
score = d(img)
assert score.shape == (2, 1)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_stylegan2_disc_cuda(self):
d = StyleGAN2Discriminator(**self.default_cfg).cuda()
img = torch.randn((2, 3, 64, 64)).cuda()
score = d(img)
assert score.shape == (2, 1)
def test_get_module_device_cpu():
device = get_module_device(nn.Conv2d(3, 3, 3, 1, 1))
assert device == torch.device('cpu')
# The input module should contain parameters.
with pytest.raises(ValueError):
get_module_device(nn.Flatten())
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_get_module_device_cuda():
module = nn.Conv2d(3, 3, 3, 1, 1).cuda()
device = get_module_device(module)
assert device == next(module.parameters()).get_device()
# The input module should contain parameters.
with pytest.raises(ValueError):
get_module_device(nn.Flatten().cuda())
| 8,432 | 30.466418 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_models/test_components/test_refiners/test_mlp_refiner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmedit.models.builder import build_component
def test_mlp_refiner():
model_cfg = dict(
type='MLPRefiner', in_dim=8, out_dim=3, hidden_list=[8, 8, 8, 8])
mlp = build_component(model_cfg)
# test attributes
assert mlp.__class__.__name__ == 'MLPRefiner'
# prepare data
inputs = torch.rand(2, 8)
targets = torch.rand(2, 3)
if torch.cuda.is_available():
inputs = inputs.cuda()
targets = targets.cuda()
mlp = mlp.cuda()
data_batch = {'in': inputs, 'target': targets}
# prepare optimizer
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(mlp.parameters(), lr=1e-4)
    # run one manual forward/backward optimisation step
output = mlp.forward(data_batch['in'])
assert output.shape == data_batch['target'].shape
loss = criterion(output, data_batch['target'])
optimizer.zero_grad()
loss.backward()
optimizer.step()
| 971 | 26.771429 | 73 | py |