text
stringlengths 5
22M
| id
stringlengths 12
177
| metadata
dict | __index_level_0__
int64 0
1.37k
|
---|---|---|---|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import ctypes
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
from collections import defaultdict
from concurrent.futures import as_completed, ProcessPoolExecutor
import logging
from src._execution import check_correctness, check_correctness_with_test_cases
logging.basicConfig(
format="SystemLog: [%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def evaluate_with_test_code(
samples,
timeout
):
logger.info(f'Start evaluation with test code, timeout={timeout}')
# Check the generated samples against test suites.
with ProcessPoolExecutor() as executor:
futures = []
existed_completion = defaultdict(set)
results = defaultdict(defaultdict)
for sample in samples:
task_id = sample["task_id"]
prompt = sample['prompt']
test = sample['test']
entry_point = sample['entry_point']
completion = sample["completion"]
if completion in existed_completion[task_id]:
continue
existed_completion[task_id].add(completion)
args = (task_id, prompt, completion, test, entry_point, timeout)
future = executor.submit(check_correctness, *args)
futures.append(future)
logger.info(f'{len(futures)} execution requests are submitted')
for idx, future in enumerate(as_completed(futures)):
logger.info('[{}/{}] execution completed'.format(idx+1, len(futures)))
result = future.result()
results[result["task_id"]][result["completion"]] = result
logger.info('execution finished! start parsing results')
samples_with_result = []
for sample in samples:
task_id = sample["task_id"]
completion = sample["completion"]
result = results[task_id][completion]
sample["result"] = result["result"]
sample["passed"] = result["passed"]
samples_with_result.append(sample)
assert len(samples_with_result) == len(samples), "Some problems are not attempted."
return samples_with_result
def evaluate_with_test_cases(
solutions,
test_cases_dict,
timeout,
limit
):
logger.info(f'Start evaluation with test cases, timeout={timeout}, limit={limit}')
# Check the generated solutions against test suites.
with ProcessPoolExecutor() as executor:
futures = []
results_list = []
existed_completion = defaultdict(set)
for solution in solutions:
task_id = solution['task_id']
prompt = solution['prompt']
completion = solution['completion']
if completion in existed_completion[task_id]:
continue
existed_completion[task_id].add(completion)
task_test_cases = test_cases_dict[task_id]
if not task_test_cases:
continue
# get limited test cases
limited_task_test_cases = [cases_per_sample[:limit] for cases_per_sample in task_test_cases]
limited_task_test_cases = sum(limited_task_test_cases, [])
args = (task_id, prompt, completion, list(set(limited_task_test_cases)), timeout)
future = executor.submit(check_correctness_with_test_cases, *args)
futures.append(future)
logger.info(f'{len(futures)} execution requests are submitted')
for idx, future in enumerate(as_completed(futures)):
logger.info('[{}/{}] execution completed'.format(idx+1, len(futures)))
result = future.result()
results_list.append(result)
logger.info('execution finished!')
return results_list
|
CodeT/CodeT/src/execution.py/0
|
{
"file_path": "CodeT/CodeT/src/execution.py",
"repo_id": "CodeT",
"token_count": 1577
}
| 218 |
import absl # Here to have a nice missing dependency error message early on
import nltk # Here to have a nice missing dependency error message early on
import numpy # Here to have a nice missing dependency error message early on
import six # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
import pdb
import numpy as np
import scipy
from tqdm import tqdm
from utils import (
GSM8KCase,
GSM8KExample,
TextEntailmentCase,
TextEntailmentExample,
convert_eval_sequences_to_cases,
compute_results,
compute_results_avg,
)
case_class_map = {
"GSM8K": GSM8KCase,
"CLUTRR": TextEntailmentCase,
"strategyQA": TextEntailmentCase,
}
example_class_map = {
"GSM8K": GSM8KExample,
"CLUTRR": TextEntailmentExample,
"strategyQA": TextEntailmentExample,
}
_CITATION = ""
_DESCRIPTION = ""
_KWARGS_DESCRIPTION = ""
def simple_accuracy(preds, labels):
correct_case_num = 0
for pred, label in zip(preds, labels):
pred = pred.replace(" ", "")
label = label.replace(" ", "")
if pred == label:
correct_case_num += 1
return correct_case_num / len(preds)
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class VerifierMetrics(datasets.Metric):
def __init__(self, eval_sequences=None, pred_num_per_case=None, dataset_name=None, **kwargs,):
super().__init__(**kwargs)
self.pred_num_per_case = pred_num_per_case
self.cases = convert_eval_sequences_to_cases(
eval_sequences=eval_sequences,
pred_num_per_case=pred_num_per_case,
case_class=case_class_map[dataset_name],
example_class=example_class_map[dataset_name],
)
def assign_scores(self, predictions):
for i in range(0, len(predictions), self.pred_num_per_case + 1):
curr_case_index = i // (self.pred_num_per_case + 1)
self.cases[curr_case_index].ground_truth.verifier_score = predictions[i]
for j in range(0, self.pred_num_per_case):
self.cases[curr_case_index].preds[j].verifier_score = predictions[i+j+1]
def _compute(self, predictions=None, references=None):
self.assign_scores(predictions)
result = {}
result.update(compute_results_avg(self.cases, rand_k=100, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=75, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=50, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=25, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=20, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=10, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=5, repeat_time=10))
result.update(compute_results_avg(self.cases, rand_k=2, repeat_time=10))
return result
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("float32", id="scores"),
"references": datasets.Value("float32", id="scores"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _metric_info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=[],
reference_urls=[],
)
|
CodeT/DIVERSE/code/src/verifier_metrics.py/0
|
{
"file_path": "CodeT/DIVERSE/code/src/verifier_metrics.py",
"repo_id": "CodeT",
"token_count": 1806
}
| 219 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tqdm
import itertools
from collections import defaultdict
from concurrent.futures import as_completed, ProcessPoolExecutor
from utils import Tools, FilePathBuilder, CONSTANTS
class BagOfWords:
def __init__(self, input_file):
self.input_file = input_file
def build(self):
print(f'building one gram vector for {self.input_file}')
futures = dict()
lines = Tools.load_pickle(self.input_file)
with ProcessPoolExecutor(max_workers=48) as executor:
for line in lines:
futures[executor.submit(Tools.tokenize, line['context'])] = line
new_lines = []
t = tqdm.tqdm(total=len(futures))
for future in as_completed(futures):
line = futures[future]
tokenized = future.result()
new_lines.append({
'context': line['context'],
'metadata': line['metadata'],
'data': [{'embedding': tokenized}]
})
tqdm.tqdm.update(t)
output_file_path = FilePathBuilder.one_gram_vector_path(self.input_file)
Tools.dump_pickle(new_lines, output_file_path)
class BuildVectorWrapper:
def __init__(self, benchmark, vector_builder, repos, window_sizes, slice_sizes):
self.repos = repos
self.window_sizes = window_sizes
self.slice_sizes = slice_sizes
self.vector_builder = vector_builder
self.benchmark = benchmark
def vectorize_repo_windows(self):
for window_size, slice_size in itertools.product(self.window_sizes, self.slice_sizes):
for repo in self.repos:
builder = self.vector_builder(
FilePathBuilder.repo_windows_path(repo, window_size, slice_size)
)
builder.build()
def vectorize_baseline_and_ground_windows(self):
for window_size in self.window_sizes:
for repo in self.repos:
builder = self.vector_builder(FilePathBuilder.search_first_window_path(self.benchmark, CONSTANTS.rg, repo, window_size))
builder.build()
builder = self.vector_builder(FilePathBuilder.search_first_window_path(self.benchmark, CONSTANTS.gt, repo, window_size))
builder.build()
def vectorize_prediction_windows(self, mode, prediction_path_template):
for window_size, slice_size in itertools.product(self.window_sizes, self.slice_sizes):
prediction_path = prediction_path_template.format(window_size=window_size, slice_size=slice_size)
for repo in self.repos:
window_path = FilePathBuilder.gen_first_window_path(
self.benchmark, mode, prediction_path, repo, window_size
)
builder = self.vector_builder(window_path)
builder.build()
class BuildEmbeddingVector:
'''
utilize external embedding model to generate embedding vector
'''
def __init__(self, repos, window_sizes, slice_sizes):
self.repos = repos
self.window_sizes = window_sizes
self.slice_sizes = slice_sizes
def build_input_file_for_repo_window(self, slice_size):
lines = []
for window_size in self.window_sizes:
for repo in self.repos:
file_path = FilePathBuilder.repo_windows_path(repo, window_size, slice_size)
loaded_lines = Tools.load_pickle(file_path)
for line in loaded_lines:
lines.append({
'context': line['context'],
'metadata': {
'window_file_path': file_path,
'original_metadata': line['metadata'],
},})
return lines
def build_input_file_search_first_window(self, mode, benchmark):
lines = []
for window_size in self.window_sizes:
for repo in self.repos:
file_path = FilePathBuilder.search_first_window_path(benchmark, mode, repo, window_size)
loaded_lines = Tools.load_pickle(file_path)
for line in loaded_lines:
lines.append({
'context': line['context'],
'metadata': {
'window_file_path': file_path,
'original_metadata': line['metadata']
}})
return lines
def build_input_file_for_gen_first_window(self, mode, benchmark, prediction_path):
lines = []
for window_size in self.window_sizes:
for repo in self.repos:
file_path = FilePathBuilder.gen_first_window_path(benchmark, mode, prediction_path, repo, window_size)
loaded_lines = Tools.load_pickle(file_path)
for line in loaded_lines:
lines.append({
'context': line['context'],
'metadata': {
'window_file_path': file_path,
'original_metadata': line['metadata']
}})
return lines
@staticmethod
def place_generated_embeddings(generated_embeddings):
vector_file_path_to_lines = defaultdict(list)
for line in generated_embeddings:
window_path = line['metadata']['window_file_path']
original_metadata = line['metadata']['original_metadata']
vector_file_path = FilePathBuilder.ada002_vector_path(window_path)
vector_file_path_to_lines[vector_file_path].append({
'context': line['context'],
'metadata': original_metadata,
'data': line['data']
})
for vector_file_path, lines in vector_file_path_to_lines.items():
Tools.dump_pickle(lines, vector_file_path)
|
CodeT/RepoCoder/build_vector.py/0
|
{
"file_path": "CodeT/RepoCoder/build_vector.py",
"repo_id": "CodeT",
"token_count": 2906
}
| 220 |
#!/bin/zsh
# This ZSH plugin reads the text from the current buffer
# and uses a Python script to complete the text.
create_completion() {
# Get the text typed until now.
text=${BUFFER}
completion=$(echo -n "$text" | $CODEX_CLI_PATH/src/codex_query.py)
# Add completion to the current buffer.
BUFFER="${text}${completion}"
# Put the cursor at the end of the line.
CURSOR=${#BUFFER}
}
# Bind the create_completion function to a key.
zle -N create_completion
setopt interactivecomments
|
Codex-CLI/scripts/zsh_plugin.zsh/0
|
{
"file_path": "Codex-CLI/scripts/zsh_plugin.zsh",
"repo_id": "Codex-CLI",
"token_count": 182
}
| 221 |
[MESSAGES CONTROL]
# Use Python 3 style print for both support 2 and 3.
disable=superfluous-parens
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=8
|
Cognitive-Face-Python/.pylintrc/0
|
{
"file_path": "Cognitive-Face-Python/.pylintrc",
"repo_id": "Cognitive-Face-Python",
"token_count": 59
}
| 222 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: large_person_group_person.py
Description: Large Person Group Person section of the Cognitive Face API.
"""
from . import util
def create(large_person_group_id, name, user_data=None):
"""Create a new person in a specified large person group. A newly created
person have no registered face.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
name: Name of the created person, maximum length is 128.
user_data: Optional user defined data for the person. Length should not
exceed 16KB.
Returns:
A new `person_id` created.
"""
url = 'largepersongroups/{}/persons'.format(large_person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('POST', url, json=json)
def delete(large_person_group_id, person_id):
"""Delete an existing person from a large person group. Persisted face
images of the person will also be deleted.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: personId of the target person.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}/persons/{}'.format(large_person_group_id,
person_id)
return util.request('DELETE', url)
def get(large_person_group_id, person_id):
"""Retrieve a person's information, including registered persisted faces,
`name` and `user_data`.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: Specifying the target person.
Returns:
The person's information.
"""
url = 'largepersongroups/{}/persons/{}'.format(large_person_group_id,
person_id)
return util.request('GET', url)
def list(large_person_group_id, start=None, top=None):
"""List `top` persons in a large person group with `person_id` greater than
`start`, and retrieve person information (including `person_id`, `name`,
`user_data` and `persisted_face_ids` of registered faces of the person).
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
start: List persons from the least `person_id` greater than this.
top: The number of persons to list, rangeing in [1, 1000]. Default is
1000.
Returns:
An array of person information that belong to the large person group.
"""
url = 'largepersongroups/{}/persons'.format(large_person_group_id)
params = {
'start': start,
'top': top,
}
return util.request('GET', url, params=params)
def update(large_person_group_id, person_id, name=None, user_data=None):
"""Update `name` or `user_data` of a person.
Args:
large_person_group_id: `large_person_group_id` of the target large
person group.
person_id: `person_id` of the target person.
name: Name of the created person, maximum length is 128.
user_data: Optional user defined data for the person. Length should not
exceed 16KB.
Returns:
An empty response body.
"""
url = 'largepersongroups/{}/persons/{}'.format(large_person_group_id,
person_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PATCH', url, json=json)
|
Cognitive-Face-Python/cognitive_face/large_person_group_person.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/large_person_group_person.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1484
}
| 223 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: util.py
Description: Shared utilities for the Python SDK of the Cognitive Face API.
"""
import os.path
import time
import requests
import cognitive_face as CF
DEFAULT_BASE_URL = os.environ['FACE_ENDPOINT']
TIME_SLEEP = 1
class CognitiveFaceException(Exception):
"""Custom Exception for the python SDK of the Cognitive Face API.
Attributes:
status_code: HTTP response status code.
code: error code.
msg: error message.
"""
def __init__(self, status_code, code, msg):
super(CognitiveFaceException, self).__init__()
self.status_code = status_code
self.code = code
self.msg = msg
def __str__(self):
return ('Error when calling Cognitive Face API:\n'
'\tstatus_code: {}\n'
'\tcode: {}\n'
'\tmessage: {}\n').format(self.status_code, self.code,
self.msg)
class Key(object):
"""Manage Subscription Key."""
@classmethod
def set(cls, key):
"""Set the Subscription Key."""
cls.key = key
@classmethod
def get(cls):
"""Get the Subscription Key."""
if not hasattr(cls, 'key'):
cls.key = None
return cls.key
class BaseUrl(object):
@classmethod
def set(cls, base_url):
if not base_url.endswith('/'):
base_url += '/'
cls.base_url = base_url
@classmethod
def get(cls):
if not hasattr(cls, 'base_url') or not cls.base_url:
cls.base_url = DEFAULT_BASE_URL
return cls.base_url
def request(method, url, data=None, json=None, headers=None, params=None):
# pylint: disable=too-many-arguments
"""Universal interface for request."""
# Make it possible to call only with short name (without BaseUrl).
if not url.startswith('https://'):
url = BaseUrl.get() + url
# Setup the headers with default Content-Type and Subscription Key.
headers = headers or {}
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
headers['Ocp-Apim-Subscription-Key'] = Key.get()
response = requests.request(
method,
url,
params=params,
data=data,
json=json,
headers=headers)
# Handle result and raise custom exception when something wrong.
result = None
# `person_group.train` return 202 status code for success.
if response.status_code not in (200, 202):
try:
error_msg = response.json()['error']
except:
raise CognitiveFaceException(response.status_code,
response.status_code, response.text)
raise CognitiveFaceException(response.status_code,
error_msg.get('code'),
error_msg.get('message'))
# Prevent `response.json()` complains about empty response.
if response.text:
result = response.json()
else:
result = {}
return result
def parse_image(image):
"""Parse the image smartly and return metadata for request.
First check whether the image is a URL or a file path or a file-like object
and return corresponding metadata.
Args:
image: A URL or a file path or a file-like object represents an image.
Returns:
a three-item tuple consist of HTTP headers, binary data and json data
for POST.
"""
if hasattr(image, 'read'): # When image is a file-like object.
headers = {'Content-Type': 'application/octet-stream'}
data = image.read()
return headers, data, None
elif os.path.isfile(image): # When image is a file path.
headers = {'Content-Type': 'application/octet-stream'}
data = open(image, 'rb').read()
return headers, data, None
else: # Default treat it as a URL (string).
headers = {'Content-Type': 'application/json'}
json = {'url': image}
return headers, None, json
def wait_for_person_group_training(person_group_id):
"""Wait for the finish of person group training."""
idx = 1
while True:
res = CF.person_group.get_status(person_group_id)
if res['status'] in ('succeeded', 'failed'):
break
print('The training of Person Group {} is onging: #{}'.format(
person_group_id, idx))
time.sleep(2**idx)
idx += 1
def wait_for_large_face_list_training(large_face_list_id):
"""Wait for the finish of large face list training."""
idx = 1
while True:
res = CF.large_face_list.get_status(large_face_list_id)
if res['status'] in ('succeeded', 'failed'):
break
print('The training of Large Face List {} is onging: #{}'.format(
large_face_list_id, idx))
time.sleep(2**idx)
idx += 1
def wait_for_large_person_group_training(large_person_group_id):
"""Wait for the finish of large person group training."""
idx = 1
while True:
res = CF.large_person_group.get_status(large_person_group_id)
if res['status'] in ('succeeded', 'failed'):
break
print('The training of Large Person Group {} is onging: #{}'.format(
large_person_group_id, idx))
time.sleep(2**idx)
idx += 1
def clear_face_lists():
"""[Dangerous] Clear all the face lists and all related persisted data."""
face_lists = CF.face_list.lists()
time.sleep(TIME_SLEEP)
for face_list in face_lists:
face_list_id = face_list['faceListId']
CF.face_list.delete(face_list_id)
print('Deleting Face List {}'.format(face_list_id))
time.sleep(TIME_SLEEP)
def clear_person_groups():
"""[Dangerous] Clear all the person groups and all related persisted data.
"""
person_groups = CF.person_group.lists()
time.sleep(TIME_SLEEP)
for person_group in person_groups:
person_group_id = person_group['personGroupId']
CF.person_group.delete(person_group_id)
print('Deleting Person Group {}'.format(person_group_id))
time.sleep(TIME_SLEEP)
def clear_large_face_lists():
"""[Dangerous] Clear all the large face lists and all related persisted
data.
"""
large_face_lists = CF.large_face_list.list()
time.sleep(TIME_SLEEP)
for large_face_list in large_face_lists:
large_face_list_id = large_face_list['largeFaceListId']
CF.large_face_list.delete(large_face_list_id)
print('Deleting Large Face List {}'.format(large_face_list_id))
time.sleep(TIME_SLEEP)
def clear_large_person_groups():
"""[Dangerous] Clear all the large person groups and all related persisted
data.
"""
large_person_groups = CF.large_person_group.list()
time.sleep(TIME_SLEEP)
for large_person_group in large_person_groups:
large_person_group_id = large_person_group['largePersonGroupId']
CF.large_person_group.delete(large_person_group_id)
print('Deleting Large Person Group {}'.format(large_person_group_id))
time.sleep(TIME_SLEEP)
|
Cognitive-Face-Python/cognitive_face/util.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/util.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 3029
}
| 224 |
""" Official evaluation script for v1.1 of the SQuAD dataset.
Credit from: https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/
"""
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = (
"Unanswered question " + qa["id"] + " will receive score 0."
)
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths
)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def evaluate_func(human, predictions):
f1 = exact_match = total = 0
for uid, ground_truths in human.items():
total += 1
if uid not in predictions:
message = "Unanswered question " + uid + " will receive score 0."
print(message, file=sys.stderr)
continue
prediction = predictions[uid]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths
)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return str({"exact_match": exact_match, "f1": f1})
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(
description="Evaluation for SQuAD " + expected_version
)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-"
+ expected_version
+ ", but got dataset with v-"
+ dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
ContextualSP/adaptershare/data_utils/squad_eval.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/squad_eval.py",
"repo_id": "ContextualSP",
"token_count": 1832
}
| 225 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import yaml
from data_utils.vocab import Vocabulary
from data_utils.task_def import TaskType, DataFormat, EncoderModelType
from data_utils.metrics import Metric
from mt_dnn.loss import LossCriterion
class TaskDef(dict):
def __init__(
self,
label_vocab,
n_class,
data_type,
task_type,
metric_meta,
split_names,
enable_san,
dropout_p,
loss,
kd_loss,
adv_loss,
):
"""
:param label_vocab: map string label to numbers.
only valid for Classification task or ranking task.
For ranking task, better label should have large number
"""
super().__init__(
**{k: repr(v) for k, v in locals().items()}
) # ensure the class is JSON serializable
self.label_vocab = label_vocab
self.n_class = n_class
self.data_type = data_type
self.task_type = task_type
self.metric_meta = metric_meta
self.split_names = split_names
self.enable_san = enable_san
self.dropout_p = dropout_p
self.loss = loss
self.kd_loss = kd_loss
self.adv_loss = adv_loss
@classmethod
def from_dict(cls, dict_rep):
return cls(**dict_rep)
class TaskDefs:
def __init__(self, task_def_path):
self._task_def_dic = yaml.safe_load(open(task_def_path))
global_map = {}
n_class_map = {}
data_type_map = {}
task_type_map = {}
metric_meta_map = {}
split_names_map = {}
enable_san_map = {}
dropout_p_map = {}
loss_map = {}
kd_loss_map = {}
adv_loss_map = {}
for task, task_def in self._task_def_dic.items():
assert "_" not in task, (
"task name should not contain '_', current task name: %s" % task
)
n_class_map[task] = task_def["n_class"]
data_format = DataFormat[task_def["data_format"]]
data_type_map[task] = data_format
task_type_map[task] = TaskType[task_def["task_type"]]
metric_meta_map[task] = tuple(
Metric[metric_name] for metric_name in task_def["metric_meta"]
)
split_names_map[task] = task_def.get(
"split_names", ["train", "dev", "test"]
)
enable_san_map[task] = task_def["enable_san"]
if "labels" in task_def:
labels = task_def["labels"]
label_mapper = Vocabulary(True)
for label in labels:
label_mapper.add(label)
global_map[task] = label_mapper
if "dropout_p" in task_def:
dropout_p_map[task] = task_def["dropout_p"]
# loss map
if "loss" in task_def:
t_loss = task_def["loss"]
loss_crt = LossCriterion[t_loss]
loss_map[task] = loss_crt
else:
loss_map[task] = None
if "kd_loss" in task_def:
t_loss = task_def["kd_loss"]
loss_crt = LossCriterion[t_loss]
kd_loss_map[task] = loss_crt
else:
kd_loss_map[task] = None
if "adv_loss" in task_def:
t_loss = task_def["adv_loss"]
loss_crt = LossCriterion[t_loss]
adv_loss_map[task] = loss_crt
else:
adv_loss_map[task] = None
self._global_map = global_map
self._n_class_map = n_class_map
self._data_type_map = data_type_map
self._task_type_map = task_type_map
self._metric_meta_map = metric_meta_map
self._split_names_map = split_names_map
self._enable_san_map = enable_san_map
self._dropout_p_map = dropout_p_map
self._loss_map = loss_map
self._kd_loss_map = kd_loss_map
self._adv_loss_map = adv_loss_map
self._task_def_dic = {}
def get_task_names(self):
return list(self._task_type_map.keys())
def get_task_def(self, task_name):
if task_name not in self._task_def_dic:
assert task_name in self._task_type_map
self._task_def_dic[task_name] = TaskDef(
self._global_map.get(task_name, None),
self._n_class_map[task_name],
self._data_type_map[task_name],
self._task_type_map[task_name],
self._metric_meta_map[task_name],
self._split_names_map[task_name],
self._enable_san_map[task_name],
self._dropout_p_map.get(task_name, None),
self._loss_map[task_name],
self._kd_loss_map[task_name],
self._adv_loss_map[task_name],
)
return self._task_def_dic[task_name]
|
ContextualSP/adaptershare/experiments/exp_def.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/exp_def.py",
"repo_id": "ContextualSP",
"token_count": 2605
}
| 226 |
#!/bin/bash
# Reuse of GLUE process script
# Copyright (c) Microsoft, Inc. and its affiliates.
#
# by Xiaodong Liu
# [email protected]
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
set -e
# This script is used to cook SuperGLUE data in FairSEQ format.
#
# offical data from SuperGLUE team is located: https://super.gluebenchmark.com/tasks
# ***Download***
# wget https://dl.fbaipublicfiles.com/glue/superglue/data/v2/combined.zip
# unzip combined.zip
# or sh download.sh in mt-dnn repo
if [[ $# -ne 4 ]]; then
echo "Run as following:"
echo "process.sh <glud_data_folder> <task_name> <dict_dir> <output>"
exit 1
fi
SUPERGLUE_DATA_FOLDER=$1
# e.g., BoolQ
TASKS=$2
DICT=$3
OUTPUT=$4
mkdir -p $OUTPUT
if [ "$TASKS" = "ALL" ]
then
TASKS="BoolQ MultiRC BC ReCoRD COPA WiC WSC"
INPUT_COUNT=2
fi
INPUT_COUNT=2
for TASK in $TASKS
do
echo "Preprocessing $TASK"
TASK_DATA_FOLDER="$SUPERGLUE_DATA_FOLDER/$TASK"
echo "Raw data as downloaded from glue website: $TASK_DATA_FOLDER"
SPLITS="train val test"
if [ "$TASK" = "MultiRC" ]
then
INPUT_COUNT=3
fi
if [ "$TASK" = "WiC" ]
then
INPUT_COUNT=3
fi
if [ "$TASK" = "ReCoRD" ]
then
INPUT_COUNT=3
fi
if [ "$TASK" = "COPA" ]
then
INPUT_COUNT=3
fi
# Strip out header and filter lines that don't have expected number of fields.
rm -rf "$TASK_DATA_FOLDER/processed" ||:
mkdir -p "$TASK_DATA_FOLDER/processed"
for SPLIT in $SPLITS
do
# CoLA train and dev doesn't have header.
cp "$TASK_DATA_FOLDER/$SPLIT.jsonl" "$TASK_DATA_FOLDER/processed/$SPLIT.jsonl";
done
# Split into input0, input1 and label
python superglue_fairseq.py --data_dir $TASK_DATA_FOLDER/processed --task $TASK
for SPLIT in $SPLITS
do
echo ${SPLIT}
echo $(seq 0 $((INPUT_COUNT-1)))
# BPE encode.
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
MYLANG="input$INPUT_TYPE"
echo "BPE encoding $SPLIT/$MYLANG"
## bpe for RoBERTa
python -m examples.roberta.multiprocessing_bpe_encoder \
--encoder-json encoder.json \
--vocab-bpe vocab.bpe \
--inputs "$TASK_DATA_FOLDER/processed/$SPLIT.raw.$MYLANG" \
--outputs "$TASK_DATA_FOLDER/processed/$SPLIT.$MYLANG" \
--workers 60 \
--keep-empty;
done
done
# Remove output directory.
rm -rf "$TASK-bin" ||:
DEVPREF="$TASK_DATA_FOLDER/processed/val.LANG"
TESTPREF="$TASK_DATA_FOLDER/processed/test.LANG"
# Run fairseq preprocessing:
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
MYLANG="input$INPUT_TYPE"
python ../../fairseq_cli/preprocess.py \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.$MYLANG" \
--validpref "${DEVPREF//LANG/$MYLANG}" \
--testpref "${TESTPREF//LANG/$MYLANG}" \
--destdir "${OUTPUT}/$TASK-bin/$MYLANG" \
--workers 8 \
--srcdict $DICT/dict.txt;
done
# bin the data
python ../../fairseq_cli/preprocess.py \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.label" \
--validpref "${DEVPREF//LANG/'label'}" \
--destdir "${OUTPUT}/$TASK-bin/label" \
--workers 8;
done
|
ContextualSP/adaptershare/experiments/superglue/superglue_process.sh/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/superglue/superglue_process.sh",
"repo_id": "ContextualSP",
"token_count": 1422
}
| 227 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import copy
import imp
import sys, os
import torch
import tasks
import math
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import *
from data_utils.utils import AverageMeter
from mt_dnn.loss import LOSS_REGISTRY
from mt_dnn.matcher import SANBertNetwork
from mt_dnn.batcher import Collater
from mt_dnn.perturbation import SmartPerturbation
from mt_dnn.loss import *
from mt_dnn.optim import AdamaxW
from data_utils.task_def import TaskType, EncoderModelType
from experiments.exp_def import TaskDef
from data_utils.my_statics import DUMPY_STRING_FOR_EMPTY_ANS
from transformers.modeling_utils import unwrap_model
from transformers import PreTrainedModel
from transformers.trainer_pt_utils import get_parameter_names
logger = logging.getLogger(__name__)
def calculate_cosine_similarity(task_grads):
dot_prod = torch.mm(task_grads, task_grads.t())
norm = torch.norm(task_grads, p=2, dim=1).unsqueeze(0)
cos = dot_prod.div(torch.mm(norm.t(), norm))
return cos
class MTDNNModel(object):
def __init__(self,
opt,
device=None,
state_dict=None,
num_train_step=-1,
adapter=False,
adapter_args=None,
task_name='adapter',
id_task_map=None,
heldout_eval_dataset=None):
self.config = opt
self.updates = (
state_dict["updates"] if state_dict and "updates" in state_dict else 0
)
self.id_task_map = id_task_map
self.heldout_eval_dataset = heldout_eval_dataset
self.adapter = adapter
self.adapter_cache_path = adapter_args.adapter_cache_path
self.min_intra_simiarity = adapter_args.min_intra_simiarity
self.max_entropy_threshold = adapter_args.max_entropy_threshold
self.max_interference_degree = adapter_args.max_interference_degree
self.train_adapter_fusion = False
self.entropy_validate = False
self.local_updates = 0
self.device = device
self.train_loss = AverageMeter()
self.adv_loss = AverageMeter()
self.emb_val = AverageMeter()
self.eff_perturb = AverageMeter()
self.initial_from_local = True if state_dict else False
model = SANBertNetwork(opt, initial_from_local=self.initial_from_local, adapter_args=adapter_args, adapter=adapter, task_name=task_name)
self.diff_task_names = task_name.split('-')
self.current_task = self.diff_task_names[0]
if adapter_args.adapter_diff:
self.laryerwise_candidate_adapter = dict()
self.current_active_adapters = dict()
for i in range(len(model.bert.encoder.layer)):
self.laryerwise_candidate_adapter[f'L{str(i)}'] = \
dict([(name, f'{task_name}-L{str(i)}') for name in self.diff_task_names])
self.current_active_adapters[f'L{str(i)}'] = f'{task_name}-L{str(i)}'
# train adapter fusion with adapter differentiation
if self.train_adapter_fusion:
fusion_active = False
self.laryerwise_fusion_adapters = dict()
for i in range(model.bert.encoder.layer):
self.laryerwise_fusion_adapters[f'L{str(i)}'] = [fusion_active, f'Fusion-L{str(i)}']
self.total_param = sum(
[p.nelement() for p in model.parameters() if p.requires_grad]
)
if opt["cuda"]:
if self.config["local_rank"] != -1:
model = model.to(self.device)
else:
model = model.to(self.device)
self.network = model
if state_dict:
missing_keys, unexpected_keys = self.network.load_state_dict(
state_dict["state"], strict=False
)
optimizer_parameters = self._get_param_groups()
self._setup_optim(optimizer_parameters, state_dict, num_train_step)
self.optimizer.zero_grad()
# if self.config["local_rank"] not in [-1, 0]:
# torch.distributed.barrier()
if self.config["local_rank"] != -1:
self.mnetwork = torch.nn.parallel.DistributedDataParallel(
self.network,
device_ids=[self.config["local_rank"]],
output_device=self.config["local_rank"],
find_unused_parameters=True,
)
elif self.config["multi_gpu_on"]:
self.mnetwork = nn.DataParallel(self.network)
else:
self.mnetwork = self.network
self._setup_lossmap(self.config)
self._setup_kd_lossmap(self.config)
self._setup_adv_lossmap(self.config)
self._setup_adv_training(self.config)
self._setup_tokenizer()
# Adapter Differentiation
def _switch_model_task_mode(self, target_task):
# Switch the model on the target task mode
self.current_task = target_task
if not self.train_adapter_fusion:
adapter_names = []
for layer, adapter_name in self.current_active_adapters.items():
if adapter_name.startswith(f'{target_task}-') or f'-{target_task}-' in adapter_name:
adapter_names.append(adapter_name)
continue
else:
if len(adapter_name) > 0:
self._deactivate_adapter_runtime(adapter_name)
target_adapter = self.laryerwise_candidate_adapter[layer][target_task]
adapter_names.append(target_adapter)
self._activate_adapter_runtime(target_adapter)
self.current_active_adapters[layer] = target_adapter
# print('Switch to', target_task, adapter_names)
self.mnetwork.bert.train_adapter(adapter_names)
else:
adapter_fusion_names = []
undiff_adapters = []
for layer, fusion_state in self.laryerwise_fusion_adapters.items():
if fusion_state[0]:
adapter_fusion_names.append(fusion_state[1].split(','))
else:
undiff_adapter = self.laryerwise_candidate_adapter[layer][self.current_task]
undiff_adapters.append(undiff_adapter)
if len(adapter_fusion_names) > 0:
self.mnetwork.bert.train_adapter_and_fusion(undiff_adapters, adapter_fusion_names, unfreeze_adapters=True, target_task=target_task)
else:
self.mnetwork.bert.train_adapter(undiff_adapters)
if torch.cuda.is_available():
self.mnetwork.bert = self.mnetwork.bert.to(self.device)
self.update_optimizer_params_groups()
def _extract_adapter_grads(self, heldout_eval_datasets):
# record all the adapter gradients on held-out evaluation data
heldout_evl_nums = len(heldout_eval_datasets)
exist_adapter_cell_grads = [dict() for _ in range(heldout_evl_nums)]
# TODO
heldout_dataloaders = heldout_eval_datasets
for current_task in self.diff_task_names:
self._switch_model_task_mode(current_task)
for hi, heldout_dataloader in enumerate(heldout_dataloaders):
self.optimizer.zero_grad()
for _, (batch_meta, batch_data) in enumerate(heldout_dataloader):
batch_meta, batch_data = Collater.patch_data(self.device, batch_meta, batch_data)
task_name = self.id_task_map[batch_meta["task_id"]]
if task_name != current_task:
continue
# Calculate the gradient of the given heldout evaluation data
loss = self.compute_loss(batch_meta, batch_data)
loss.backward()
if self.entropy_validate:
tmp_adapter_cell_grads = dict()
for name, param in self.mnetwork.bert.named_parameters():
if 'adapter' in name and (f'.{current_task}-' in name or f'-{current_task}-' in name) and ',' not in name and param.requires_grad:
layer = name.split('.')[5].split('-')[-1]
if layer not in tmp_adapter_cell_grads:
tmp_adapter_cell_grads[layer] = {}
if current_task not in tmp_adapter_cell_grads[layer]:
tmp_adapter_cell_grads[layer][current_task] = []
tmp_adapter_cell_grads[layer][current_task].append(param.grad.clone().detach().view(1, -1))
for layer, task_grads in tmp_adapter_cell_grads.items():
for task, task_grad in task_grads.items():
cat_grad = torch.cat(task_grad, dim=1)
if layer not in exist_adapter_cell_grads[hi]:
exist_adapter_cell_grads[hi][layer] = {}
if task not in exist_adapter_cell_grads[hi][layer]:
exist_adapter_cell_grads[hi][layer][task] = []
exist_adapter_cell_grads[hi][layer][task].append(cat_grad)
self.optimizer.zero_grad()
if not self.entropy_validate:
for name, param in self.mnetwork.bert.named_parameters():
if 'adapter' in name and (f'.{current_task}-' in name or f'-{current_task}-' in name) and ',' not in name and param.requires_grad:
layer = name.split('.')[5].split('-')[-1]
if layer not in exist_adapter_cell_grads[hi]:
exist_adapter_cell_grads[hi][layer] = {}
if current_task not in exist_adapter_cell_grads[hi][layer]:
exist_adapter_cell_grads[hi][layer][current_task] = []
exist_adapter_cell_grads[hi][layer][current_task].append(param.grad.clone().detach().view(1, -1))
return exist_adapter_cell_grads
def _find_differentiatable_cell(self, exist_adapter_cell_grads):
# find all the differentiatable cells according to the
# MAIN algorithem in the paper
# output: {'original_cell': List(differentiated cells)}
# update global active cells
def _calculate_interference_degree(task_grad_mapping):
shared_task_len = len(task_grad_mapping)
assert shared_task_len > 1
task_grads = torch.stack([g.view(-1,) for g in task_grad_mapping.values()])
cos = calculate_cosine_similarity(task_grads)
interference_degree = []
for i in range(shared_task_len-1):
# interference degree equals to nagetive cosine similarity
interference_degree.append(-cos[i, i+1:])
interference_degree = torch.cat(interference_degree)
return list(task_grad_mapping.keys()), interference_degree
# To alleviate over-differentiaiton problem
if not self.entropy_validate:
laryerwise_adapter_grad_mappings = []
for exist_adapter_cell_grad in exist_adapter_cell_grads:
laryerwise_adapter_grad_mapping = {}
for layer, task_grads in exist_adapter_cell_grad.items():
for task_name, task_grad in task_grads.items():
adapter_name = self.laryerwise_candidate_adapter[layer][task_name]
if adapter_name not in laryerwise_adapter_grad_mapping:
laryerwise_adapter_grad_mapping[adapter_name] = {}
task_grad = torch.cat(task_grad, dim=1)
laryerwise_adapter_grad_mapping[adapter_name][task_name] = task_grad
laryerwise_adapter_grad_mappings.append(laryerwise_adapter_grad_mapping)
if len(laryerwise_adapter_grad_mappings) == 1:
merge_laryerwise_adapter_grad_mapping = laryerwise_adapter_grad_mappings[0]
else:
assert len(laryerwise_adapter_grad_mappings) == 2
merge_laryerwise_adapter_grad_mapping = dict()
laryerwise_adapter_grad_mapping1 = laryerwise_adapter_grad_mappings[0]
laryerwise_adapter_grad_mapping2 = laryerwise_adapter_grad_mappings[1]
differentiable_adapters = []
for adapter_name, task_grad_mapping in laryerwise_adapter_grad_mapping1.items():
if len(task_grad_mapping) > 1:
diff_flag = True
for task, grad in task_grad_mapping.items():
aux_grad = laryerwise_adapter_grad_mapping2[adapter_name][task]
grad_ = grad.view(1,-1)
aux_grad_ = aux_grad.view(1,-1)
dot_prod = calculate_cosine_similarity(torch.stack([grad_.view(-1,), aux_grad_.view(-1,)]))
dot_prod = dot_prod[0][1]
print('>>>> dot_prod', dot_prod)
if dot_prod < math.cos(math.pi / self.min_intra_simiarity):
diff_flag = False
break
if diff_flag:
differentiable_adapters.append(adapter_name)
for adapter_name in differentiable_adapters:
merge_laryerwise_adapter_grad_mapping[adapter_name] = dict()
for task, grad in laryerwise_adapter_grad_mapping1[adapter_name].items():
aux_grad = laryerwise_adapter_grad_mapping2[adapter_name][task]
merge_laryerwise_adapter_grad_mapping[adapter_name][task] = grad + aux_grad
else:
merge_laryerwise_adapter_grad_mapping = dict()
laryerwise_adapter_grad_mappings = []
for exist_adapter_cell_grad in exist_adapter_cell_grads:
laryerwise_adapter_grad_mapping = {}
for layer, task_grads in exist_adapter_cell_grad.items():
for task_name, task_grad in task_grads.items():
adapter_name = self.laryerwise_candidate_adapter[layer][task_name]
if adapter_name not in laryerwise_adapter_grad_mapping:
laryerwise_adapter_grad_mapping[adapter_name] = {}
task_grad = torch.cat(task_grad, dim=0)
laryerwise_adapter_grad_mapping[adapter_name][task_name] = task_grad
laryerwise_adapter_grad_mappings.append(laryerwise_adapter_grad_mapping)
if len(laryerwise_adapter_grad_mappings) == 1:
laryerwise_adapter_grad_mapping = laryerwise_adapter_grad_mappings[0]
else:
assert len(laryerwise_adapter_grad_mappings) == 2
laryerwise_adapter_grad_mapping1 = laryerwise_adapter_grad_mappings[0]
laryerwise_adapter_grad_mapping2 = laryerwise_adapter_grad_mappings[1]
for adapter_name, task_grad_mapping in laryerwise_adapter_grad_mapping1.items():
if len(task_grad_mapping) > 1:
for task, grad in task_grad_mapping.items():
aux_grad = laryerwise_adapter_grad_mapping2[adapter_name][task]
laryerwise_adapter_grad_mapping1[adapter_name][task] = torch.cat([grad, aux_grad], dim=0)
laryerwise_adapter_grad_mapping = laryerwise_adapter_grad_mapping1
differentiable_adapters = []
for adapter_name, task_grads in laryerwise_adapter_grad_mapping.items():
diff_flag = True
for task, grad in task_grads.items():
ave_grad = torch.mean(grad, dim=0)
hd_size = grad.size(0)
positive_grad = 0
for hd_i in range(hd_size):
grad_ = grad[hd_i]
cos = calculate_cosine_similarity(torch.stack([grad_.view(-1,), ave_grad.view(-1,)]))
if cos[0][1] > math.cos(math.pi / self.max_entropy_threshold):
positive_grad += 1
positive_prob = positive_grad / hd_size
if positive_prob == 0 or positive_prob == 1:
task_entropy = 0
else:
task_entropy = -(positive_prob * math.log(positive_prob, 2) + (1-positive_prob) * math.log(1-positive_prob, 2))
if task_entropy > -(0.8 * math.log(0.8, 2) + 0.2 * math.log(0.2, 2)) or positive_prob < 0.5:
diff_flag = False
break
if diff_flag:
differentiable_adapters.append(adapter_name)
for adapter_name in differentiable_adapters:
merge_laryerwise_adapter_grad_mapping[adapter_name] = dict()
for task, grad in laryerwise_adapter_grad_mapping[adapter_name].items():
merge_laryerwise_adapter_grad_mapping[adapter_name][task] = torch.mean(grad, dim=0)
differentiated_cell_mapping = {}
for adapter_name, task_grad_mapping in merge_laryerwise_adapter_grad_mapping.items():
if len(task_grad_mapping) == 1:
continue
else:
tasks, interference_degrees = _calculate_interference_degree(task_grad_mapping)
max_interference_degree = torch.max(interference_degrees)
print('>>> max_interference_degree', max_interference_degree)
task_len = len(tasks)
if max_interference_degree > self.max_interference_degree:
if layer not in differentiated_cell_mapping:
differentiated_cell_mapping[adapter_name] = {}
# start to differentiate
flag = 0
group1 = []
group2 = []
task_distance = {}
for i in range(task_len-1):
for j in range(i+1, task_len):
if interference_degrees[flag] == max_interference_degree:
group1.append(i)
group2.append(j)
task_distance[(i,j)] = interference_degrees[flag]
flag += 1
for i in range(task_len):
if i in group1 or i in group2:
continue
distance_to_g1 = []
for j in group1:
a = i
b = j
if i == j:
continue
if i > j:
a,b = b,a
distance_to_g1.append(task_distance[(a,b)])
distance_to_g2 = []
for k in group2:
a = i
b = k
if i == k:
continue
if i > k:
a,b = b,a
distance_to_g2.append(task_distance[(a,b)])
distance_to_g1 = torch.stack(distance_to_g1).view(-1,)
distance_to_g2 = torch.stack(distance_to_g2).view(-1,)
if torch.max(distance_to_g1) < torch.max(distance_to_g2):
group1.append(i)
else:
group2.append(i)
group1 = [tasks[t] for t in group1]
group2 = [tasks[t] for t in group2]
differentiated_cell_mapping[adapter_name] = [group1, group2]
# print('>>> differentiated_cell_mapping', differentiated_cell_mapping)
return differentiated_cell_mapping
def _update_differentiated_model(self, differentiated_cells):
# add new differentiated cells in the model and load
# the corresponding params in the optimizer
for adapter_name, split_group in differentiated_cells.items():
layer = adapter_name.split('-')[-1]
adapter_group1 = '-'.join(split_group[0]) + f'-{layer}'
adapter_group2 = '-'.join(split_group[1]) + f'-{layer}'
if adapter_name == self.current_active_adapters[layer]:
self._deactivate_adapter_runtime(adapter_name)
self._copy_adapter_runtime(adapter_group1, adapter_name)
self._copy_adapter_runtime(adapter_group2, adapter_name)
self.current_active_adapters[layer] = ''
for task in split_group[0]:
self.laryerwise_candidate_adapter[layer][task] = adapter_group1
for task in split_group[1]:
self.laryerwise_candidate_adapter[layer][task] = adapter_group2
def _update_differentiated_fusion_model(self, differentiated_cells):
# add new differentiated cells in the model and load
# the corresponding params in the optimizer
processed_fusion_layer = []
for adapter_name, split_group in differentiated_cells.items():
layer = adapter_name.split('-')[-1]
adapter_group1 = '-'.join(split_group[0]) + f'-{layer}'
adapter_group2 = '-'.join(split_group[1]) + f'-{layer}'
layer_fusion_active = self.laryerwise_fusion_adapters[layer][0]
self._deactivate_adapter_runtime(adapter_name)
if layer_fusion_active and layer not in processed_fusion_layer:
self._deactivate_adapter_fusion_runtime(self.laryerwise_fusion_adapters[layer][1])
if layer not in processed_fusion_layer:
processed_fusion_layer.append(layer)
self._copy_adapter_runtime(adapter_group1, adapter_name)
self._copy_adapter_runtime(adapter_group2, adapter_name)
for task in split_group[0]:
self.laryerwise_candidate_adapter[layer][task] = adapter_group1
for task in split_group[1]:
self.laryerwise_candidate_adapter[layer][task] = adapter_group2
for layer in processed_fusion_layer:
layer_fusion_active = self.laryerwise_fusion_adapters[layer][0]
layer_adapters = list(set(list(self.laryerwise_candidate_adapter[layer].values())))
layer_fusion_name = ','.join(layer_adapters)
if not layer_fusion_active:
self._create_adapter_fusion_runtime(layer_fusion_name)
self.laryerwise_fusion_adapters[layer][0] = True
else:
self._copy_adapter_fusion_runtime(layer_fusion_name, self.laryerwise_fusion_adapters[layer][1])
self.laryerwise_fusion_adapters[layer][1] = layer_fusion_name
def _differentiate_operate(self):
exist_adapter_cell_grads = self._extract_adapter_grads(self.heldout_eval_dataset)
diff_cells = self._find_differentiatable_cell(exist_adapter_cell_grads)
print(diff_cells)
if not self.train_adapter_fusion:
self._update_differentiated_model(diff_cells)
else:
self._update_differentiated_fusion_model(diff_cells)
def _calculate_differentiated_rate(self):
initial_adapter_num = len(self.laryerwise_candidate_adapter)
current_adapter_names = []
for layer in self.laryerwise_candidate_adapter.keys():
for task_name in self.laryerwise_candidate_adapter[layer].keys():
current_adapter_names.append(self.laryerwise_candidate_adapter[layer][task_name])
current_adapter_num = len(list(set(current_adapter_names)))
return current_adapter_num / initial_adapter_num
def update_optimizer_params_groups(self):
decay_parameters = get_parameter_names(self.mnetwork.bert, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
add_decay_params = [p for n, p in self.mnetwork.bert.named_parameters() if n in decay_parameters and p.requires_grad and n not in self.candidate_params]
add_not_decay_params = [p for n, p in self.mnetwork.bert.named_parameters() if n not in decay_parameters and p.requires_grad and n not in self.candidate_params]
for n, p in self.mnetwork.bert.named_parameters():
if p.requires_grad and n not in self.candidate_params:
self.candidate_params.append(n)
optimizer_grouped_parameters = []
if len(add_decay_params) > 0:
optimizer_grouped_parameters.append(
{
"params": add_decay_params,
"weight_decay": self.config['weight_decay'],
}
)
if len(add_not_decay_params) > 0:
optimizer_grouped_parameters.append(
{
"params": add_not_decay_params,
"weight_decay": 0.0,
}
)
for param_group in optimizer_grouped_parameters:
self.optimizer.add_param_group(param_group)
def _deactivate_adapter_runtime(self, adapter_name):
save_path = os.path.join(self.adapter_cache_path, adapter_name)
if not os.path.exists(save_path):
os.mkdir(save_path)
self.mnetwork.bert.save_adapter(save_path, adapter_name)
self.mnetwork.bert.delete_adapter(adapter_name)
def _deactivate_adapter_fusion_runtime(self, adapter_fusion_name):
save_path = os.path.join(self.adapter_cache_path, adapter_fusion_name)
if not os.path.exists(save_path):
os.mkdir(save_path)
self.mnetwork.bert.save_adapter_fusion(save_path, adapter_fusion_name)
self.mnetwork.bert.delete_adapter_fusion(adapter_fusion_name)
def _activate_adapter_runtime(self, adapter_name):
save_path = os.path.join(self.adapter_cache_path, adapter_name)
if not os.path.exists(save_path):
os.mkdir(save_path)
self.mnetwork.bert.load_adapter(save_path, load_as=adapter_name, set_active=True)
def _copy_adapter_fusion_runtime(self, new_adapter_fusion_name, adapter_fusion_name):
save_path = os.path.join(self.adapter_cache_path, adapter_fusion_name)
assert os.path.exists(save_path)
self.mnetwork.bert.load_adapter_fusion(save_path, load_as=new_adapter_fusion_name, set_active=True)
def _create_adapter_fusion_runtime(self, adapter_fusion_name):
self.mnetwork.bert.add_adapter_fusion(adapter_fusion_name, set_active=True)
def _copy_adapter_runtime(self, target_adapter, source_adapter):
save_path = os.path.join(self.adapter_cache_path, source_adapter)
if not os.path.exists(save_path):
os.mkdir(save_path)
self.mnetwork.bert.load_adapter(save_path, load_as=target_adapter, set_active=True)
save_path = os.path.join(self.adapter_cache_path, target_adapter)
if not os.path.exists(save_path):
os.mkdir(save_path)
self.mnetwork.bert.save_adapter(save_path, target_adapter)
def _setup_adv_training(self, config):
self.adv_teacher = None
if config.get("adv_train", False):
self.adv_teacher = SmartPerturbation(
config["adv_epsilon"],
config["multi_gpu_on"],
config["adv_step_size"],
config["adv_noise_var"],
config["adv_p_norm"],
config["adv_k"],
config["fp16"],
config["encoder_type"],
loss_map=self.adv_task_loss_criterion,
norm_level=config["adv_norm_level"],
)
def _get_param_groups(self):
no_decay = ["bias", "gamma", "beta", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p
for n, p in self.network.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in self.network.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
self.candidate_params = [n for n, _ in self.network.bert.named_parameters()]
return optimizer_parameters
def _setup_optim(self, optimizer_parameters, state_dict=None, num_train_step=-1):
if self.config['optimizer'] == 'sgd':
self.optimizer = optim.SGD(optimizer_parameters, self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adamax':
self.optimizer = AdamaxW(optimizer_parameters,
lr=self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adam':
self.optimizer = optim.AdamW(optimizer_parameters,
lr=self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
if state_dict and 'optimizer' in state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
if state_dict and "optimizer" in state_dict:
self.optimizer.load_state_dict(state_dict["optimizer"])
# if self.config["fp16"]:
# try:
# from apex import amp
# global amp
# except ImportError:
# raise ImportError(
# "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
# )
# model, optimizer = amp.initialize(
# self.network, self.optimizer, opt_level=self.config["fp16_opt_level"]
# )
# self.network = model
# self.optimizer = optimizer
# # set up scheduler
self.scheduler = None
scheduler_type = self.config['scheduler_type']
warmup_steps = self.config['warmup'] * num_train_step
if scheduler_type == 3:
from transformers import get_polynomial_decay_schedule_with_warmup
self.scheduler = get_polynomial_decay_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
if scheduler_type == 2:
from transformers import get_constant_schedule_with_warmup
self.scheduler = get_constant_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps
)
elif scheduler_type == 1:
from transformers import get_cosine_schedule_with_warmup
self.scheduler = get_cosine_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
else:
from transformers import get_linear_schedule_with_warmup
self.scheduler = get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
def _setup_lossmap(self, config):
task_def_list = config["task_def_list"]
self.task_loss_criterion = []
for idx, task_def in enumerate(task_def_list):
cs = task_def.loss
lc = LOSS_REGISTRY[cs](name="Loss func of task {}: {}".format(idx, cs))
self.task_loss_criterion.append(lc)
def _setup_kd_lossmap(self, config):
task_def_list = config["task_def_list"]
self.kd_task_loss_criterion = []
if config.get("mkd_opt", 0) > 0:
for idx, task_def in enumerate(task_def_list):
cs = task_def.kd_loss
assert cs is not None
lc = LOSS_REGISTRY[cs](
name="KD Loss func of task {}: {}".format(idx, cs)
)
self.kd_task_loss_criterion.append(lc)
def _setup_adv_lossmap(self, config):
task_def_list = config["task_def_list"]
self.adv_task_loss_criterion = []
if config.get("adv_train", False):
for idx, task_def in enumerate(task_def_list):
cs = task_def.adv_loss
assert cs is not None
lc = LOSS_REGISTRY[cs](
name="Adv Loss func of task {}: {}".format(idx, cs)
)
self.adv_task_loss_criterion.append(lc)
def _setup_tokenizer(self):
try:
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.config["init_checkpoint"],
cache_dir=self.config["transformer_cache"],
)
except:
self.tokenizer = None
def _to_cuda(self, tensor):
if tensor is None:
return tensor
if isinstance(tensor, list) or isinstance(tensor, tuple):
# y = [e.cuda(non_blocking=True) for e in tensor]
y = [e.to(self.device) for e in tensor]
for e in y:
e.requires_grad = False
else:
# y = tensor.cuda(non_blocking=True)
y = tensor.to(self.device)
y.requires_grad = False
return y
def compute_loss(self, batch_meta, batch_data):
self.network.train()
y = batch_data[batch_meta["label"]]
y = self._to_cuda(y) if self.config["cuda"] else y
if batch_meta["task_def"]["task_type"] == TaskType.SeqenceGeneration:
seq_length = y.size(1)
y = y.view(-1)
task_id = batch_meta["task_id"]
inputs = batch_data[: batch_meta["input_len"]]
if len(inputs) == 3:
inputs.append(None)
inputs.append(None)
inputs.append(task_id)
if "y_token_id" in batch_meta:
inputs.append(batch_data[batch_meta["y_token_id"]])
weight = None
if self.config.get("weighted_on", False):
if self.config["cuda"]:
weight = batch_data[batch_meta["factor"]].cuda(non_blocking=True)
else:
weight = batch_data[batch_meta["factor"]]
# fw to get logits
logits = self.mnetwork(*inputs)
# compute loss
loss = 0
if self.task_loss_criterion[task_id] and (y is not None):
loss_criterion = self.task_loss_criterion[task_id]
if (
isinstance(loss_criterion, RankCeCriterion)
and batch_meta["pairwise_size"] > 1
):
# reshape the logits for ranking.
loss = self.task_loss_criterion[task_id](
logits,
y,
weight,
ignore_index=-1,
pairwise_size=batch_meta["pairwise_size"],
)
elif batch_meta["task_def"]["task_type"] == TaskType.SeqenceGeneration:
weight = (
(
1.0
/ torch.sum(
(y > -1).float().view(-1, seq_length), 1, keepdim=True
)
)
.repeat(1, seq_length)
.view(-1)
)
loss = self.task_loss_criterion[task_id](
logits, y, weight, ignore_index=-1
)
else:
loss = self.task_loss_criterion[task_id](
logits, y, weight, ignore_index=-1
)
# compute kd loss
if self.config.get("mkd_opt", 0) > 0 and ("soft_label" in batch_meta):
soft_labels = batch_meta["soft_label"]
soft_labels = (
self._to_cuda(soft_labels) if self.config["cuda"] else soft_labels
)
kd_lc = self.kd_task_loss_criterion[task_id]
kd_loss = (
kd_lc(logits, soft_labels, weight, ignore_index=-1) if kd_lc else 0
)
loss = loss + kd_loss
# adv training
if self.config.get("adv_train", False) and self.adv_teacher:
# task info
task_type = batch_meta["task_def"]["task_type"]
adv_inputs = (
[self.mnetwork, logits]
+ inputs
+ [task_type, batch_meta.get("pairwise_size", 1)]
)
adv_loss, emb_val, eff_perturb = self.adv_teacher.forward(*adv_inputs)
loss = loss + self.config["adv_alpha"] * adv_loss
batch_size = batch_data[batch_meta["token_id"]].size(0)
# rescale loss as dynamic batching
if self.config["bin_on"]:
loss = loss * (1.0 * batch_size / self.config["batch_size"])
if self.config["local_rank"] != -1:
# print('Rank ', self.config['local_rank'], ' loss ', loss)
copied_loss = copy.deepcopy(loss.data)
torch.distributed.all_reduce(copied_loss)
copied_loss = copied_loss / self.config["world_size"]
self.train_loss.update(copied_loss.item(), batch_size)
else:
self.train_loss.update(loss.item(), batch_size)
if self.config.get("adv_train", False) and self.adv_teacher:
if self.config["local_rank"] != -1:
copied_adv_loss = copy.deepcopy(adv_loss.data)
torch.distributed.all_reduce(copied_adv_loss)
copied_adv_loss = copied_adv_loss / self.config["world_size"]
self.adv_loss.update(copied_adv_loss.item(), batch_size)
copied_emb_val = copy.deepcopy(emb_val.data)
torch.distributed.all_reduce(copied_emb_val)
copied_emb_val = copied_emb_val / self.config["world_size"]
self.emb_val.update(copied_emb_val.item(), batch_size)
copied_eff_perturb = copy.deepcopy(eff_perturb.data)
torch.distributed.all_reduce(copied_eff_perturb)
copied_eff_perturb = copied_eff_perturb / self.config["world_size"]
self.eff_perturb.update(copied_eff_perturb.item(), batch_size)
else:
self.adv_loss.update(adv_loss.item(), batch_size)
self.emb_val.update(emb_val.item(), batch_size)
self.eff_perturb.update(eff_perturb.item(), batch_size)
# scale loss
loss = loss / self.config.get("grad_accumulation_step", 1)
return loss
def update(self, batch_meta, batch_data):
loss = self.compute_loss(batch_meta, batch_data)
# if self.config["fp16"]:
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
# else:
loss.backward()
self.local_updates += 1
if self.local_updates % self.config.get("grad_accumulation_step", 1) == 0:
if self.config["global_grad_clipping"] > 0:
# if self.config["fp16"]:
# torch.nn.utils.clip_grad_norm_(
# amp.master_params(self.optimizer),
# self.config["global_grad_clipping"],
# )
# else:
torch.nn.utils.clip_grad_norm_(
self.network.parameters(), self.config["global_grad_clipping"]
)
self.updates += 1
# reset number of the grad accumulation
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler:
self.scheduler.step()
def encode(self, batch_meta, batch_data):
self.network.eval()
inputs = batch_data[:3]
sequence_output = self.network.encode(*inputs)[0]
return sequence_output
# TODO: similar as function extract, preserve since it is used by extractor.py
# will remove after migrating to transformers package
def extract(self, batch_meta, batch_data):
self.network.eval()
# 'token_id': 0; 'segment_id': 1; 'mask': 2
inputs = batch_data[:3]
all_encoder_layers, pooled_output = self.mnetwork.bert(*inputs)
return all_encoder_layers, pooled_output
def predict(self, batch_meta, batch_data):
self.network.eval()
task_id = batch_meta["task_id"]
task_def = TaskDef.from_dict(batch_meta["task_def"])
task_type = task_def.task_type
task_obj = tasks.get_task_obj(task_def)
inputs = batch_data[: batch_meta["input_len"]]
if len(inputs) == 3:
inputs.append(None)
inputs.append(None)
inputs.append(task_id)
if task_type == TaskType.SeqenceGeneration:
# y_idx, #3 -> gen
inputs.append(None)
inputs.append(3)
score = self.mnetwork(*inputs)
if task_obj is not None:
score, predict = task_obj.test_predict(score)
elif task_type == TaskType.Ranking:
score = score.contiguous().view(-1, batch_meta["pairwise_size"])
assert task_type == TaskType.Ranking
score = F.softmax(score, dim=1)
score = score.data.cpu()
score = score.numpy()
predict = np.zeros(score.shape, dtype=int)
positive = np.argmax(score, axis=1)
for idx, pos in enumerate(positive):
predict[idx, pos] = 1
predict = predict.reshape(-1).tolist()
score = score.reshape(-1).tolist()
return score, predict, batch_meta["true_label"]
elif task_type == TaskType.SeqenceLabeling:
mask = batch_data[batch_meta["mask"]]
score = score.contiguous()
score = score.data.cpu()
score = score.numpy()
predict = np.argmax(score, axis=1).reshape(mask.size()).tolist()
valied_lenght = mask.sum(1).tolist()
final_predict = []
for idx, p in enumerate(predict):
final_predict.append(p[: valied_lenght[idx]])
score = score.reshape(-1).tolist()
return score, final_predict, batch_meta["label"]
elif task_type == TaskType.Span or task_type == TaskType.SpanYN:
predictions = []
features = []
for idx, offset in enumerate(batch_meta["offset_mapping"]):
token_is_max_context = (
batch_meta["token_is_max_context"][idx]
if batch_meta.get("token_is_max_context", None)
else None
)
sample_id = batch_meta["uids"][idx]
if "label" in batch_meta:
feature = {
"offset_mapping": offset,
"token_is_max_context": token_is_max_context,
"uid": sample_id,
"context": batch_meta["context"][idx],
"answer": batch_meta["answer"][idx],
"label": batch_meta["label"][idx],
}
else:
feature = {
"offset_mapping": offset,
"token_is_max_context": token_is_max_context,
"uid": sample_id,
"context": batch_meta["context"][idx],
"answer": batch_meta["answer"][idx],
}
if "null_ans_index" in batch_meta:
feature["null_ans_index"] = batch_meta["null_ans_index"]
features.append(feature)
start, end = score
start = start.contiguous()
start = start.data.cpu()
start = start.numpy().tolist()
end = end.contiguous()
end = end.data.cpu()
end = end.numpy().tolist()
return (start, end), predictions, features
elif task_type == TaskType.SeqenceGeneration:
predicts = self.tokenizer.batch_decode(score, skip_special_tokens=True)
predictions = {}
golds = {}
for idx, predict in enumerate(predicts):
sample_id = batch_meta["uids"][idx]
answer = batch_meta["answer"][idx]
predict = predict.strip()
if predict == DUMPY_STRING_FOR_EMPTY_ANS:
predict = ""
predictions[sample_id] = predict
golds[sample_id] = answer
score = score.contiguous()
score = score.data.cpu()
score = score.numpy().tolist()
return score, predictions, golds
elif task_type == TaskType.ClozeChoice:
score = score.contiguous().view(-1)
score = score.data.cpu()
score = score.numpy()
copy_score = score.tolist()
answers = batch_meta["answer"]
choices = batch_meta["choice"]
chunks = batch_meta["pairwise_size"]
uids = batch_meta["uids"]
predictions = {}
golds = {}
for chunk in chunks:
uid = uids[0]
answer = eval(answers[0])
choice = eval(choices[0])
answers = answers[chunk:]
choices = choices[chunk:]
current_p = score[:chunk]
score = score[chunk:]
positive = np.argmax(current_p)
predict = choice[positive]
predictions[uid] = predict
golds[uid] = answer
return copy_score, predictions, golds
else:
raise ValueError("Unknown task_type: %s" % task_type)
return score, predict, batch_meta["label"]
def save(self, filename):
if isinstance(self.mnetwork, torch.nn.parallel.DistributedDataParallel):
model = self.mnetwork.module
else:
model = self.network
if not self.adapter:
# network_state = dict([(k, v.cpu()) for k, v in self.network.state_dict().items()])
network_state = dict([(k, v.cpu()) for k, v in model.state_dict().items()])
params = {
"state": network_state,
"optimizer": self.optimizer.state_dict(),
"config": self.config,
}
torch.save(params, filename)
logger.info("model saved to {}".format(filename))
else:
self.save_all_adapters('/'.join(filename.split('/')[:-1]))
network_state = dict([(k, v.cpu()) for k, v in model.state_dict().items() if 'bert' not in k])
params = {
"state": network_state,
"optimizer": self.optimizer.state_dict(),
"config": self.config,
}
torch.save(params, filename)
logger.info("model saved to {}".format(filename))
def load(self, checkpoint):
model_state_dict = torch.load(checkpoint)
if "state" in model_state_dict:
self.network.load_state_dict(model_state_dict["state"], strict=False)
if "optimizer" in model_state_dict:
self.optimizer.load_state_dict(model_state_dict["optimizer"])
if "config" in model_state_dict:
self.config.update(model_state_dict["config"])
if isinstance(self.mnetwork, torch.nn.parallel.DistributedDataParallel):
model = self.mnetwork.module
else:
model = self.network
if self.adapter:
self._load_adapters(model.bert, '/'.join(checkpoint.split('/')[:-1]))
def cuda(self):
self.network.cuda()
def _load_adapters(self, model, resume_from_checkpoint):
adapter_loaded = False
for file_name in os.listdir(resume_from_checkpoint):
if os.path.isdir(os.path.join(resume_from_checkpoint, file_name)):
if "," not in file_name and "adapter_config.json" in os.listdir(
os.path.join(resume_from_checkpoint, file_name)
):
model.load_adapter(os.path.join(os.path.join(resume_from_checkpoint, file_name)))
adapter_loaded = True
return adapter_loaded
def save_all_adapters(self, output_dir=None):
import json
# If we are executing this function, we are the process zero, so we don't check for that.
# output_dir = output_dir if output_dir is not None else self.output_dir
# os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving adapter checkpoint to {output_dir}")
if not self.train_adapter_fusion:
activate_adapters = []
for layer in self.laryerwise_candidate_adapter.keys():
for target_task in self.laryerwise_candidate_adapter[layer].keys():
activate_adapters.append(self.laryerwise_candidate_adapter[layer][target_task])
activate_adapters = list(set(activate_adapters))
current_activate_adapters = list(self.current_active_adapters.values())
for adapter in activate_adapters:
if adapter in current_activate_adapters:
self.mnetwork.bert.save_adapter(os.path.join(output_dir, adapter), adapter)
else:
adapter_path = f'{self.adapter_cache_path}/{adapter}'
os.system(f'cp -rf {adapter_path} {output_dir}')
else:
activate_adapter_fusions = []
for layer in self.laryerwise_candidate_adapter.keys():
layer_activate_adapters = list(set(list(self.laryerwise_candidate_adapter[layer].values())))
if len(layer_activate_adapters) == 1:
adapter = layer_activate_adapters[0]
self.mnetwork.bert.save_adapter(os.path.join(output_dir, adapter), adapter)
else:
assert self.laryerwise_fusion_adapters[layer][0]
adapter_fusion = self.laryerwise_fusion_adapters[layer][1]
# print('>>> ADAPTER FUSION', adapter_fusion)
activate_adapter_fusions.append(adapter_fusion)
self.mnetwork.bert.save_adapter_fusion(os.path.join(output_dir, adapter_fusion), adapter_fusion)
for adapter in adapter_fusion.split(','):
self.mnetwork.bert.save_adapter(os.path.join(output_dir, adapter), adapter)
json.dump(activate_adapter_fusions, open(os.path.join(output_dir, 'activate_adapter_fusions.json'), 'w'), indent=4)
json.dump(self.laryerwise_candidate_adapter, open(os.path.join(output_dir, 'adapter_structure.json'), 'w'), indent=4)
|
ContextualSP/adaptershare/mt_dnn/adapter_diff_model.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/adapter_diff_model.py",
"repo_id": "ContextualSP",
"token_count": 26955
}
| 228 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from data_utils.task_def import TaskType
from module.san import SANClassifier
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
class MTDNNTask:
def __init__(self, task_def):
self._task_def = task_def
def input_parse_label(self, label: str):
raise NotImplementedError()
@staticmethod
def input_is_valid_sample(sample, max_len):
return len(sample['token_id']) <= max_len
@staticmethod
def train_prepare_label(labels):
raise NotImplementedError()
@staticmethod
def train_prepare_soft_label(softlabels):
raise NotImplementedError()
@staticmethod
def train_build_task_layer(decoder_opt, hidden_size, lab, opt, prefix, dropout):
if decoder_opt == 1:
out_proj = SANClassifier(hidden_size, hidden_size, lab, opt, prefix, dropout=dropout)
else:
out_proj = nn.Linear(hidden_size, lab)
return out_proj
# TODO redesign hypers
@staticmethod
def train_forward(sequence_output, pooled_output, premise_mask, hyp_mask, decoder_opt, dropout_layer, task_layer):
if decoder_opt == 1:
max_query = hyp_mask.size(1)
assert max_query > 0
assert premise_mask is not None
assert hyp_mask is not None
hyp_mem = sequence_output[:, :max_query, :]
logits = task_layer(sequence_output, hyp_mem, premise_mask, hyp_mask)
else:
pooled_output = dropout_layer(pooled_output)
logits = task_layer(pooled_output)
return logits
@staticmethod
def test_prepare_label(batch_info, labels):
batch_info['label'] = labels
@staticmethod
def test_predict(score):
raise NotImplementedError()
def register_task(name):
"""
@register_task('Classification')
class ClassificationTask(MTDNNTask):
(...)
.. note::
All Tasks must implement the :class:`~MTDNNTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError('Cannot register duplicate task ({})'.format(name))
if not issubclass(cls, MTDNNTask):
raise ValueError('Task ({}: {}) must extend MTDNNTask'.format(name, cls.__name__))
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls
def get_task_obj(task_def):
task_name = task_def.task_type.name
task_cls = TASK_REGISTRY.get(task_name, None)
if task_cls is None:
return None
return task_cls(task_def)
@register_task('Regression')
class RegressionTask(MTDNNTask):
def __init__(self, task_def):
super().__init__(task_def)
def input_parse_label(self, label: str):
return float(label)
@staticmethod
def train_prepare_label(labels):
return torch.FloatTensor(labels)
@staticmethod
def train_prepare_soft_label(softlabels):
return torch.FloatTensor(softlabels)
@staticmethod
def test_predict(score):
score = score.data.cpu()
score = score.numpy()
predict = np.argmax(score, axis=1).tolist()
score = score.reshape(-1).tolist()
return score, predict
@register_task('Classification')
class ClassificationTask(MTDNNTask):
def __init__(self, task_def):
super().__init__(task_def)
def input_parse_label(self, label: str):
label_dict = self._task_def.label_vocab
if label_dict is not None:
return label_dict[label]
else:
return int(label)
@staticmethod
def train_prepare_label(labels):
return torch.LongTensor(labels)
@staticmethod
def train_prepare_soft_label(softlabels):
return torch.FloatTensor(softlabels)
@staticmethod
def test_predict(score):
score = F.softmax(score, dim=1)
score = score.data.cpu()
score = score.numpy()
predict = np.argmax(score, axis=1).tolist()
score = score.reshape(-1).tolist()
return score, predict
# TODO
# Span/SpanYN/SeqenceLabeling/SeqenceGeneration
|
ContextualSP/adaptershare/tasks/__init__.py/0
|
{
"file_path": "ContextualSP/adaptershare/tasks/__init__.py",
"repo_id": "ContextualSP",
"token_count": 1963
}
| 229 |
#!/bin/sh
tmpfile=$(mktemp)
head -n 2 $1 > ${tmpfile}
cat ${tmpfile} > $1
rm -f ${tmpfile}
|
ContextualSP/adaptershare/tests/sample_data/input/my_head.sh/0
|
{
"file_path": "ContextualSP/adaptershare/tests/sample_data/input/my_head.sh",
"repo_id": "ContextualSP",
"token_count": 43
}
| 230 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import argparse
import json
import os
import random
from datetime import datetime
from pprint import pprint
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, BatchSampler
from pretrained_models import *
# from tensorboardX import SummaryWriter
# from torch.utils.tensorboard import SummaryWriter
from experiments.exp_def import TaskDefs
from mt_dnn.inference import eval_model, extract_encoding
from data_utils.log_wrapper import create_logger
from data_utils.task_def import EncoderModelType
from data_utils.utils import set_environment
from mt_dnn.batcher import (
SingleTaskDataset,
MultiTaskDataset,
Collater,
MultiTaskBatchSampler,
DistMultiTaskBatchSampler,
DistSingleTaskBatchSampler,
)
from mt_dnn.batcher import DistTaskDataset
from mt_dnn.model import MTDNNModel
def model_config(parser):
parser.add_argument("--update_bert_opt", default=0, type=int)
parser.add_argument("--multi_gpu_on", action="store_true")
parser.add_argument(
"--mem_cum_type", type=str, default="simple", help="bilinear/simple/defualt"
)
parser.add_argument("--answer_num_turn", type=int, default=5)
parser.add_argument("--answer_mem_drop_p", type=float, default=0.1)
parser.add_argument("--answer_att_hidden_size", type=int, default=128)
parser.add_argument(
"--answer_att_type",
type=str,
default="bilinear",
help="bilinear/simple/defualt",
)
parser.add_argument(
"--answer_rnn_type", type=str, default="gru", help="rnn/gru/lstm"
)
parser.add_argument(
"--answer_sum_att_type",
type=str,
default="bilinear",
help="bilinear/simple/defualt",
)
parser.add_argument("--answer_merge_opt", type=int, default=1)
parser.add_argument("--answer_mem_type", type=int, default=1)
parser.add_argument("--max_answer_len", type=int, default=10)
parser.add_argument("--answer_dropout_p", type=float, default=0.1)
parser.add_argument("--answer_weight_norm_on", action="store_true")
parser.add_argument("--dump_state_on", action="store_true")
parser.add_argument("--answer_opt", type=int, default=1, help="0,1")
parser.add_argument(
"--pooler_actf", type=str, default="tanh", help="tanh/relu/gelu"
)
parser.add_argument("--mtl_opt", type=int, default=0)
parser.add_argument("--ratio", type=float, default=0)
parser.add_argument("--mix_opt", type=int, default=0)
parser.add_argument("--max_seq_len", type=int, default=512)
parser.add_argument("--init_ratio", type=float, default=1)
parser.add_argument("--encoder_type", type=int, default=EncoderModelType.BERT)
parser.add_argument("--num_hidden_layers", type=int, default=-1)
# BERT pre-training
parser.add_argument("--bert_model_type", type=str, default="bert-base-uncased")
parser.add_argument("--do_lower_case", action="store_true")
parser.add_argument("--masked_lm_prob", type=float, default=0.15)
parser.add_argument("--short_seq_prob", type=float, default=0.2)
parser.add_argument("--max_predictions_per_seq", type=int, default=128)
# bin samples
parser.add_argument("--bin_on", action="store_true")
parser.add_argument("--bin_size", type=int, default=64)
parser.add_argument("--bin_grow_ratio", type=int, default=0.5)
# dist training
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--world_size", type=int, default=1, help="For distributed training: world size"
)
parser.add_argument("--master_addr", type=str, default="localhost")
parser.add_argument("--master_port", type=str, default="6600")
parser.add_argument("--backend", type=str, default="nccl")
return parser
def data_config(parser):
parser.add_argument(
"--log_file", default="mt-dnn-train.log", help="path for log file."
)
parser.add_argument("--tensorboard", action="store_true")
parser.add_argument("--tensorboard_logdir", default="tensorboard_logdir")
parser.add_argument(
"--init_checkpoint",
default="bert-base-uncased",
type=str,
)
parser.add_argument("--data_dir", default="data/canonical_data/bert_uncased_lower")
parser.add_argument("--data_sort_on", action="store_true")
parser.add_argument("--name", default="farmer")
parser.add_argument(
"--task_def", type=str, default="experiments/glue/glue_task_def.yml"
)
parser.add_argument("--train_datasets", default="mnli,cola,qnli")
parser.add_argument("--test_datasets", default="mnli_matched,mnli_mismatched")
parser.add_argument("--glue_format_on", action="store_true")
parser.add_argument(
"--mkd-opt",
type=int,
default=0,
help=">0 to turn on knowledge distillation, requires 'softlabel' column in input data",
)
parser.add_argument("--do_padding", action="store_true")
return parser
def train_config(parser):
parser.add_argument(
"--cuda",
type=bool,
default=torch.cuda.is_available(),
help="whether to use GPU acceleration.",
)
parser.add_argument("--log_per_updates", type=int, default=500)
parser.add_argument("--save_per_updates", type=int, default=10000)
parser.add_argument("--save_per_updates_on", action="store_true")
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--batch_size_eval", type=int, default=8)
parser.add_argument(
"--optimizer",
default="adamax",
help="supported optimizer: adamax, sgd, adadelta, adam",
)
parser.add_argument("--grad_clipping", type=float, default=0)
parser.add_argument("--global_grad_clipping", type=float, default=1.0)
parser.add_argument("--weight_decay", type=float, default=0)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--momentum", type=float, default=0)
parser.add_argument("--warmup", type=float, default=0.1)
parser.add_argument("--warmup_schedule", type=str, default="warmup_linear")
parser.add_argument("--adam_eps", type=float, default=1e-6)
parser.add_argument("--vb_dropout", action="store_false")
parser.add_argument("--dropout_p", type=float, default=0.1)
parser.add_argument("--dropout_w", type=float, default=0.000)
parser.add_argument("--bert_dropout_p", type=float, default=0.1)
# loading
parser.add_argument("--model_ckpt", default="checkpoints/model_0.pt", type=str)
parser.add_argument("--resume", action="store_true")
# scheduler
# parser.add_argument('--feature_based_on', action='store_true')
parser.add_argument('--scheduler_type', type=int, default=0, help='0: linear, 1: cosine, 2 constant')
parser.add_argument("--output_dir", default="checkpoint")
parser.add_argument(
"--seed",
type=int,
default=2018,
help="random seed for data shuffling, embedding init, etc.",
)
parser.add_argument("--grad_accumulation_step", type=int, default=1)
# fp 16
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# adv training
parser.add_argument("--adv_train", action="store_true")
# the current release only includes smart perturbation
parser.add_argument("--adv_opt", default=0, type=int)
parser.add_argument("--adv_norm_level", default=0, type=int)
parser.add_argument("--adv_p_norm", default="inf", type=str)
parser.add_argument("--adv_alpha", default=1, type=float)
parser.add_argument("--adv_k", default=1, type=int)
parser.add_argument("--adv_step_size", default=1e-5, type=float)
parser.add_argument("--adv_noise_var", default=1e-5, type=float)
parser.add_argument("--adv_epsilon", default=1e-6, type=float)
parser.add_argument(
"--encode_mode", action="store_true", help="only encode test data"
)
parser.add_argument("--debug", action="store_true", help="print debug info")
# transformer cache
parser.add_argument("--transformer_cache", default=".cache", type=str)
return parser
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
output_dir = args.output_dir
data_dir = args.data_dir
args.train_datasets = args.train_datasets.split(",")
args.test_datasets = args.test_datasets.split(",")
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.abspath(output_dir)
set_environment(args.seed, args.cuda)
log_path = args.log_file
logger = create_logger(__name__, to_disk=True, log_file=log_path)
task_defs = TaskDefs(args.task_def)
encoder_type = args.encoder_type
def dump(path, data):
with open(path, "w") as f:
json.dump(data, f)
def evaluation(
model,
datasets,
data_list,
task_defs,
output_dir="checkpoints",
epoch=0,
n_updates=-1,
with_label=False,
tensorboard=None,
glue_format_on=False,
test_on=False,
device=None,
logger=None,
):
# eval on rank 1
print_message(logger, "Evaluation")
test_prefix = "Test" if test_on else "Dev"
if n_updates > 0:
updates_str = "updates"
else:
updates_str = "epoch"
updates = model.updates if n_updates > 0 else epoch
for idx, dataset in enumerate(datasets):
prefix = dataset.split("_")[0]
task_def = task_defs.get_task_def(prefix)
label_dict = task_def.label_vocab
test_data = data_list[idx]
if test_data is not None:
with torch.no_grad():
(
test_metrics,
test_predictions,
test_scores,
test_golds,
test_ids,
) = eval_model(
model,
test_data,
metric_meta=task_def.metric_meta,
device=device,
with_label=with_label,
label_mapper=label_dict,
task_type=task_def.task_type,
)
for key, val in test_metrics.items():
if tensorboard:
tensorboard.add_scalar(
"{}/{}/{}".format(test_prefix, dataset, key),
val,
global_step=updates,
)
if isinstance(val, str):
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: {5}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
elif isinstance(val, float):
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: {5:.3f}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
else:
test_metrics[key] = str(val)
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: \n{5}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
if args.local_rank in [-1, 0]:
score_file = os.path.join(
output_dir,
"{}_{}_scores_{}_{}.json".format(
dataset, test_prefix.lower(), updates_str, updates
),
)
results = {
"metrics": test_metrics,
"predictions": test_predictions,
"uids": test_ids,
"scores": test_scores,
}
dump(score_file, results)
if glue_format_on:
from experiments.glue.glue_utils import submit
official_score_file = os.path.join(
output_dir,
"{}_{}_scores_{}.tsv".format(
dataset, test_prefix.lower(), updates_str
),
)
submit(official_score_file, results, label_dict)
def initialize_distributed(logger, args):
"""Initialize torch.distributed."""
args.rank = int(os.getenv("RANK", "0"))
args.world_size = int(os.getenv("WORLD_SIZE", "1"))
batch_size_pre_gpu = int(args.batch_size / args.world_size)
print_message(logger, "Batch Size Per GPU: {}".format(batch_size_pre_gpu))
device = args.rank % torch.cuda.device_count()
if args.local_rank is not None:
device = args.local_rank
torch.cuda.set_device(device)
device = torch.device("cuda", args.local_rank)
# Call the init process
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6600")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(
backend=args.backend,
world_size=args.world_size,
rank=args.rank,
init_method=init_method,
)
return device
def print_message(logger, message, level=0):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
do_logging = True
else:
do_logging = False
else:
do_logging = True
if do_logging:
if level == 1:
logger.warning(message)
else:
logger.info(message)
def main():
# set up dist
device = torch.device("cuda")
if args.local_rank > -1:
device = initialize_distributed(logger, args)
elif torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
opt = vars(args)
# update data dir
opt["data_dir"] = data_dir
batch_size = args.batch_size
print_message(logger, "Launching the MT-DNN training")
# return
tasks = {}
task_def_list = []
dropout_list = []
printable = args.local_rank in [-1, 0]
train_datasets = []
for dataset in args.train_datasets:
prefix = dataset.split("_")[0]
if prefix in tasks:
continue
task_id = len(tasks)
tasks[prefix] = task_id
task_def = task_defs.get_task_def(prefix)
task_def_list.append(task_def)
train_path = os.path.join(data_dir, "{}_train.json".format(dataset))
print_message(logger, "Loading {} as task {}".format(train_path, task_id))
train_data_set = SingleTaskDataset(
train_path,
True,
maxlen=args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
train_datasets.append(train_data_set)
train_collater = Collater(
dropout_w=args.dropout_w,
encoder_type=encoder_type,
soft_label=args.mkd_opt > 0,
max_seq_len=args.max_seq_len,
do_padding=args.do_padding,
)
multi_task_train_dataset = MultiTaskDataset(train_datasets)
if args.local_rank != -1:
multi_task_batch_sampler = DistMultiTaskBatchSampler(
train_datasets,
args.batch_size,
args.mix_opt,
args.ratio,
rank=args.local_rank,
world_size=args.world_size,
)
else:
multi_task_batch_sampler = MultiTaskBatchSampler(
train_datasets,
args.batch_size,
args.mix_opt,
args.ratio,
bin_on=args.bin_on,
bin_size=args.bin_size,
bin_grow_ratio=args.bin_grow_ratio,
)
multi_task_train_data = DataLoader(
multi_task_train_dataset,
batch_sampler=multi_task_batch_sampler,
collate_fn=train_collater.collate_fn,
pin_memory=args.cuda,
)
opt["task_def_list"] = task_def_list
dev_data_list = []
test_data_list = []
test_collater = Collater(
is_train=False,
encoder_type=encoder_type,
max_seq_len=args.max_seq_len,
do_padding=args.do_padding,
)
for dataset in args.test_datasets:
prefix = dataset.split("_")[0]
task_def = task_defs.get_task_def(prefix)
task_id = tasks[prefix]
task_type = task_def.task_type
data_type = task_def.data_type
dev_path = os.path.join(data_dir, "{}_dev.json".format(dataset))
dev_data = None
if os.path.exists(dev_path):
dev_data_set = SingleTaskDataset(
dev_path,
False,
maxlen=args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
if args.local_rank != -1:
dev_data_set = DistTaskDataset(dev_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(
dev_data_set,
args.batch_size_eval,
rank=args.local_rank,
world_size=args.world_size,
)
dev_data = DataLoader(
dev_data_set,
batch_sampler=single_task_batch_sampler,
collate_fn=test_collater.collate_fn,
pin_memory=args.cuda,
)
else:
dev_data = DataLoader(
dev_data_set,
batch_size=args.batch_size_eval,
collate_fn=test_collater.collate_fn,
pin_memory=args.cuda,
)
dev_data_list.append(dev_data)
test_path = os.path.join(data_dir, "{}_test.json".format(dataset))
test_data = None
if os.path.exists(test_path):
test_data_set = SingleTaskDataset(
test_path,
False,
maxlen=args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
if args.local_rank != -1:
test_data_set = DistTaskDataset(test_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(
test_data_set,
args.batch_size_eval,
rank=args.local_rank,
world_size=args.world_size,
)
test_data = DataLoader(
test_data_set,
batch_sampler=single_task_batch_sampler,
collate_fn=test_collater.collate_fn,
pin_memory=args.cuda,
)
else:
test_data = DataLoader(
test_data_set,
batch_size=args.batch_size_eval,
collate_fn=test_collater.collate_fn,
pin_memory=args.cuda,
)
test_data_list.append(test_data)
print_message(logger, "#" * 20)
print_message(logger, opt)
print_message(logger, "#" * 20)
# div number of grad accumulation.
num_all_batches = (
args.epochs * len(multi_task_train_data) // args.grad_accumulation_step
)
print_message(logger, "############# Gradient Accumulation Info #############")
print_message(
logger, "number of step: {}".format(args.epochs * len(multi_task_train_data))
)
print_message(
logger,
"number of grad grad_accumulation step: {}".format(args.grad_accumulation_step),
)
print_message(logger, "adjusted number of step: {}".format(num_all_batches))
print_message(logger, "############# Gradient Accumulation Info #############")
init_model = args.init_checkpoint
state_dict = None
if os.path.exists(init_model):
if (
encoder_type == EncoderModelType.BERT
or encoder_type == EncoderModelType.DEBERTA
or encoder_type == EncoderModelType.ELECTRA
):
state_dict = torch.load(init_model, map_location=device)
config = state_dict["config"]
elif (
encoder_type == EncoderModelType.ROBERTA
or encoder_type == EncoderModelType.XLM
):
model_path = "{}/model.pt".format(init_model)
state_dict = torch.load(model_path, map_location=device)
arch = state_dict["args"].arch
arch = arch.replace("_", "-")
if encoder_type == EncoderModelType.XLM:
arch = "xlm-{}".format(arch)
# convert model arch
from data_utils.roberta_utils import update_roberta_keys
from data_utils.roberta_utils import patch_name_dict
state = update_roberta_keys(
state_dict["model"], nlayer=state_dict["args"].encoder_layers
)
state = patch_name_dict(state)
literal_encoder_type = EncoderModelType(opt["encoder_type"]).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[
literal_encoder_type
]
config = config_class.from_pretrained(arch).to_dict()
state_dict = {"state": state}
else:
if opt["encoder_type"] not in EncoderModelType._value2member_map_:
raise ValueError("encoder_type is out of pre-defined types")
literal_encoder_type = EncoderModelType(opt["encoder_type"]).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(
init_model, cache_dir=args.transformer_cache
).to_dict()
config["attention_probs_dropout_prob"] = args.bert_dropout_p
config["hidden_dropout_prob"] = args.bert_dropout_p
config["multi_gpu_on"] = opt["multi_gpu_on"]
if args.num_hidden_layers > 0:
config["num_hidden_layers"] = args.num_hidden_layers
opt.update(config)
model = MTDNNModel(
opt, device=device, state_dict=state_dict, num_train_step=num_all_batches
)
if args.resume and args.model_ckpt:
print_message(logger, "loading model from {}".format(args.model_ckpt))
model.load(args.model_ckpt)
#### model meta str
headline = "############# Model Arch of MT-DNN #############"
### print network
print_message(logger, "\n{}\n{}\n".format(headline, model.network))
# dump config
config_file = os.path.join(output_dir, "config.json")
with open(config_file, "w", encoding="utf-8") as writer:
writer.write("{}\n".format(json.dumps(opt)))
writer.write("\n{}\n{}\n".format(headline, model.network))
print_message(logger, "Total number of params: {}".format(model.total_param))
# tensorboard
tensorboard = None
# if args.tensorboard:
# args.tensorboard_logdir = os.path.join(args.output_dir, args.tensorboard_logdir)
# tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)
if args.encode_mode:
for idx, dataset in enumerate(args.test_datasets):
prefix = dataset.split("_")[0]
test_data = test_data_list[idx]
with torch.no_grad():
encoding = extract_encoding(model, test_data, use_cuda=args.cuda)
torch.save(
encoding, os.path.join(output_dir, "{}_encoding.pt".format(dataset))
)
return
for epoch in range(0, args.epochs):
print_message(logger, "At epoch {}".format(epoch), level=1)
start = datetime.now()
for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
batch_meta, batch_data = Collater.patch_data(device, batch_meta, batch_data)
task_id = batch_meta["task_id"]
model.update(batch_meta, batch_data)
if (model.updates) % (args.log_per_updates) == 0 or model.updates == 1:
ramaining_time = str(
(datetime.now() - start)
/ (i + 1)
* (len(multi_task_train_data) - i - 1)
).split(".")[0]
if args.adv_train and args.debug:
debug_info = " adv loss[%.5f] emb val[%.8f] eff_perturb[%.8f] " % (
model.adv_loss.avg,
model.emb_val.avg,
model.eff_perturb.avg,
)
else:
debug_info = " "
print_message(
logger,
"Task [{0:2}] updates[{1:6}] train loss[{2:.5f}]{3}remaining[{4}]".format(
task_id,
model.updates,
model.train_loss.avg,
debug_info,
ramaining_time,
),
)
if args.tensorboard:
tensorboard.add_scalar(
"train/loss", model.train_loss.avg, global_step=model.updates
)
if (
args.save_per_updates_on
and (
(model.local_updates)
% (args.save_per_updates * args.grad_accumulation_step)
== 0
)
and args.local_rank in [-1, 0]
):
model_file = os.path.join(
output_dir, "model_{}_{}.pt".format(epoch, model.updates)
)
evaluation(
model,
args.test_datasets,
dev_data_list,
task_defs,
output_dir,
epoch,
n_updates=args.save_per_updates,
with_label=True,
tensorboard=tensorboard,
glue_format_on=args.glue_format_on,
test_on=False,
device=device,
logger=logger,
)
evaluation(
model,
args.test_datasets,
test_data_list,
task_defs,
output_dir,
epoch,
n_updates=args.save_per_updates,
with_label=False,
tensorboard=tensorboard,
glue_format_on=args.glue_format_on,
test_on=True,
device=device,
logger=logger,
)
print_message(logger, "Saving mt-dnn model to {}".format(model_file))
model.save(model_file)
evaluation(
model,
args.test_datasets,
dev_data_list,
task_defs,
output_dir,
epoch,
with_label=True,
tensorboard=tensorboard,
glue_format_on=args.glue_format_on,
test_on=False,
device=device,
logger=logger,
)
evaluation(
model,
args.test_datasets,
test_data_list,
task_defs,
output_dir,
epoch,
with_label=False,
tensorboard=tensorboard,
glue_format_on=args.glue_format_on,
test_on=True,
device=device,
logger=logger,
)
print_message(logger, "[new test scores at {} saved.]".format(epoch))
if args.local_rank in [-1, 0]:
model_file = os.path.join(output_dir, "model_{}.pt".format(epoch))
model.save(model_file)
if args.tensorboard:
tensorboard.close()
if __name__ == "__main__":
main()
|
ContextualSP/adaptershare/train.py/0
|
{
"file_path": "ContextualSP/adaptershare/train.py",
"repo_id": "ContextualSP",
"token_count": 14357
}
| 231 |
import torch.nn as nn
from baseline.wtq_s2s.seq2seq import WTQSeq2SeqModel
from utils import *
from .spider_align import SpiderAlignmentModel
from .wtq_align import WTQAlignmentModel
_Model_mappings = {
'SpiderAlignmentModel':
{
'model': SpiderAlignmentModel,
'data_iter': load_spider_data_iterator,
'evaluator': SpiderEvaluator
},
'WTQAlignmentModel':
{
'model': WTQAlignmentModel,
'data_iter': load_wtq_data_iterator,
'evaluator': WTQEvaluator
},
'WTQSeq2SeqModel':
{
'model': WTQSeq2SeqModel,
'data_iter': load_wtq_data_iterator,
'evaluator': WTQEvaluator
}
}
def get_data_iterator_func(model: str):
return _Model_mappings[model]['data_iter']
def get_evaluator_class(model: str):
return _Model_mappings[model]['evaluator']
def load_model_from_checkpoint(model: str, device: torch.device, checkpoint: str = None, **args) -> nn.Module:
if model in ['WTQSeq2SeqModel']:
keyword_vocab_path = os.path.join(args['data_dir'], 'keyword.vocab.txt')
keyword_vocab = Vocab.from_file(keyword_vocab_path,
special_tokens=[SOS_Token, EOS_Token, UNK_Token, TBL_Token, VAL_Token],
min_freq=5)
info('load SQL keyword vocab from {} over, size = {}'.format(keyword_vocab_path, len(keyword_vocab)))
suffix_type_vocab_path = os.path.join(args['data_dir'], 'suffix_type.vocab.txt')
suffix_type_vocab = Vocab.from_file(suffix_type_vocab_path, special_tokens=[], min_freq=5)
info('load Column suffix type vocab from {} over, size = {}'.format(suffix_type_vocab_path,
len(suffix_type_vocab)))
model_args = {'keyword_vocab': keyword_vocab, 'suffix_type_vocab': suffix_type_vocab}
model = WTQSeq2SeqModel(bert_version=args['bert_version'], hidden_size=300, dropout_prob=args['dropout'],
**model_args)
model.to(device)
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint, map_location=device))
info('Initialize {} from checkpoint {} over.'.format(model, checkpoint))
return model
if 'encoder_checkpoint' in args and args['encoder_checkpoint'] is not None:
model.encoder.load_state_dict(torch.load(args['encoder_ckpt'], map_location=device))
info('Initialize {} encoder from checkpoint {} over.'.format(model, args.encoder_ckpt))
return model
return model
elif model in ['WTQAlignmentModel']:
model = WTQAlignmentModel(args['bert_version'], dropout_prob=args['dropout'])
model.to(device=device)
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint, map_location=device))
info('Initialize model from checkpoint {} over.'.format(checkpoint))
return model
return model
elif model in ['SpiderAlignmentModel']:
model = SpiderAlignmentModel(args['bert_version'], dropout_prob=args['dropout'])
model.to(device)
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint, map_location=device))
info('Initialize model from checkpoint {} over.'.format(checkpoint))
return model
return model
else:
raise NotImplementedError("Not supported model: {}".format(model))
|
ContextualSP/awakening_latent_grounding/models/model_utils.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/model_utils.py",
"repo_id": "ContextualSP",
"token_count": 1654
}
| 232 |
python train.py -model WTQAlignmentModel -bert bert-base-uncased \
-lr 3e-5 -train_bs 16 -alw linear_5-10 -num_epochs 20 \
--data_dir data/wtq_grounding \
--out_dir checkpoints/model_wtq
|
ContextualSP/awakening_latent_grounding/train_wtq_ground.sh/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/train_wtq_ground.sh",
"repo_id": "ContextualSP",
"token_count": 83
}
| 233 |
# Compositionality Generalization <img src="https://pytorch.org/assets/images/logo-dark.svg" height = "25" align=center />
This repository is the official implementation of our paper [Compositional Generalization by Learning Analytical Expressions](https://arxiv.org/pdf/2006.10627.pdf).
If you find our code useful for you, please consider citing our paper
```bib
@inproceedings{qian2020compositional,
title={Compositional Generalization by Learning Analytical Expressions},
author={Qian Liu and Shengnan An and Jian{-}Guang Lou and Bei Chen and Zeqi Lin and Yan Gao and Bin Zhou and Nanning Zheng and Dongmei Zhang},
booktitle={Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual},
year={2020}
}
```
## Content
- [Install Requirements](#requirements)
- [Train Model](#training)
- [Evaluate Model](#evaluation)
- [Pre-trained Models](#pre-trained-models)
- [Expected Results](#results)
- [Frequent Asked Questions](#faq)
## Requirements
Our code is officially supported by Python 3.7. The main dependencies are `pytorch` and `tensorboardX`.
You could install all requirements by the following command:
```console
❱❱❱ pip install -r requirements.txt
```
## Training
To train our model on different tasks on SCAN and SCAN-ext datasets, you could use this command:
```console
❱❱❱ python main.py --mode train --checkpoint <model_dir> --task <task_name>
```
📋 Note that `<model_dir>` specifies the store folder of model checkpoints, and `<task_name>` is the task name.
Available task names are `[simple, addjump, around_right, length, mcd1, mcd2, mcd3, extend]`.
For example, you could train a model on `addjump` task by the following command:
```console
❱❱❱ python main.py --mode train --checkpoint addjump_model --task addjump
```
📋 Since reinforcement learning is known to be hard to train, there is a chance of the code to not converge in the training. You could choose another random seed and try again.
📋 Meanwhile, please note that the model training is sensitive to the value of the hyper-parameter coefficient of the **simplicity-based reward** (i.e. `--simplicity-ratio` in args). When it is higher (i.e. 0.5 or 1.0), the model is harder to converge, which indicates that the training accuracy may not arrive at 100%. We're still investigating in the reason behind it. If you cannot obtain good results after trying several random seed, you could try to reproduce other results (not suitable for `around_right` and `mcd3`, as stated in the paper) using a `0` simplicity-ratio (default setting now). We will update the code when we find a better training strategy.
Therefore, please use the following command for `around_right` and `mcd3` task:
```console
❱❱❱ python main.py --mode train --checkpoint addjump_model --task around_right --simplicity-ratio 0.5
```
The corresponding log and model weights will be stored in the path `checkpoint/logs/addjump_model.log` and `checkpoint/models/addjump_model/*.mdl` respectively
## Evaluation
To evaluate our model on different tasks, run:
```console
❱❱❱ python main.py --mode test --checkpoint <model_weight_file> --task <task_name>
```
📋 Note that `<model_weight_file>` specifies a concrete model file with the suffix `.mdl`, and `<task_name>` is the task name.
For example, you could evaluate a trained model weight `weight.mdl` on `addjump` task by the following command:
```console
❱❱❱ python main.py --mode test --checkpoint weight.mdl --task addjump
```
## Pre-trained Models
You can find pretrained model weights for the above tasks under the `pretrained_weights` folder.
## Results
Our model is excepted to achieve 100% accuracies on all tasks if the training succeeds.
|
ContextualSP/compositional_generalization/README.md/0
|
{
"file_path": "ContextualSP/compositional_generalization/README.md",
"repo_id": "ContextualSP",
"token_count": 1084
}
| 234 |
import math
import torch
import logging
from functools import partial
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from torch.distributions.utils import lazy_property
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.distributions.categorical import Categorical as TorchCategorical
import os
import numpy as np
class VisualizeLogger(object):
EMOJI_CORRECT = "😋"
EMOJI_ERROR = "😡"
EMOJI_REWARD = "🍎"
EMOJI_DECODE_REWARD = "🍐"
def __init__(self, summary_dir):
"""
:param summary_dir: folder to store the tensorboard X log files
:param validation_size:
"""
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.log_writer = SummaryWriter(summary_dir)
self.global_step = 0
self.validate_no = 1
self.validation_size = 1
# define template
self.log_template = '**Input** : {4} \n\n **Reduce** : {1} \n\n **Ground**: {2} \n\n{0}**Logic Form**: {3} \n\n'
def update_validate_size(self, validation_size):
self.validation_size = validation_size
def log_text(self, ground_truth, reduce_str, logic_form, utterance, debug_info=None):
is_correct = ground_truth == logic_form
if is_correct:
logging_str = self.log_template.format(self.EMOJI_CORRECT, reduce_str, ground_truth, logic_form, utterance)
else:
logging_str = self.log_template.format(self.EMOJI_ERROR, reduce_str, ground_truth, logic_form, utterance)
if debug_info is not None:
tree_vis = self._format_tree_prob(utterance, debug_info)
logging_str += "**Tree** :\n\n" + tree_vis
dev_case = self.global_step % self.validation_size
dev_step = self.global_step // self.validation_size
self.log_writer.add_text(f'{dev_case}-th Example', logging_str, global_step=dev_step)
def log_performance(self, valid_acc):
self.log_writer.add_scalar("Accuracy", valid_acc, global_step=self.validate_no)
def update_step(self):
self.global_step += 1
def update_epoch(self):
self.validate_no += 1
def _format_tree_prob(self, utterance, debug_info):
# accept utterance and debug_info, return the visualized tree prob
tokens = utterance.split(" ")
seq_len = len(tokens)
merge_probs = debug_info["merge_prob"]
reduce_probs = debug_info["reduce_prob"]
decoder_inputs = debug_info["decoder_inputs"]
decoder_outputs = debug_info["decoder_outputs"]
reduce_rewards = debug_info["tree_sr_rewards"]
decode_rewards = debug_info["decode_rewards"]
log_strs = []
right_single = "■■"
error_single = "□□"
# merged chain
merge_template = "{3} {0} ({1:.2f}) ({2:.2f})"
no_merge_template = "{2} {0} ({1:.2f})"
only_reduce_template = "{0} (1.00) ({1:.2f})"
start_indicator = 0
depth_indicator = 0
decode_indicator = 0
if seq_len == 1:
log_str = only_reduce_template.format(tokens[0], reduce_probs[0])
log_strs.append(log_str)
else:
for reverse_len in reversed(range(1, seq_len)):
if depth_indicator == 0:
# reduce single node
for i in range(seq_len):
log_str = only_reduce_template.format(tokens[i], reduce_probs[i])
if decoder_outputs[i] != 'NONE':
log_str += " ({1}{0:.2f})".format(reduce_rewards[i], self.EMOJI_REWARD)
log_str += " [*input*: {0}, *output*: {1}]".format(decoder_inputs[i], decoder_outputs[i])
log_str += " ({1}{0:.2f})".format(decode_rewards[decode_indicator],
self.EMOJI_DECODE_REWARD)
decode_indicator += 1
else:
log_str += " ({1}{0:.2f})".format(reduce_rewards[i], self.EMOJI_REWARD)
log_strs.append(log_str)
depth_indicator += 1
layer_merge_prob = merge_probs[start_indicator: start_indicator + reverse_len]
start_indicator += reverse_len
layer_reduce_prob = reduce_probs[seq_len + depth_indicator - 1]
merge_candidates = ["-".join(tokens[i: i + depth_indicator + 1]) for i in range(reverse_len)]
ind = np.argmax(layer_merge_prob)
for i in range(reverse_len):
if i == ind:
log_str = merge_template.format(merge_candidates[i], layer_merge_prob[i],
layer_reduce_prob,
right_single * depth_indicator)
if decoder_outputs[seq_len + depth_indicator - 1] != "NONE":
log_str += " ({1}{0:.2f})".format(reduce_rewards[seq_len + depth_indicator - 1],
self.EMOJI_REWARD)
log_str += " [*input*: {0}, *output*: {1}]".format(
decoder_inputs[seq_len + depth_indicator - 1],
decoder_outputs[seq_len + depth_indicator - 1]
)
log_str += " ({1}{0:.2f})".format(decode_rewards[decode_indicator],
self.EMOJI_DECODE_REWARD)
decode_indicator += 1
else:
log_str += " ({1}{0:.2f})".format(reduce_rewards[i], self.EMOJI_REWARD)
else:
log_str = no_merge_template.format(merge_candidates[i], layer_merge_prob[i],
error_single * depth_indicator)
log_strs.append(log_str)
depth_indicator += 1
return "\n\n".join(log_strs)
class AverageMeter:
def __init__(self):
self.value = None
self.avg = None
self.sum = None
self.count = None
self.reset()
def reset(self):
self.value = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, value, n=1):
self.value = value
self.sum += value * n
self.count += n
self.avg = self.sum / self.count
def get_logger(file_name):
logger = logging.getLogger("general_logger")
handler = logging.FileHandler(file_name, mode='w')
formatter = logging.Formatter("%(asctime)s - %(message)s", "%d-%m-%Y %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def get_lr_scheduler(logger, optimizer, mode='max', factor=0.5, patience=10, threshold=1e-4, threshold_mode='rel'):
def reduce_lr(self, epoch):
ReduceLROnPlateau._reduce_lr(self, epoch)
logger.info(f"learning rate is reduced by factor {factor}!")
lr_scheduler = ReduceLROnPlateau(optimizer, mode, factor, patience, False, threshold, threshold_mode)
lr_scheduler._reduce_lr = partial(reduce_lr, lr_scheduler)
return lr_scheduler
def clamp_grad(v, min_val, max_val):
if v.requires_grad:
v_tmp = v.expand_as(v)
v_tmp.register_hook(lambda g: g.clamp(min_val, max_val))
return v_tmp
return v
def length_to_mask(length):
with torch.no_grad():
batch_size = length.shape[0]
max_length = length.data.max()
range = torch.arange(max_length, dtype=torch.int64, device=length.device)
range_expanded = range[None, :].expand(batch_size, max_length)
length_expanded = length[:, None].expand_as(range_expanded)
return (range_expanded < length_expanded).float()
class Categorical:
def __init__(self, scores, mask=None):
self.mask = mask
if mask is None:
self.cat_distr = TorchCategorical(F.softmax(scores, dim=-1))
self.n = scores.shape[0]
self.log_n = math.log(self.n)
else:
self.n = self.mask.sum(dim=-1)
self.log_n = (self.n + 1e-17).log()
self.cat_distr = TorchCategorical(Categorical.masked_softmax(scores, self.mask))
@lazy_property
def probs(self):
return self.cat_distr.probs
@lazy_property
def logits(self):
return self.cat_distr.logits
@lazy_property
def entropy(self):
if self.mask is None:
return self.cat_distr.entropy() * (self.n != 1)
else:
entropy = - torch.sum(self.cat_distr.logits * self.cat_distr.probs * self.mask, dim=-1)
does_not_have_one_category = (self.n != 1.0).to(dtype=torch.float32)
# to make sure that the entropy is precisely zero when there is only one category
return entropy * does_not_have_one_category
@lazy_property
def normalized_entropy(self):
return self.entropy / (self.log_n + 1e-17)
def sample(self):
return self.cat_distr.sample()
def rsample(self, temperature=None, gumbel_noise=None, eps=1e-5):
if gumbel_noise is None:
with torch.no_grad():
uniforms = torch.empty_like(self.probs).uniform_()
uniforms = uniforms.clamp(min=eps, max=1 - eps)
gumbel_noise = -(-uniforms.log()).log()
elif gumbel_noise.shape != self.probs.shape:
raise ValueError
if temperature is None:
with torch.no_grad():
scores = (self.logits + gumbel_noise)
scores = Categorical.masked_softmax(scores, self.mask)
sample = torch.zeros_like(scores)
sample.scatter_(-1, scores.argmax(dim=-1, keepdim=True), 1.0)
return sample, gumbel_noise
else:
scores = (self.logits + gumbel_noise) / temperature
sample = Categorical.masked_softmax(scores, self.mask)
return sample, gumbel_noise
def log_prob(self, value):
if value.dtype == torch.long:
if self.mask is None:
return self.cat_distr.log_prob(value)
else:
return self.cat_distr.log_prob(value) * (self.n != 0.).to(dtype=torch.float32)
else:
max_values, mv_idxs = value.max(dim=-1)
relaxed = (max_values - torch.ones_like(max_values)).sum().item() != 0.0
if relaxed:
raise ValueError("The log_prob can't be calculated for the relaxed sample!")
return self.cat_distr.log_prob(mv_idxs) * (self.n != 0.).to(dtype=torch.float32)
@staticmethod
def masked_softmax(logits, mask):
"""
This method will return valid probability distribution for the particular instance if its corresponding row
in the `mask` matrix is not a zero vector. Otherwise, a uniform distribution will be returned.
This is just a technical workaround that allows `Categorical` class usage.
If probs doesn't sum to one there will be an exception during sampling.
"""
if mask is not None:
probs = F.softmax(logits, dim=-1) * mask
probs = probs + (mask.sum(dim=-1, keepdim=True) == 0.).to(dtype=torch.float32)
Z = probs.sum(dim=-1, keepdim=True)
return probs / Z
else:
return F.softmax(logits, dim=-1)
|
ContextualSP/compositional_generalization/utils.py/0
|
{
"file_path": "ContextualSP/compositional_generalization/utils.py",
"repo_id": "ContextualSP",
"token_count": 5853
}
| 235 |
#!/usr/bin/env bash
export model_file=../checkpoints/run_canard
export config_file=../configs/canard.jsonnet
export train_data_path=../dataset/CANARD/train.txt
export validation_data_path=../dataset/CANARD/dev.txt
export pretrained_file=../glove/glove.6B.100d.txt
export seed=1
allennlp train -s ${model_file} ${config_file} \
--include-package data_reader \
--include-package model \
-o "{\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\", \"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.word_embedder.tokens.pretrained_file\":\"${pretrained_file}\"}"
|
ContextualSP/incomplete_utterance_rewriting/src/train_canard.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/train_canard.sh",
"repo_id": "ContextualSP",
"token_count": 232
}
| 236 |
# coding: utf-8
import os
import json
import re
import pickle as pkl
import numpy as np
from src.utils.utils import lemma_token
from src.utils.link_util import find_alignment_by_rule, find_keyword_alignment_by_rule
from src.utils.utils import STOP_WORD_LIST
def jaccard_distance(word_list1, word_list2):
word_set1 = set(word_list1)
word_set2 = set(word_list2)
return len(word_set1 & word_set2) / len(word_set1 | word_set2)
class QuestionGenerator(object):
AGGRs = ['minimum', 'maximum', 'average', 'sum']
def __init__(self, dataset_path='data/spider/', n_options=3):
# common
self.dataset_path = dataset_path
self.database_path = os.path.join(dataset_path, 'database')
table_file_path = os.path.join(self.dataset_path, 'tables.json')
self.dbs_schemas = {_['db_id']: _ for _ in json.load(open(table_file_path, 'r', encoding='utf-8'))}
self.n_options = n_options
# example
self.database = ''
self.table_names, self.column_names = [], []
self.utterance_tokens = [] # NOTICE: bert tokenized tokens
self.utterance_tokens_no_stopwords = []
# glove
self.glove_dict = {}
self.glove_vectors = None
self._load_glove_vectors()
self.glove_unk = self.glove_dict.get('unk', 0)
def _load_glove_vectors(self):
glove_path = 'data/common/glove_tune.42B.300d.txt'
glove_dict = {}
vectors = []
with open(glove_path, 'r', encoding='utf-8') as fr:
for idx, line in enumerate(fr):
line = line.strip()
token, vec = line.split(' ', 1)
vec = [float(_) for _ in vec.split()]
assert len(vec) == 300, 'Glove vector not in dimension 300'
glove_dict[token] = idx
vectors.append(vec)
vectors = np.array(vectors)
self.glove_dict = glove_dict
self.glove_vectors = vectors
def refresh(self, database, utterance_tokens):
self.database = database
db_schemas = self.dbs_schemas[self.database]
self.column_names = [_[1] for _ in db_schemas['column_names'] if _[1] is not '*']
self.table_names = db_schemas['table_names']
self.utterance_tokens = utterance_tokens
self.utterance_tokens_no_stopwords = []
for token_idx, token in enumerate(self.utterance_tokens):
if token not in STOP_WORD_LIST:
self.utterance_tokens_no_stopwords.append((token_idx, token))
def generate_question(self, token_idx):
asked_token = self.utterance_tokens[token_idx]
column_similarities = self.get_keyword_similarities(asked_token, self.column_names)
table_similarities = self.get_keyword_similarities(asked_token, self.table_names)
aggr_similarities = self.get_keyword_similarities(asked_token, self.AGGRs)
similarities = [(span, score, 'column_name') for (span, score) in column_similarities] + \
[(span, score, 'table_name') for (span, score) in table_similarities] + \
[(span, score, 'aggr') for (span, score) in aggr_similarities]
similarities.sort(key=lambda x: -x[1])
options = similarities[:self.n_options]
options.append(('none', 0, 'none'))
options.append(('value', 0, 'value'))
return {'question': asked_token, 'options': options}
def get_keyword_similarities(self, token, spans):
scores = [(span, self.calculate_token_span_similarity(token, span)) for span in spans] # todo: warning: error here
scores.sort(key=lambda x: -x[1])
return scores
def calculate_token_span_similarity(self, token, span):
assert len(re.split(' |_', token)) == 1
# part1: surface similarity
surface_similarity, status = 0.0, ''
span_tokens = re.split(' |_', span)
token_lemma = lemma_token(token)
span_tokens_lemma = [lemma_token(_) for _ in span_tokens]
if token == ''.join(span_tokens):
surface_similarity = 5.0
status = 'exact'
elif token_lemma == ''.join(span_tokens_lemma):
surface_similarity = 3.0
status = 'exact lemma'
elif token in span:
surface_similarity = 1.5
status = 'partial'
elif token_lemma in span_tokens_lemma:
surface_similarity = 1.0
status = 'partial lemma'
surface_similarity += jaccard_distance([token], span_tokens) + \
jaccard_distance([token_lemma], span_tokens_lemma)
# part2: embedding similarity
token_vector = self.glove_vectors[self.glove_dict.get(token, self.glove_unk)]
span_vector = [self.glove_vectors[self.glove_dict.get(t, self.glove_unk)] for t in span_tokens]
if len(span_vector) > 1:
span_vector = np.mean(span_vector)
embedding_similarity = np.mean(token_vector * span_vector)
return surface_similarity + embedding_similarity
if __name__ == '__main__':
question_generator = QuestionGenerator()
def similarity_test(key_token, value_tokens):
ret = []
for value_token in value_tokens:
score = question_generator.calculate_token_span_similarity(key_token, value_token)
ret.append((value_token, score))
return ret
key_token = ''
value_token = ''
while True:
s = input().lower()
if s.startswith('key:'):
key_token = s[4:].strip()
elif s.startswith('value:'):
value_token = s[6:].strip()
print(question_generator.calculate_token_span_similarity(key_token, value_token))
elif s.startswith('values:'):
value_tokens = eval(s[7:].strip())
print(similarity_test(key_token, value_tokens))
|
ContextualSP/interactive_text_to_sql/src/components/question_generator.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/components/question_generator.py",
"repo_id": "ContextualSP",
"token_count": 2675
}
| 237 |
# coding: utf-8
import json
import os
import pickle as pkl
import nltk
from nltk.stem import WordNetLemmatizer
from tqdm import tqdm
from src.utils.utils import lemma_token
wordnet_lemmatizer = WordNetLemmatizer()
VALUE_FILTER = ['what', 'how', 'list', 'give', 'show', 'find', 'id', 'order', 'alse', 'when']
AGG = ['average', 'sum', 'max', 'min', 'minimum', 'maximum', 'between']
def load_dataset(table_path, data_path):
table_list = json.load(open(table_path, 'r', encoding='utf-8'))
spider_examples = json.load(open(data_path, 'r', encoding='utf-8'))
tables = {}
for table in table_list:
db_id = table['db_id']
table['col_table'] = [_[0] for _ in table['column_names']]
table['schema_content'] = [_[1] for _ in table['column_names']]
table['col_set'] = list(set(table['schema_content']))
tables[db_id] = table
for example in spider_examples:
db_id = example['db_id']
example['column_names'] = tables[db_id]['schema_content']
example['table_names'] = tables[db_id]['table_names']
example['col_set'] = tables[db_id]['col_set']
example['col_table'] = tables[db_id]['col_table']
keys = {}
for kv in tables[db_id]['foreign_keys']:
keys[kv[0]] = kv[1]
keys[kv[1]] = kv[0]
for id_k in tables[db_id]['primary_keys']:
keys[id_k] = id_k
example['keys'] = keys
return tables, spider_examples
def group_header(toks, idx, num_toks, header_toks):
# a substring of toks[idx:] belongs to list header_toks
for endIdx in reversed(range(idx + 1, num_toks+1)):
sub_toks = toks[idx: endIdx]
sub_toks = " ".join(sub_toks)
if sub_toks in header_toks:
return endIdx, sub_toks
return idx, None
def fully_part_header(toks, idx, num_toks, header_toks):
for endIdx in reversed(range(idx + 1, num_toks+1)):
sub_toks = toks[idx: endIdx]
if len(sub_toks) > 1:
sub_toks = " ".join(sub_toks)
if sub_toks in header_toks:
return endIdx, sub_toks
return idx, None
def partial_header(toks, idx, num_toks, header_toks):
# a substring of tokens is a subset of a header's tokens
def check_in(list_one, list_two):
# print(set(list_one) & set(list_two), len(list_one))
if len(set(list_one) & set(list_two)) == len(list_one) and len(list_two) <= 3:
# print(len(list_two), list_two, list_one)
return True
for endIdx in reversed(range(idx + 1, len(toks))):
sub_toks = toks[idx: min(endIdx, len(toks))]
if len(sub_toks) > 1:
flag_count = 0
tmp_heads = None
for heads in header_toks:
if check_in(sub_toks, heads):
flag_count += 1
tmp_heads = heads
if flag_count == 1:
return endIdx, tmp_heads
return idx, None
def symbol_filter(questions):
question_tmp_q = []
for q_id, q_val in enumerate(questions):
if len(q_val) > 2 and q_val[0] in ["'", '"', '`', '鈥�', '鈥�'] and q_val[-1] in ["'", '"', '`', '鈥�']:
question_tmp_q.append("'")
question_tmp_q += ["".join(q_val[1:-1])]
question_tmp_q.append("'")
elif len(q_val) > 2 and q_val[0] in ["'", '"', '`', '鈥�'] :
question_tmp_q.append("'")
question_tmp_q += ["".join(q_val[1:])]
elif len(q_val) > 2 and q_val[-1] in ["'", '"', '`', '鈥�']:
question_tmp_q += ["".join(q_val[0:-1])]
question_tmp_q.append("'")
elif q_val in ["'", '"', '`', '鈥�', '鈥�', '``', "''"]:
question_tmp_q += ["'"]
# elif q_val in [","]:
# question_tmp_q += ['銆�']
else:
question_tmp_q += [q_val]
return question_tmp_q
def re_lemma(string):
lema = lemma_token(string.lower()) # get base form of a verb
if len(lema) > 0:
return lema
else:
return string.lower()
def group_values(toks, idx, num_toks):
# longest token sequence with upper capital letter
def check_isupper(tok_lists):
for tok_one in tok_lists:
if tok_one[0].isupper() is False:
return False
return True
for endIdx in reversed(range(idx + 1, num_toks + 1)):
sub_toks = toks[idx: endIdx]
if len(sub_toks) > 1 and check_isupper(sub_toks) is True:
return endIdx, sub_toks
if len(sub_toks) == 1:
if sub_toks[0][0].isupper() and sub_toks[0].lower() not in VALUE_FILTER and \
sub_toks[0].lower().isalnum() is True:
return endIdx, sub_toks
return idx, None
def group_digital(toks, idx):
test = toks[idx].replace(':', '')
test = test.replace('.', '')
if test.isdigit():
return True
else:
return False
def group_symbol(toks, idx, num_toks):
if toks[idx-1] == "'":
for i in range(0, min(3, num_toks-idx)):
if toks[i + idx] == "'":
return i + idx, toks[idx:i+idx]
return idx, None
def is_year_num(tok):
if len(str(tok)) == 4 and str(tok).isdigit() and 15 < int(str(tok)[:2]) < 22:
return True
return False
class SchemaLinker(object):
def __init__(self, table_path=None, conceptnet_path=None):
self.tables = {}
if table_path:
if not os.path.exists(table_path):
raise FileNotFoundError(f'{table_path} not found')
table_list = json.load(open(table_path, 'r', encoding='utf-8'))
for table in table_list:
db_id = table['db_id']
table['col_table'] = [_[0] for _ in table['column_names']]
table['schema_content'] = [_[1] for _ in table['column_names']]
table['col_set'] = list(set(table['schema_content']))
self.tables[db_id] = table
self.is_a_dict = []
self.related_to_dict = []
if conceptnet_path:
if os.path.exists('cache/conceptnet/is_a.pkl') and os.path.exists('cache/conceptnet/relation_to.pkl'):
self.is_a_dict = pkl.load(open('cache/conceptnet/is_a.pkl', 'rb'))
self.related_to_dict = pkl.load(open('cache/conceptnet/relation_to.pkl', 'rb'))
else:
if not os.path.exists(conceptnet_path):
raise FileNotFoundError(f'{conceptnet_path} not found')
is_a_dict = {}
related_to_dict = {}
with open(conceptnet_path, 'r', encoding='utf-8') as fr:
for line in tqdm(fr):
uri, relation, head, tail, detail = line.strip().split('\t')
head_split = head.split('/')[1:]
tail_split = tail.split('/')[1:]
if head_split[1] != 'en' or tail_split[1] != 'en':
continue
if relation == '/r/IsA':
is_a_dict[head_split[2]] = tail_split[2]
elif relation == '/r/RelatedTo':
related_to_dict[head_split[2]] = tail_split[2]
# with open('data/concept_net/IsA.csv', 'r', encoding='utf-8') as fr1, \
# open('data/concept_net/RelatedTo.csv', 'r', encoding='utf-8') as fr2:
# for line in tqdm(fr1.readlines() + fr2.readlines()):
# uri, relation, head, tail, detail = line.strip().split('\t')
# head_split = head.split('/')[1:]
# tail_split = tail.split('/')[1:]
# if head_split[1] != 'en' or tail_split[1] != 'en':
# continue
# if relation == '/r/IsA':
# is_a_dict[head_split[2]] = tail_split[2]
# elif relation == '/r/RelatedTo':
# related_to_dict[head_split[2]] = tail_split[2]
# pkl.dump(is_a_dict, open('cache/conceptnet/is_a.pkl', 'wb'))
# pkl.dump(related_to_dict, open('cache/conceptnet/relation_to.pkl', 'wb'))
self.is_a_dict = is_a_dict
self.related_to_dict = related_to_dict
def link_example(self, example, table_info=None):
'''
Add linking info for example
example must contain: db_id, question_toks
:param example:
:param table_info:
:return:
'''
db_id = example['db_id']
if table_info is not None:
assert table_info['db_id'] == db_id
else:
assert db_id in self.tables
table_info = self.tables[db_id]
example['column_names'] = table_info['schema_content']
example['table_names'] = table_info['table_names']
example['col_set'] = table_info['col_set']
example['col_table'] = table_info['col_table']
keys = {}
for kv in table_info['foreign_keys']:
keys[kv[0]] = kv[1]
keys[kv[1]] = kv[0]
for id_k in table_info['primary_keys']:
keys[id_k] = id_k
example['keys'] = keys
if 'origin_question_toks' not in example:
example['origin_question_toks'] = example['question_toks']
example['question_toks'] = symbol_filter(example['question_toks'])
origin_question_toks = symbol_filter([x for x in example['origin_question_toks']])
question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in example['question_toks']]
example['question_toks'] = question_toks
# This way for table_names lemmatizer
table_names = []
table_names_pattern = []
for y in example['table_names']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
# x = [lemma(x).lower() for x in y.split(' ')]
table_names.append(" ".join(x))
x = [re_lemma(x.lower()) for x in y.split(' ')]
table_names_pattern.append(" ".join(x))
# This is for header_toks lemmatizer
header_toks = []
header_toks_list = []
header_toks_pattern = []
header_toks_list_pattern = []
for y in example['col_set']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
header_toks.append(" ".join(x))
header_toks_list.append(x)
x = [re_lemma(x.lower()) for x in y.split(' ')]
header_toks_pattern.append(" ".join(x))
header_toks_list_pattern.append(x)
num_toks = len(question_toks)
idx = 0
tok_concol = []
type_concol = []
nltk_result = nltk.pos_tag(question_toks)
while idx < num_toks:
############ fully header
end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks) # length should > 1
if header:
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
############ check for table
end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
if tname:
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["table"])
idx = end_idx
continue
########### check for col
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
end_idx, tname = partial_header(question_toks, idx, num_toks, header_toks_list)
if tname:
# tok_concol.append(tname)
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
if agg:
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["agg"])
idx = end_idx
continue
if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
tok_concol.append([origin_question_toks[idx]])
type_concol.append(['MORE'])
idx += 1
continue
if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
tok_concol.append([origin_question_toks[idx]])
type_concol.append(['MOST'])
idx += 1
continue
if is_year_num(question_toks[idx]):
question_toks[idx] = 'year'
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(origin_question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
pro_result = "NONE"
def get_concept_result(toks, graph):
find_col = False
for begin_id in range(0, len(toks)):
for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
tmp_query = "_".join(toks[begin_id:r_ind])
if tmp_query in graph:
mi = graph[tmp_query]
for col in example['col_set']:
if col in mi:
return col
end_idx, symbol = group_symbol(question_toks, idx, num_toks)
if symbol:
tmp_toks = [x for x in question_toks[idx: end_idx]]
origin_tmp_toks = [x for x in origin_question_toks[idx: end_idx]]
assert len(tmp_toks) > 0, print(symbol, question_toks)
pro_result = get_concept_result(tmp_toks, self.is_a_dict)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, self.related_to_dict)
if pro_result is None:
pro_result = "NONE"
for tmp in origin_tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
end_idx, values = group_values(origin_question_toks, idx, num_toks)
if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
origin_tmp_toks = [x for x in origin_question_toks[idx: end_idx] if x.isalnum() is True]
assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
pro_result = get_concept_result(tmp_toks, self.is_a_dict)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, self.related_to_dict)
if pro_result is None:
pro_result = "NONE"
for tmp in origin_tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
result = group_digital(question_toks, idx)
if result is True:
tok_concol.append(origin_question_toks[idx: idx + 1])
type_concol.append(["value"])
idx += 1
continue
if question_toks[idx] == ['ha']:
question_toks[idx] = ['have']
tok_concol.append([origin_question_toks[idx]])
type_concol.append(['NONE'])
idx += 1
continue
example['question_arg'] = tok_concol
example['question_arg_type'] = type_concol
example['nltk_pos'] = nltk_result
return example
def link_nl(self, db_id, nl):
if not isinstance(nl, list):
nl = nl.split()
ret = self.link_example({
'db_id': db_id,
'question_toks': nl,
})
return ret
if __name__ == '__main__':
table_path = 'data/datasets/spider/tables.json'
data_path = 'data/datasets/spider/dev.json'
conceptnet_path = 'data/concept_net/conceptnet-assertions-5.6.0.csv'
linker = SchemaLinker(table_path, conceptnet_path)
examples = json.load(open(data_path, 'r', encoding='utf-8'))
example = examples[0]
import copy
new_example = linker.link(copy.copy(example))
pass
|
ContextualSP/interactive_text_to_sql/src/utils/schema_linker.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/schema_linker.py",
"repo_id": "ContextualSP",
"token_count": 9130
}
| 238 |
import itertools
import json
import logging
import os.path
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import MutableMapping, Mapping, Sequence, Iterator
from contextlib import contextmanager
from sqlalchemy import MetaData
from sqlalchemy.engine.url import URL
from gtd.io import open_or_create, JSONPicklable
from gtd.utils import ensure_unicode, SimpleExecutor, Failure
from gtd.utils import makedirs
from sqlalchemy import Column, Table
from sqlalchemy import tuple_
from sqlalchemy.engine import Engine, create_engine
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.sql import select
class Closeable(object, metaclass=ABCMeta):
@abstractmethod
def close(self):
"""Close this object."""
pass
@abstractproperty
def closed(self):
"""A bool indicating whether this object was closed.
Returns:
bool
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __del__(self):
if not self.closed:
logging.warn('{} was not properly closed.'.format(self))
self.close()
class BatchMapping(Mapping, metaclass=ABCMeta):
"""Like the built-in Mapping class, except subclasses must implement batch versions of get and contains."""
@abstractmethod
def get_batch(self, keys):
"""Get value for each key in keys.
Args:
keys (list): a list of keys
Returns:
list: a list of values with the same order corresponding to the list of keys.
If a given key does not have a value, the corresponding returned value will be a Failure object.
"""
pass
def __getitem__(self, key):
"""Get value for key."""
val = self.get_batch([key])[0]
if isinstance(val, Failure):
raise KeyError(key)
return val
@abstractmethod
def contains_batch(self, keys):
"""Check for the presence of each key in keys.
Args:
keys (list): a list of keys
Returns:
list[bool]: a list of booleans with the same order corresponding to the list of keys, indicating
whether each key is present in the BatchMapping.
"""
pass
def __contains__(self, key):
"""Check if key is in the mapping."""
return self.contains_batch([key])[0]
class BatchMutableMapping(MutableMapping, BatchMapping, metaclass=ABCMeta):
"""Like the built-in MutableMapping, except subclasses must implement batch versions of setitem and delitem."""
@abstractmethod
def set_batch(self, key_val_pairs):
pass
def __setitem__(self, key, value):
self.set_batch([(key, value)])
@abstractmethod
def del_batch(self, keys):
pass
def __delitem__(self, key):
self.del_batch([key])
class SimpleBatchMapping(BatchMutableMapping):
def __init__(self, d=None):
if d is None:
d = {}
self._d = d
def get_batch(self, keys):
f = Failure.silent("Could not get key.")
return [self._d.get(k, f) for k in keys]
def contains_batch(self, keys):
return [k in self._d for k in keys]
def set_batch(self, key_val_pairs):
for k, v in key_val_pairs:
self._d[k] = v
def del_batch(self, keys):
for k in keys:
del self._d[k]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
class CacheWrapperMixin(object):
def _set_cache(self, cache):
self._cache = cache
@property
def cache(self):
return self._cache
def __iter__(self):
return iter(self.cache)
def __len__(self):
return len(self.cache)
def iteritems(self):
return iter(self.cache.items())
def iterkeys(self):
return iter(self.cache.keys())
def itervalues(self):
return iter(self.cache.values())
def keys(self):
return list(self.cache.keys())
def items(self):
return list(self.cache.items())
def values(self):
return list(self.cache.values())
class LazyMapping(CacheWrapperMixin, BatchMapping):
def __init__(self, cache):
"""Create a LazyMapping.
Args:
cache (BatchMutableMapping)
"""
self._set_cache(cache)
def contains_batch(self, keys):
"""Determine whether each key in the batch is already present in the cache.
Args:
keys (list): a list of keys
Returns:
list[bool]: a list of booleans, indicating whether each key is present in the cache
"""
return self.cache.contains_batch(keys)
@abstractmethod
def compute_batch(self, keys):
"""Compute the values for a batch of keys.
Args:
keys (list): a list of keys
Returns:
list: a list of values with the same order corresponding to the list of keys.
If a given key does not have a value, the corresponding returned value will be a Failure object.
"""
pass
def compute(self, key):
"""Compute the value for a single key.
Args:
key
Returns:
val
"""
return self.compute_batch([key])[0]
def ensure_batch(self, keys, computed_list=False):
"""Ensure that the given keys are present in the cache.
If a key is not present, its entry will be computed.
Args:
keys (list): a list of keys
computed_list (bool): defaults to False. See Returns description.
Returns:
if computed_list:
list(bool): a list of booleans indicating which keys were freshly computed (may include failed computations)
else:
int: the number of keys which were freshly computed
"""
presence = self.cache.contains_batch(keys)
to_compute = [key for key, present in zip(keys, presence) if not present]
computed = self.compute_batch(to_compute)
updates = []
for key, val in zip(to_compute, computed):
if not isinstance(val, Failure):
updates.append((key, val))
self.cache.set_batch(updates)
if computed_list:
return [not p for p in presence]
return len([p for p in presence if not p])
def get_batch(self, keys, compute=True):
"""Get value for each key in keys.
Args:
keys (list): a list of keys
compute (bool): if a key is missing from the cache, compute it. When disabled, just returns Failure
objects for missing keys.
Returns:
list: a list of values with the same order corresponding to the list of keys.
If a given key's value cannot be computed, the corresponding returned value will be a Failure object.
"""
if compute:
self.ensure_batch(keys)
return self.cache.get_batch(keys)
@staticmethod
def compute_batch_parallel(fxn, keys):
"""Execute a function in parallel on the entire batch of keys, using a multi-threaded executor.
This is a helper function which subclasses of LazyDict can use to implement `compute_batch`.
Note that speedups will only be obtained if compute is IO bound, due to Python's GIL.
Args:
fxn (Callable): function to be called in parallel
keys (list): a list of keys
Returns:
list: result is equivalent to [fxn(key) for key in keys]
"""
no_result_failure = Failure.silent('No result returned by SimpleExecutor.')
results = [no_result_failure] * len(keys)
with SimpleExecutor(fxn) as ex:
for i, key in enumerate(keys):
ex.submit(i, key)
for i, val in ex.results():
results[i] = val
for result in results:
assert result != no_result_failure
return results
class EagerMapping(CacheWrapperMixin, BatchMapping):
def __init__(self, cache):
self._set_cache(cache)
if len(cache) == 0:
self.populate(cache)
@abstractmethod
def populate(self, cache):
pass
def get_batch(self, keys):
return self.cache.get_batch(keys)
def contains_batch(self, keys):
return self.cache.contains_batch(keys)
class EagerSequence(Sequence):
def __init__(self, cache):
self._cache = cache
if len(self.cache) == 0:
self.populate(self.cache)
@property
def cache(self):
return self._cache
@abstractmethod
def populate(self, cache):
pass
def __getitem__(self, key):
return self.cache[key]
def __iter__(self):
return iter(self.cache)
def __len__(self):
return len(self.cache)
def sqlalchemy_metadata(host, port, database, username, password):
url = URL(drivername='postgresql+psycopg2', username=username,
password=password, host=host, port=port, database=database)
engine = create_engine(url, server_side_cursors=True, connect_args={'connect_timeout': 4})
# ensure that we can connect
with engine.begin():
pass # this will throw OperationalError if it fails
return MetaData(engine)
class ORM(object, metaclass=ABCMeta):
def __init__(self, columns):
assert isinstance(columns, list)
for c in columns:
assert isinstance(c, ORMColumn)
self._columns = columns
@property
def columns(self):
"""Return a list of ORMColumns."""
return self._columns
@abstractmethod
def to_row(self, value):
"""Convert object into database row.
Args:
value (object)
Returns:
dict[Column, object]
"""
pass
@abstractmethod
def from_row(self, row):
"""Convert row back into object.
Args:
dict[Column, object]
Returns:
object
"""
pass
def bind(self, table):
for orm_col in self.columns:
orm_col.bind(table)
class SimpleORM(ORM):
def __init__(self, column):
self._col = column
super(SimpleORM, self).__init__([column])
def to_row(self, value):
return {self._col.key: value}
def from_row(self, row):
return row[self._col.key]
class CustomORM(ORM):
def __init__(self, columns, to_row, from_row):
self._to_row = to_row
self._from_row = from_row
super(CustomORM, self).__init__(columns)
def to_row(self, value):
return self._to_row(value)
def from_row(self, row):
return self._from_row(row)
class ORMColumn(object):
"""Wraps a SQLAlchemy Column object."""
def __init__(self, *args, **kwargs):
self._rebuild(args, kwargs)
def _rebuild(self, args, kwargs):
if self.bound:
raise RuntimeError('Cannot rebuild ORMColumn if it is already bound.')
self._unbound_column = Column(*args, **kwargs)
self._args = args
self._kwargs = kwargs
@property
def unbound_column(self):
return self._unbound_column
@property
def name(self):
return self.unbound_column.name
def extend(self, *args, **kwargs):
new_args = self._args + args
new_kwargs = dict(self._kwargs)
new_kwargs.update(kwargs)
self._rebuild(new_args, new_kwargs)
@property
def bound(self):
return hasattr(self, '_column')
def bind(self, table):
col_names = [c.name for c in table.columns]
if len(col_names) != len(set(col_names)):
raise ValueError('Can only bind to table with unique column names.')
self._column = table.c[self.name]
@property
def column(self):
"""Return SQLAlchemy Column object."""
if self.bound:
return self._column
else:
raise RuntimeError("Need to bind ORMColumn to a Table.")
@property
def key(self):
"""Used to select this column from a SQLAlchemy RowProxy."""
return self.column
class TableMapping(BatchMutableMapping):
def __init__(self, name, key_orm, val_orm, metadata, engine=None):
if engine is None:
engine = metadata.bind
assert isinstance(engine, Engine)
# mark columns as primary keys
for c in key_orm.columns:
c.extend(primary_key=True)
# Convert ORMColumns into SQLAlchemy Columns to construct Table
orm_cols = key_orm.columns + val_orm.columns
table_cols = [orm_col.unbound_column for orm_col in orm_cols]
# avoid overlapping column names
col_names = [col.name for col in table_cols]
if len(col_names) != len(set(col_names)):
raise ValueError("Column names must be unique.")
try:
# If table is already defined in metadata, return it.
# It is possible for the table to be defined in metadata, but not exist in database.
# (e.g. if metadata.drop_all() was called)
# If not, use reflection to get information about the table from the database, and return it.
# If table isn't in database, raise NoSuchTableError.
table = Table(name, metadata, autoload=True)
except NoSuchTableError:
# Define the table.
table = Table(name, metadata, *table_cols)
# If table does not exist in database, create it.
metadata.create_all()
# make sure we only get the columns we expected
if set([c.name for c in table.columns]) != set(col_names):
raise ValueError("ORM column names must match table column names exactly.")
# ORMs must have a reference to the Table's Column objects.
key_orm.bind(table)
val_orm.bind(table)
self._key_orm = key_orm
self._val_orm = val_orm
self._table = table
self._engine = engine
@property
def _key_cols(self):
"""Return a list of Columns (not ORMColumns)."""
return [orm_column.column for orm_column in self._key_orm.columns]
@property
def _val_cols(self):
"""Return a list of Columns (not ORMColumns)."""
return [orm_column.column for orm_column in self._val_orm.columns]
@contextmanager
def _transaction(self):
with self._engine.begin() as conn:
yield conn
# connection automatically closed after transaction
assert conn.closed
@property
def table(self):
return self._table
def _key_conditions(self, keys):
vals = []
for key in keys:
row = self._key_orm.to_row(key)
val = tuple(row[c] for c in self._key_cols)
vals.append(val)
return tuple_(*self._key_cols).in_(vals)
def contains_batch(self, keys):
if len(keys) == 0: return []
# select all rows matching any of the keys
condition = self._key_conditions(keys)
cmd = select(self._key_cols).where(condition)
# get the set of keys found
with self._transaction() as conn:
result = conn.execute(cmd)
present_keys = set(self._key_orm.from_row(row) for row in result)
return [key in present_keys for key in keys]
def get_batch(self, keys):
if len(keys) == 0: return []
key_to_index = {k: i for i, k in enumerate(keys)}
condition = self._key_conditions(keys)
cmd = select([self.table]).where(condition)
with self._transaction() as conn:
results = conn.execute(cmd)
no_result_failure = Failure.silent('No result returned from TableDict.')
vals = [no_result_failure] * len(keys)
for row in results:
key = self._key_orm.from_row(row)
val = self._val_orm.from_row(row)
index = key_to_index[key]
vals[index] = val
return vals
def _kv_to_row(self, key, val, string_cols=False):
row = self._key_orm.to_row(key)
row.update(self._val_orm.to_row(val))
if string_cols:
row = {col.name: v for col, v in row.items()}
return row
def del_batch(self, keys):
if len(keys) == 0: return
condition = self._key_conditions(keys)
cmd = self.table.delete().where(condition)
with self._transaction() as conn:
result = conn.execute(cmd)
if result.rowcount == 0:
raise KeyError(keys) # rollback
def __iter__(self):
with self._transaction() as conn:
for row in conn.execute(select(self._key_cols)):
yield self._key_orm.from_row(row)
def __len__(self):
cmd = self.table.count()
with self._transaction() as conn:
return conn.execute(cmd).scalar()
def set_batch(self, key_val_pairs):
if len(key_val_pairs) == 0: return
keys, vals = list(zip(*key_val_pairs))
# make sure keys are unique
assert len(keys) == len(set(keys))
present_keys = []
for key, present in zip(keys, self.contains_batch(keys)):
if present:
present_keys.append(key)
rows = []
for k, v in key_val_pairs:
row = self._kv_to_row(k, v, string_cols=True)
rows.append(row)
with self._transaction() as conn:
self.del_batch(present_keys) # delete rows that are already present
conn.execute(self.table.insert(), rows) # insert new rows
def iteritems(self):
with self._transaction() as conn:
for row in conn.execute(select([self.table])):
key = self._key_orm.from_row(row)
val = self._val_orm.from_row(row)
yield (key, val)
def iterkeys(self):
return iter(self)
def itervalues(self):
for _, val in self.items():
yield val
def keys(self):
return list(self.keys())
def items(self):
return list(self.items())
def values(self):
return list(self.values())
class FileMapping(MutableMapping, Closeable):
def __init__(self, path):
self._path = path
self._f = open_or_create(self._path, 'r+')
s = self._f.read()
if len(s) == 0:
self._d = {}
else:
self._d = json.loads(s)
def close(self):
self._f.close()
@property
def closed(self):
return self._f.closed
def __repr__(self):
return 'FileMapping at {}'.format(self._path)
def _dump(self):
f = self._f
f.seek(0)
f.truncate()
json.dump(self._d, f)
f.flush()
def __setitem__(self, key, value):
self._d[key] = value
self._dump()
def __delitem__(self, key):
del self._d[key]
self._dump()
def __getitem__(self, item):
return self._d[item]
def __len__(self):
return len(self._d)
def __iter__(self):
return iter(self._d)
def __str__(self):
return str(self._d)
def __repr__(self):
return repr(self._d)
class FileSerializer(object):
__class__ = ABCMeta
@abstractmethod
def to_line(self, obj):
"""Return a string that can be written as a SINGLE line in a file (cannot contain newline character)."""
pass
@abstractmethod
def from_line(self, line):
pass
class UnicodeSerializer(FileSerializer):
def to_line(self, obj):
u = ensure_unicode(obj)
return u.encode('utf-8')
def from_line(self, line):
return line.decode('utf-8')
class CustomSerializer(FileSerializer):
def __init__(self, to_line, from_line):
self._to = to_line
self._from = from_line
def to_line(self, obj):
return self._to(obj)
def from_line(self, line):
return self._from(line)
class JSONPicklableSerializer(FileSerializer):
def to_line(self, obj):
return obj.to_json_str()
def from_line(self, line):
return JSONPicklable.from_json_str(line)
class AppendableSequence(Sequence):
__class__ = ABCMeta
@abstractmethod
def append(self, item):
pass
def extend(self, items):
for item in items:
self.append(item)
class SimpleAppendableSequence(AppendableSequence, Closeable):
def __init__(self, l=None):
if l is None:
l = []
self._l = l
self._closed = False
def __getitem__(self, item):
if isinstance(item, slice):
return SequenceSlice(self, item)
return self._l[item]
def __len__(self):
return len(self._l)
def append(self, item):
self._l.append(item)
def close(self):
self._closed = True
@property
def closed(self):
return self._closed
class FileSequenceOffsets(Sequence, Closeable):
def __init__(self, file_seq):
offsets_path = file_seq.path + '.offsets'
file_existed = os.path.isfile(offsets_path) # check if file already existed
self._f_write = open_or_create(offsets_path, 'a') # open for appending only
if file_existed:
# load offsets from file into memory
with open(offsets_path, 'r') as f:
self._offsets = [int(line) for line in f] # int cast strips newline automatically
else:
# build offsets (in-memory and on-file)
self._offsets = []
current_offset = 0
for line in file_seq.iter_raw_lines():
self.append(current_offset)
current_offset += len(line)
self._offsets_path = offsets_path
def close(self):
self._f_write.close()
@property
def closed(self):
return self._f_write.closed
def __repr__(self):
return 'FileSequenceOffsets at {}'.format(self._offsets_path)
def __getitem__(self, i):
return self._offsets[i]
def __len__(self):
return len(self._offsets)
def append(self, i):
self.extend([i])
def extend(self, i_list):
self._offsets.extend(i_list)
f = self._f_write
for i in i_list:
f.write(str(i))
f.write('\n')
f.flush()
class FileSequenceMetaData(Closeable):
"""Stores FileSequence properties in a JSON file."""
def __init__(self, file_seq):
"""Store metadata about a FileSequence.
Args:
file_seq (FileSequence)
"""
meta_path = file_seq.path + '.meta'
file_existed = os.path.isfile(meta_path) # check if file already exists
self._d = FileMapping(meta_path) # initialize underlying dict
if not file_existed:
self.length = len(file_seq) # record length
def close(self):
self._d.close()
@property
def closed(self):
return self._d.closed
@property
def length(self):
try:
return self._d['length']
except KeyError:
raise AttributeError()
@length.setter
def length(self, val):
self._d['length'] = val
def __str__(self):
return str(self._d)
def __repr__(self):
return repr(self._d)
class FileSequence(AppendableSequence, Closeable):
"""Sequence backed by a file."""
def __init__(self, path, serializer=None):
if serializer is None:
serializer = UnicodeSerializer() # by default, just write to file as utf-8 encoded strings
self._path = path
self._ser = serializer
# open or create the corresponding file
self._f_read = open_or_create(path, 'r') # for reading only
self._f_write = open_or_create(path, 'a') # for appending. Stream positioned at end of file.
# create metadata
self._offsets = FileSequenceOffsets(self) # note: this must come before metadata
self._meta = FileSequenceMetaData(self)
def close(self):
self._meta.close()
self._offsets.close()
self._f_write.close()
self._f_read.close()
@property
def closed(self):
return self._meta.closed and self._offsets.closed and self._f_write.closed and self._f_read.closed
def __repr__(self):
return 'FileSequence at {}'.format(self._path)
@property
def path(self):
return self._path
def _strip_newline(self, line):
return line[:-1]
def __getitem__(self, i):
if isinstance(i, slice):
return SequenceSlice(self, i)
f = self._f_read
f.seek(self._offsets[i])
line = f.readline()
line = self._strip_newline(line)
return self._ser.from_line(line)
def __len__(self):
return len(self._offsets)
def append(self, item):
self.extend([item])
def extend(self, items):
f = self._f_write
offsets = []
for item in items:
offset = f.tell()
offsets.append(offset)
line = self._ser.to_line(item)
f.write(line)
f.write('\n')
f.flush()
self._meta.length += len(offsets) # keep metadata up-to-date
self._offsets.extend(offsets)
def iter_raw_lines(self):
for line in self._f_read:
yield line
def __iter__(self):
for line in self.iter_raw_lines():
line = self._strip_newline(line)
yield self._ser.from_line(line)
class SimpleFileSequence(FileSequence):
def __init__(self, path):
ser = UnicodeSerializer()
super(SimpleFileSequence, self).__init__(path, ser)
class Shard(FileSequence):
"""A FileSequence serving as a Shard in a ShardedSequence."""
@classmethod
def open(cls, directory, index, max_length, serializer):
path = cls.shard_path(directory, index)
if not os.path.isfile(path):
raise IOError('No such shard: {}'.format(path))
return Shard(directory, index, max_length, serializer)
@classmethod
def shard_path(cls, directory, index):
return os.path.join(directory, '{}.shard'.format(index))
def __init__(self, directory, index, max_length, serializer):
path = self.shard_path(directory, index)
self._index = index
self._max_length = max_length
super(Shard, self).__init__(path, serializer)
assert len(self) <= self._max_length
@property
def index(self):
return self._index
@property
def max_length(self):
return self._max_length
@property
def remaining_space(self):
return self.max_length - len(self)
class ShardedSequence(AppendableSequence, Closeable):
def __init__(self, directory, shard_size, serializer):
self._directory = directory
self._shard_size = shard_size
self._serializer = serializer
# create directory if it does not exist
makedirs(directory)
# identify shards in the directory
self._shards = []
for k in itertools.count():
try:
shard = Shard.open(directory, k, self._shard_size, serializer)
self._shards.append(shard)
except IOError:
break
# create one shard if there are none
if len(self._shards) == 0:
self.add_shard()
# all shards except the last should match the shard size
for i, shard in enumerate(self._shards):
l = len(shard)
if i == len(self._shards) - 1: # final shard
assert l <= self._shard_size
else:
assert l == self._shard_size
def __repr__(self):
return 'ShardedSequence at {}'.format(self._directory)
def close(self):
for shard in self._shards:
shard.close()
@property
def closed(self):
for shard in self._shards:
if not shard.closed:
return False
return True
@property
def shard_size(self):
return self._shard_size
@property
def directory(self):
return self._directory
def __len__(self):
return sum(len(s) for s in self._shards)
def __getitem__(self, i):
if isinstance(i, slice):
return SequenceSlice(self, i)
index = i // self.shard_size
shard_index = i % self.shard_size
try:
shard = self._shards[index]
return shard[shard_index]
except IndexError:
raise IndexError('{} exceeds max index of ShardedSequence.'.format(i))
def add_shard(self):
index = len(self._shards)
shard = Shard(self.directory, index, self.shard_size, self._serializer)
self._shards.append(shard)
return shard
def appendable_shard(self):
"""Return the shard that we can append to.
If the last existing shard is full, create a new shard and return that.
Returns:
Shard
"""
last_shard = self._shards[-1]
if last_shard.remaining_space == 0:
last_shard = self.add_shard()
return last_shard
def append(self, item):
self.extend([item])
def extend(self, items):
iter_items = iter(items)
def get_batch(k):
"""Get up to k more elements from items."""
results = []
for _ in range(k):
try:
results.append(next(iter_items))
except StopIteration:
break
return results
# keep filling shards until we can't fill them anymore
while True:
shard = self.appendable_shard()
requested = shard.remaining_space
batch = get_batch(requested)
shard.extend(batch)
if len(batch) < requested:
break
def __iter__(self):
return itertools.chain(*self._shards)
class BatchIterator(Iterator, metaclass=ABCMeta):
def __init__(self, default_batch_size=20):
self._default_batch_size = default_batch_size
@abstractmethod
def next_batch(self, k):
"""Get next batch of elements from iterator.
Get k more elements from the iterator. If there are less than k elements remaining,
return whatever remains.
Raise StopIteration if and only if there are 0 more elements to yield.
Args:
k (int): number of elements to yield
Returns:
list: batch of elements
"""
pass
def __next__(self):
try:
return next(self._latest_batch)
except (AttributeError, StopIteration):
self._latest_batch = iter(self.next_batch(self._default_batch_size))
return next(self._latest_batch)
class LazyIterator(BatchIterator, metaclass=ABCMeta):
def __init__(self, cache, default_batch_size=100):
"""Create a CacheIterator.
Args:
cache (AppendableSequence): an appendable sequence
"""
self._iterated = 0
self._cache = cache
super(LazyIterator, self).__init__(default_batch_size=default_batch_size)
@property
def iterated(self):
"""Number of elements produced by this iterator so far."""
return self._iterated
@property
def cache(self):
return self._cache
@abstractmethod
def compute_batch(self, k):
"""Compute the next k items for the iterator.
This should be a function of self.iterated, self.cache and k.
Besides these 3 variables, it should NOT rely on any state accumulated from previous iterations of the iterator.
Args:
k (int)
Returns:
A list of up to k items. If there aren't k more items to compute, just return whatever there
is to compute.
"""
pass
@property
def num_computed(self):
return len(self.cache)
def _ensure_batch(self, k):
"""Ensure that the cache has the next k items.
If there aren't k more items to add, just add whatever can be added.
Returns:
the number of freshly computed new items
"""
missing = (self.iterated + k) - len(self.cache)
if missing <= 0:
return 0 # cache already has everything we need
batch = self.compute_batch(k)
new_items = batch[k - missing:]
self.cache.extend(new_items)
return len(new_items)
def next_batch(self, k):
self._ensure_batch(k)
cache_excess = len(self.cache) - self.iterated
num_to_yield = min(cache_excess, k) # sometimes the cache doesn't have k more
if num_to_yield == 0:
raise StopIteration # no more elements
i = self._iterated
batch = list(self.cache[i:i + num_to_yield])
self._iterated += num_to_yield
return batch
def advance_to(self, index):
"""Advance the iterator to the specified index.
Args:
index (int): the next item yielded by the iterator will be iterator[index]
"""
if index > len(self.cache):
raise IndexError('Cache has not been computed up to index {} yet.'.format(index))
self._iterated = index
def ensure_to(self, index, batch_size):
"""Ensure that every value up to (but not including) index has been computed.
Args:
index (int)
batch_size (int): size of the batches used to compute missing values.
"""
while True:
n = self.num_computed
if n >= index:
break
self.advance_to(n)
self._ensure_batch(batch_size)
class SequenceSlice(Sequence):
def __init__(self, seq, slice):
self._seq = seq
start, stop, step = slice.start, slice.stop, slice.step
if start is None:
start = 0
if stop is None:
stop = len(seq)
if step is None:
step = 1
for val in (start, stop, step):
if val < 0:
raise ValueError("Slice values must be non-negative.")
self.start, self.stop, self.step = start, stop, step
def __getitem__(self, i):
if i < 0: # allow for negative indexing
if i < -len(self): # only allow negatives in the appropriate range
raise IndexError()
i = i % len(self) # convert to positive index
idx = self.start + self.step * i
if idx >= self.stop:
raise IndexError()
return self._seq[idx]
def __len__(self):
diff = self.stop - self.start
num_items = diff / self.step # integer division rounds down
remainder = diff % self.step
if remainder > 0:
num_items += 1
return num_items
|
ContextualSP/lemon/executor/gtd/persist.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/persist.py",
"repo_id": "ContextualSP",
"token_count": 15505
}
| 239 |
from unittest import TestCase
from os.path import join
import pytest
from gtd.text import PhraseMatcher
from gtd.utils import FileMemoized, SimpleExecutor, as_batches, Failure, NestedDict, EqualityMixinSlots, \
memoize_with_key_fxn, DictMemoized
def test_as_batches():
items = [0, 1, 2, 3, 4, 5, 6]
assert list(as_batches(items, 2)) == [[0, 1], [2, 3], [4, 5], [6]]
def test_file_memoized_represent_args(tmpdir):
path = str(tmpdir.join('fxn'))
fm = FileMemoized(None, path, None, None)
key = fm._cache_key(['a', 'b'], {'c': 2, 'd': 'e'})
assert key == join(path, 'a_b_c=2_d=e.txt')
key = fm._cache_key([], {'c': 2, 'd': 'e'})
assert key == join(path, 'c=2_d=e.txt')
key = fm._cache_key([], dict())
assert key == join(path, 'NO_KEY.txt')
class TestUtils(TestCase):
def test_phrase_matcher(self):
phrases = [[1, 2, 3], [1, ], [2, ], [2, 4]]
not_phrases = [[1, 2], [4, ]]
pm = PhraseMatcher(phrases)
for phrase in phrases:
self.assertTrue(pm.has_phrase(phrase))
for phrase in not_phrases:
self.assertFalse(pm.has_phrase(phrase))
tokens = [1, 2, 1, 2, 3, 2, 3, 2, 4]
matches = pm.match(tokens)
correct = [((1,), 0, 1),
((2,), 1, 2),
((1,), 2, 3),
((2,), 3, 4),
((1, 2, 3), 2, 5),
((2,), 5, 6),
((2,), 7, 8),
((2, 4), 7, 9)]
self.assertEqual(matches, correct)
class TestSimpleExecutor(object):
def test_context_manager(self):
fxn = lambda x: 2 * x
with SimpleExecutor(fxn, max_workers=2) as ex:
for i, x in enumerate(range(10)):
ex.submit(i, x)
results = {k: v for k, v in ex.results()}
correct = {k: 2 * k for k in range(10)}
assert results == correct
class TestFailure(object):
def test_eq(self):
f0 = Failure()
f1 = Failure()
f2 = Failure(uid=1)
f3 = Failure(uid=1, message='different message')
assert f0 != f1 # different id
assert f1 != f2 # different id
assert f2 == f3 # same id
class TestNestedDict(object):
@pytest.fixture
def normal_dict(self):
return {
'a': 1,
'b': {
'c': 2,
'd': 3,
},
}
@pytest.fixture
def nested_dict(self, normal_dict):
return NestedDict(normal_dict)
def test_as_dict(self, nested_dict, normal_dict):
assert nested_dict.as_dict() == normal_dict
def test_iter(self, nested_dict):
assert set(nested_dict) == {'a', 'b'}
def test_len(self, nested_dict):
assert len(nested_dict) == 3
def test_nested(self):
d = NestedDict()
d.set_nested(('a', 'b', 'c'), 1)
d.set_nested(('a', 'd'), 2)
assert d.as_dict() == {
'a': {
'b': {
'c': 1
},
'd': 2,
}
}
assert d.get_nested(('a', 'd')) == 2
with pytest.raises(KeyError):
d.get_nested(('a', 'd', 'e'))
def test_leaves(self, nested_dict):
assert set(nested_dict.leaves()) == {1, 2, 3}
class DummySlotsObject(EqualityMixinSlots):
__slots__ = ['a', 'b', 'c']
def __init__(self, a, b, c=None):
self.a = a
self.b = b
if c:
self.c = c
class TestEqualityMixinSlot(object):
def test_equality(self):
d1 = DummySlotsObject(5, 10)
d2 = DummySlotsObject(5, 10)
assert d1 == d2
d3 = DummySlotsObject(5, 10, 20)
d4 = DummySlotsObject(5, 11)
assert d1 != d3
assert d1 != d4
class MemoizedClass(object):
def __init__(self):
self.calls = 0
@memoize_with_key_fxn(lambda self, a, b: b) # key fxn only uses b
def fxn_to_memoize(self, a, b):
self.calls += 1
return a + b
class MemoizedClass2(object):
def __init__(self):
self.calls = 0
def fxn(self, a, b):
self.calls += 1
return a + b
fxn_memoized = DictMemoized(fxn)
class TestDictMemoized(object):
def test(self):
mc = MemoizedClass2()
result = mc.fxn_memoized('a', 'b')
assert result == 'ab'
assert mc.calls == 1
result2 = mc.fxn_memoized('a', 'b')
assert result2 == 'ab'
assert mc.calls == 1
result2 = mc.fxn_memoized('b', 'b')
assert result2 == 'bb'
assert mc.calls == 2
class TestMemoizeWithKey(object):
def test_caching(self):
mc = MemoizedClass()
result = mc.fxn_to_memoize('hey', 'there')
assert mc.calls == 1
assert result == 'heythere'
# returns cached result
result2 = mc.fxn_to_memoize('hey', 'there')
assert result2 == 'heythere'
assert mc.calls == 1
# computes new result
result3 = mc.fxn_to_memoize('hey', 'what')
assert mc.calls == 2
# only caches on 2nd arg, 'there', not 'you'
result4 = mc.fxn_to_memoize('you', 'there')
assert result4 == 'heythere'
assert mc.calls == 2
|
ContextualSP/lemon/executor/gtd/tests/test_utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/tests/test_utils.py",
"repo_id": "ContextualSP",
"token_count": 2716
}
| 240 |
import logging
import sys
from abc import abstractproperty, ABCMeta
import numpy as np
import tensorflow as tf
from keras.layers import Dense
from numpy.testing import assert_array_almost_equal
from gtd.chrono import verboserate
from gtd.ml.framework import Feedable, Optimizable
from gtd.ml.model import Embedder, MeanSequenceEmbedder, ConcatSequenceEmbedder, \
CandidateScorer, SoftCopyScorer, Scorer, \
Attention, BidiLSTMSequenceEmbedder, TokenEmbedder
from gtd.ml.seq_batch import SequenceBatch, FeedSequenceBatch, reduce_mean, embed
from gtd.ml.utils import expand_dims_for_broadcast, gather_2d
from gtd.utils import DictMemoized
from strongsup.embeddings import Vocabs, ContextualPredicate
from strongsup.example import DelexicalizedUtterance
from strongsup.parse_case import PrettyCaseEmbedding
from strongsup.rlong.state import RLongObject
from strongsup.tables.predicate import WikiTablePredicate, WikiTablePredicateType
from strongsup.utils import get_optimizer
################################
# Embedders
class DelexicalizedDynamicPredicateEmbedder(Embedder):
def __init__(self, rnn_states, type_embedder, name='DelexicalizedDynamicPredicateEmbedder'):
"""Construct DelexicalizedDynamicPredicateEmbedder.
Args:
rnn_states (SequenceBatch): of shape (num_contexts, seq_length, rnn_state_dim)
type_embedder (TokenEmbedder)
name (str)
"""
self._type_embedder = type_embedder
with tf.name_scope(name):
# column indices of rnn_states (indexes time)
self._col_indices = FeedSequenceBatch() # (num_predicates, max_predicate_mentions)
# row indices of rnn_states (indexes utterance)
self._row_indices = tf.placeholder(dtype=tf.int32, shape=[None]) # (num_predicates,)
row_indices_expanded = expand_dims_for_broadcast(self._row_indices, self._col_indices.values)
# (num_predicates, max_predicate_mentions, rnn_state_dim)
rnn_states_selected = SequenceBatch(
gather_2d(rnn_states.values, row_indices_expanded, self._col_indices.values),
self._col_indices.mask)
# (num_predicates, rnn_state_dim)
rnn_embeds = reduce_mean(rnn_states_selected, allow_empty=True)
rnn_embeds = tf.verify_tensor_all_finite(rnn_embeds, "RNN-state-based embeddings")
self._type_seq_embedder = MeanSequenceEmbedder(type_embedder.embeds, name='TypeEmbedder')
self._embeds = tf.concat(1, [rnn_embeds, self._type_seq_embedder.embeds])
def inputs_to_feed_dict(self, vocabs):
"""Feed.
Args:
vocabs (Vocabs)
Returns:
dict
"""
utterance_vocab = vocabs.utterances
pred_types = []
row_indices = []
col_indices = []
for contextual_pred in vocabs.dynamic_preds.tokens:
pred = contextual_pred.predicate
utterance = contextual_pred.utterance
pred_types.append(list(pred.types))
if utterance is None:
utterance_idx = 0
positions = []
else:
# an int corresponding to a row index of rnn_states
utterance_idx = utterance_vocab.word2index(utterance)
try:
# the token offsets of where the predicate is mentioned in the delexicalized utterance
positions = utterance.placeholder_positions[pred]
except KeyError:
# predicate doesn't appear in utterance
positions = []
row_indices.append(utterance_idx)
col_indices.append(positions)
feed = {}
feed[self._row_indices] = row_indices
feed.update(self._col_indices.inputs_to_feed_dict(col_indices))
feed.update(self._type_seq_embedder.inputs_to_feed_dict(pred_types, self._type_embedder.vocab))
return feed
@property
def embeds(self):
return self._embeds
class DynamicPredicateEmbedder(Embedder):
def __init__(self, word_embedder, type_embedder, name='DynamicPredicateEmbedder'):
"""PredicateEmbedder.
Embed a predicate as the average of its words, and the average of its types.
Args:
word_embedder (TokenEmbedder)
type_embedder (TokenEmbedder)
name (str): name scope for the sub-graph
"""
self._word_embedder = word_embedder
self._type_embedder = type_embedder
with tf.name_scope(name):
self._word_seq_embedder = MeanSequenceEmbedder(word_embedder.embeds, name='WordEmbedder')
self._type_seq_embedder = MeanSequenceEmbedder(type_embedder.embeds, name='TypeEmbedder')
self._embeds = tf.concat(1, [self._word_seq_embedder.embeds, self._type_seq_embedder.embeds])
def inputs_to_feed_dict(self, vocabs):
"""Feed.
Args:
vocabs (Vocabs)
Returns:
dict
"""
predicates = vocabs.dynamic_preds.tokens
pred_words = [contextual_pred.predicate.words for contextual_pred in predicates]
pred_types = [list(contextual_pred.predicate.types) for contextual_pred in predicates]
feed = {}
feed_words = self._word_seq_embedder.inputs_to_feed_dict(pred_words, self._word_embedder.vocab)
feed_types = self._type_seq_embedder.inputs_to_feed_dict(pred_types, self._type_embedder.vocab)
feed.update(feed_words)
feed.update(feed_types)
return feed
@property
def embeds(self):
return self._embeds
class PositionalPredicateEmbedder(Embedder):
def __init__(self, pred_embedder, name='PositionalPredicateEmbedder'):
"""Embed predicates using positional information.
Args:
pred_embedder (DynamicPredicateEmbedder): a dynamic predicate embedder, with no positional information
name (str): name scope
"""
with tf.name_scope(name):
nbr_embedder = MeanSequenceEmbedder(pred_embedder.embeds,
allow_empty=True) # average of a predicate's neighbors
self._embeds = tf.concat(1, [pred_embedder.embeds, nbr_embedder.embeds])
self._nbr_embedder = nbr_embedder
self._pred_embedder = pred_embedder
@property
def embeds(self):
return self._embeds
def _column_values(self, contextual_pred):
pred = contextual_pred.predicate
utterance = contextual_pred.utterance
context = utterance.context
pred_str = pred.name
graph = context.world.graph
ent_strs = list(graph.reversed_join(pred_str, graph.all_rows))
return [ContextualPredicate(WikiTablePredicate(s), utterance) for s in ent_strs]
def inputs_to_feed_dict(self, vocabs):
"""Feed.
Args:
vocabs (Vocabs)
Returns:
dict
"""
dynamic_vocab = vocabs.dynamic_preds
feed = {}
feed.update(self._pred_embedder.inputs_to_feed_dict(vocabs))
neighbors = []
for contextual_pred in dynamic_vocab.tokens:
if WikiTablePredicateType.is_relation(contextual_pred.predicate):
nbrs = self._column_values(contextual_pred) # a list of entity predicates
else:
nbrs = []
neighbors.append(nbrs)
feed.update(self._nbr_embedder.inputs_to_feed_dict(neighbors, dynamic_vocab))
return feed
class CombinedPredicateEmbedder(Embedder):
"""Concatenates embeddings for static and dynamic predicates
- static predicates: argmax, join, count, etc.
- dynamic predicates: e.g. united_states, nation, num_gold_medals
"""
def __init__(self, static_pred_embedder, dyn_pred_embedder):
"""Construct full predicate embedding model.
Args:
static_pred_embedder (TokenEmbedder): embeds for static predicates
dyn_pred_embedder (DynamicPredicateEmbedder): embedder for dynamic predicates
"""
with tf.name_scope('PredicateEmbedder'):
self._embeds = tf.concat(0, [static_pred_embedder.embeds, dyn_pred_embedder.embeds])
assert isinstance(static_pred_embedder, TokenEmbedder)
self._static_pred_embedder = static_pred_embedder
self._dyn_pred_embedder = dyn_pred_embedder
@property
def embeds(self):
return self._embeds # (vocab_size, embed_dim)
def inputs_to_feed_dict(self, vocabs):
# TODO(kelvin): these assert calls are slow
assert vocabs.all_preds.tokens == vocabs.static_preds.tokens + vocabs.dynamic_preds.tokens
assert vocabs.static_preds.tokens == self._static_pred_embedder.vocab.tokens
return self._dyn_pred_embedder.inputs_to_feed_dict(vocabs)
# WARNINGS:
# - The caching is only efficient if the same SETS of utterances are encountered across different mini-batches
# - The caching is only correct if the set of predicates only depends on the set of utterances in a mini-batch,
# and not the specific ParseCases.
inputs_to_feed_dict_cached = DictMemoized(inputs_to_feed_dict)
class UtteranceEmbedder(Embedder):
def __init__(self, word_embedder, lstm_dim, utterance_length):
with tf.name_scope('UtteranceEmbedder'):
self._word_vocab = word_embedder.vocab
# A simpler embedder which is order-blind
# self._seq_embedder = MeanSequenceEmbedder(word_embedder.embeds, seq_length=utterance_length)
# self._seq_embedder.hidden_states = self._seq_embedder._embedded_sequence_batch
self._seq_embedder = BidiLSTMSequenceEmbedder(word_embedder.embeds, seq_length=utterance_length, hidden_size=lstm_dim)
self._gather_indices = tf.placeholder(tf.int32, shape=[None], name='gather_indices')
self._gathered_embeds = tf.gather(self._seq_embedder.embeds, self._gather_indices)
hidden_states = self._seq_embedder.hidden_states
self._hidden_states_by_utterance = hidden_states
self._gathered_hidden_states = SequenceBatch(tf.gather(hidden_states.values, self._gather_indices),
tf.gather(hidden_states.mask, self._gather_indices))
@property
def hidden_states(self):
"""A SequenceBatch."""
return self._gathered_hidden_states
@property
def hidden_states_by_utterance(self):
return self._hidden_states_by_utterance
def inputs_to_feed_dict(self, cases, utterance_vocab):
# Optimization: Multiple cases have the same context (same utterance)
gather_indices = []
for case in cases:
gather_indices.append(utterance_vocab.word2index(case.current_utterance))
feed = self._seq_embedder.inputs_to_feed_dict(utterance_vocab.tokens, self._word_vocab)
feed[self._gather_indices] = gather_indices
return feed
@property
def embeds(self):
# return self._seq_embedder.embeds
return self._gathered_embeds
class HistoryEmbedder(Embedder):
def __init__(self, pred_embedder, history_length):
pred_embeds = pred_embedder.embeds
with tf.name_scope('HistoryEmbedder'):
self._seq_embedder = ConcatSequenceEmbedder(pred_embeds, seq_length=history_length, align='right')
self._history_length = history_length
self._embeds = self._seq_embedder.embeds # (batch_size, history_dim)
self._pred_embedder = pred_embedder
self._build_embeds_hash(self._embeds, history_length, pred_embedder.embed_dim)
def _build_embeds_hash(self, embeds, history_length, embed_dim):
# embeds is (batch_size, history_length * embed_dim)
embeds_shape = tf.shape(embeds)
batch_size = embeds_shape[0]
reshaped_embeds = tf.reshape(embeds, [batch_size, history_length, embed_dim])
# random vector, initialized once and never trained
hash_vector = tf.get_variable('history_hash_vector', shape=[embed_dim], dtype=tf.float32,
initializer=tf.random_normal_initializer(seed=0), trainable=False)
# inner product every predicate embedding with the hash vector
hash = tf.reshape(hash_vector, [1, 1, embed_dim]) # expand dims for broadcast
self._embeds_hash = tf.reduce_sum(reshaped_embeds * hash, axis=2, keep_dims=False) # (batch_size, max_stack_size)
@property
def embeds_hash(self):
return self._embeds_hash
@property
def history_length(self):
return self._history_length
@classmethod
def previous_decisions_for_this_utterance(cls, case):
target_utterance_idx = case.current_utterance_idx
previous_decisions = []
biggest_idx_so_far = 0
for prev_case in case._previous_cases:
utterance_idx = prev_case.current_utterance_idx
assert utterance_idx >= biggest_idx_so_far # monotonicity check
biggest_idx_so_far = max(biggest_idx_so_far, utterance_idx)
if utterance_idx == target_utterance_idx:
previous_decisions.append(prev_case.decision)
return previous_decisions
def inputs_to_feed_dict(self, cases, vocabs, ignore_previous_utterances):
histories = []
for case in cases:
if ignore_previous_utterances:
previous_decisions = self.previous_decisions_for_this_utterance(case)
else:
previous_decisions = case.previous_decisions
utterance = case.current_utterance
history = [vocabs.as_contextual_pred(pred, utterance) for pred in previous_decisions]
histories.append(history)
return self._seq_embedder.inputs_to_feed_dict(histories, vocabs.all_preds)
@property
def embeds(self):
return self._embeds
################################
# Scorers
class SimplePredicateScorer(CandidateScorer):
def __init__(self, query, predicate_embedder):
"""Given a query vector, compute logit scores for each predicate choice.
Args:
query (Tensor): the query tensor, of shape (batch_size, ?)
predicate_embedder (CombinedPredicateEmbedder)
"""
pred_embeds = predicate_embedder.embeds
super(SimplePredicateScorer, self).__init__(query, pred_embeds, project_query=True)
class AttentionPredicateScorer(CandidateScorer):
def __init__(self, query, predicate_embedder, utterance_embedder):
attention = Attention(utterance_embedder.hidden_states,
query, project_query=True)
# self._attention is already reserved for another purpose ...
self._attention_on_utterance = attention
pred_embeds = predicate_embedder.embeds
super(AttentionPredicateScorer, self).__init__(attention.retrieved, pred_embeds, project_query=True)
@property
def attention_on_utterance(self):
return self._attention_on_utterance
class SoftCopyPredicateScorer(Feedable, Scorer):
def __init__(self, utterance_attention_weights, disable=False):
self._disabled = disable
if self._disabled:
# just return all zeros
self._batch_size = tf.placeholder(tf.int32, shape=[])
self._num_candidates = tf.placeholder(tf.int32, shape=[])
zeros = tf.zeros([self._batch_size, self._num_candidates], dtype=tf.float32)
self._scores = SequenceBatch(values=zeros, mask=zeros)
else:
self._soft_copier = SoftCopyScorer(utterance_attention_weights)
self._scores = self._soft_copier.scores
@property
def scores(self):
return self._scores
def _candidate_alignments(self, pred, utterance):
aligns = utterance.predicate_alignment(pred)
aligns = [(offset, s) for offset, s in aligns if offset < self._soft_copier.input_length]
return aligns
def inputs_to_feed_dict(self, utterances, choice_batch):
"""Feed inputs.
Args:
utterances: (list[Utterance])
choice_batch (list[list[ContextualPredicate]])
Returns:
dict
"""
if self._disabled:
return {self._batch_size: len(utterances), self._num_candidates: max(len(c) for c in choice_batch)}
alignments_batch = []
for utterance, choices in zip(utterances, choice_batch):
alignments = [self._candidate_alignments(contextual_pred.predicate, utterance) for contextual_pred in choices]
alignments_batch.append(alignments)
return self._soft_copier.inputs_to_feed_dict(alignments_batch)
class PredicateScorer(Feedable, Scorer):
def __init__(self, simple_scorer, attention_scorer, soft_copy_scorer):
"""
Args:
simple_scorer (SimplePredicateScorer)
attention_scorer (AttentionPredicateScorer)
soft_copy_scorer (SoftCopyPredicateScorer)
"""
assert isinstance(simple_scorer, SimplePredicateScorer)
assert isinstance(attention_scorer, AttentionPredicateScorer)
assert isinstance(soft_copy_scorer, SoftCopyPredicateScorer)
simple_scores = simple_scorer.scores # (batch_size, num_candidates)
attention_scores = attention_scorer.scores # (batch_size, num_candidates)
soft_copy_scores = soft_copy_scorer.scores # (batch_size, num_candidates)
# check that Tensors are finite
def verify_finite_inside_mask(scores, msg):
finite_scores = scores.with_pad_value(0).values
assert_op = tf.verify_tensor_all_finite(finite_scores, msg)
return assert_op
with tf.control_dependencies([
verify_finite_inside_mask(simple_scores, 'simple_scores'),
verify_finite_inside_mask(attention_scores, 'attention_scores'),
verify_finite_inside_mask(soft_copy_scores, 'soft copy scores'),
]):
scores = SequenceBatch(
simple_scores.values + attention_scores.values + soft_copy_scores.values,
simple_scores.mask)
subscores = SequenceBatch(
tf.pack(
[simple_scores.values, attention_scores.values, soft_copy_scores.values],
axis=2),
simple_scores.mask)
scores = scores.with_pad_value(-float('inf'))
probs = SequenceBatch(tf.nn.softmax(scores.values), scores.mask)
self._scores = scores
self._subscores = subscores
self._probs = probs
self._simple_scorer = simple_scorer
self._attention_scorer = attention_scorer
self._soft_copy_scorer = soft_copy_scorer
@property
def scores(self):
return self._scores
@property
def subscores(self):
return self._subscores
@property
def probs(self):
return self._probs
@property
def attention_on_utterance(self):
return self._attention_scorer.attention_on_utterance
def inputs_to_feed_dict(self, cases, vocabs):
choice_batch = []
for case in cases:
utterance = case.current_utterance
choices = [vocabs.as_contextual_pred(pred, utterance) for pred in case.choices]
choice_batch.append(choices)
utterances = [case.current_utterance for case in cases]
pred_vocab = vocabs.all_preds
feed_simple_scorer = self._simple_scorer.inputs_to_feed_dict(choice_batch, pred_vocab)
feed_attention_scorer = self._attention_scorer.inputs_to_feed_dict(choice_batch, pred_vocab)
feed_sc_scorer = self._soft_copy_scorer.inputs_to_feed_dict(utterances, choice_batch)
feed = {}
feed.update(feed_simple_scorer)
feed.update(feed_attention_scorer)
feed.update(feed_sc_scorer)
return feed
# WARNING:
# - The caching is only efficient if tuple(c.context for c in cases) is encountered frequently across batches
# - The caching is only correct if case.choices only depends on case.context
inputs_to_feed_dict_cached = DictMemoized(inputs_to_feed_dict,
custom_key_fxn=lambda self, cases, vocabs: tuple(c.current_utterance for c in cases))
@property
def simple_scorer(self):
return self._simple_scorer
@property
def attention_scorer(self):
return self._attention_scorer
@property
def soft_copy_scorer(self):
return self._soft_copy_scorer
################################
# Full Models
class ParseModel(Feedable):
"""The NN responsible for scoring ParseCase choices.
Given a ParseCase, it will return a logit score for every option in ParseCase.options.
"""
def __init__(self, pred_embedder, history_embedder, stack_embedder, utterance_embedder, scorer_factory, h_dims,
domain, delexicalized):
"""ParseModel.
Args:
pred_embedder (CombinedPredicateEmbedder)
history_embedder (HistoryEmbedder | None): if None, model won't condition on history of previous predictions
stack_embedder (ExecutionStackEmbedder | None): if None, model won't condition on execution stack
utterance_embedder (UtteranceEmbedder)
scorer_factory (Callable[Tensor, PredicateScorer])
h_dims (list[int])
domain (str)
delexicalized (bool)
"""
# ReLU feedforward network
with tf.name_scope('ParseModel'):
state_embedders = [history_embedder, stack_embedder, utterance_embedder]
state_embeds = []
for state_embedder in state_embedders:
if state_embedder:
state_embeds.append(state_embedder.embeds)
self._input_layer = tf.concat(1, state_embeds)
# (batch_size, hist_dim + stack_dim + utterance_dim)
h = self._input_layer
for h_dim in h_dims:
h = Dense(h_dim, activation='relu')(h) # (batch_size, h_dim)
query = h
self._case_encodings = query
scorer = scorer_factory(query)
self._domain = domain
self._delexicalized = delexicalized
self._pred_embedder = pred_embedder
self._history_embedder = history_embedder
self._stack_embedder = stack_embedder
self._utterance_embedder = utterance_embedder
self._scorer = scorer
self._logits = scorer.scores.values
self._attention_on_utterance = scorer.attention_on_utterance.logits
self._sublogits = scorer.subscores.values
self._log_probs = tf.nn.log_softmax(self._logits)
self._probs = scorer.probs.values
# track how many times we've called inputs_to_feed_dict with caching=True
self._cache_calls = 0
self._test_cache_every_k = 100
@property
def logits(self):
return self._logits
@property
def log_probs(self):
return self._log_probs
@property
def case_encodings(self):
return self._case_encodings
@property
def probs(self):
return self._probs
def _compute_vocabs(self, utterances):
"""Compute Vocabs object.
Args:
utterances (frozenset[utterances])
"""
return Vocabs(utterances, self._domain)
_compute_vocabs_cached = DictMemoized(_compute_vocabs)
def inputs_to_feed_dict(self, cases, ignore_previous_utterances, caching):
feed = {}
utterances = frozenset([case.current_utterance for case in cases])
if self._delexicalized:
for utterance in utterances:
assert isinstance(utterance, DelexicalizedUtterance)
if caching:
vocabs = self._compute_vocabs_cached(utterances)
else:
vocabs = self._compute_vocabs(utterances)
def feed_dict(model):
if model is None:
return lambda *args, **kwargs: {} # nothing to be computed
if caching:
try:
return model.inputs_to_feed_dict_cached
except AttributeError:
pass # no cached version available
return model.inputs_to_feed_dict
feed.update(feed_dict(self._pred_embedder)(vocabs))
feed.update(feed_dict(self._history_embedder)(cases, vocabs, ignore_previous_utterances))
feed.update(feed_dict(self._stack_embedder)(cases))
feed.update(feed_dict(self._utterance_embedder)(cases, vocabs.utterances))
feed.update(feed_dict(self._scorer)(cases, vocabs))
if caching:
self._cache_calls += 1
# every once in a while, check that the cache values are not stale
if self._cache_calls % self._test_cache_every_k == 0:
fresh_feed = self.inputs_to_feed_dict(cases, caching=False)
for key in fresh_feed:
try:
assert_array_almost_equal(
fresh_feed[key], feed[key], decimal=3)
except Exception as e:
print('WTF', key)
print(cases)
print(fresh_feed[key])
print(feed[key])
raise e
# assert_array_collections_equal(fresh_feed, feed, decimal=4)
return feed
def score(self, cases, ignore_previous_utterances, caching):
"""Populate the choice_logits property for every ParseCase in the batch.
Args:
cases (list[ParseCase])
ignore_previous_utterances (bool): if True, pretend like the previous utterances were not uttered
caching (bool)
"""
if len(cases) == 0:
return
# define variables to fetch
fetch = {
'logits': self._logits,
'log_probs': self._log_probs,
}
if self._stack_embedder:
fetch['stack_hash'] = self._stack_embedder.embeds_hash
if self._history_embedder:
fetch['history_hash'] = self._history_embedder.embeds_hash
# fetch variables
fetched = self.compute(fetch, cases, ignore_previous_utterances, caching)
# unpack fetched values
logits, log_probs = fetched['logits'], fetched['log_probs'] # numpy arrays with shape (batch_size, max_choices)
stack_hash = fetched['stack_hash'] if self._stack_embedder else [None] * len(cases)
history_hash = fetched['history_hash'] if self._history_embedder else [None] * len(cases)
num_nans = lambda arr: np.sum(np.logical_not(np.isfinite(arr)))
# cut to actual number of choices
for i, case in enumerate(cases):
case.choice_logits = logits[i, :len(case.choices)]
case.choice_log_probs = log_probs[i, :len(case.choices)]
case.pretty_embed = PrettyCaseEmbedding(history_hash[i], stack_hash[i])
logit_nans = num_nans(case.choice_logits)
log_prob_nans = num_nans(case.choice_log_probs)
# Tracking NaN
if logit_nans > 0:
logging.error("logit NaNs: %d/%d", logit_nans, case.choice_logits.size)
if log_prob_nans > 0:
logging.error("log_prob NaNs: %d/%d", log_prob_nans, case.choice_log_probs.size)
def score_paths(self, paths, ignore_previous_utterances, caching):
cases_to_be_scored = []
used_case_ids = set()
for path in paths:
for case in path:
if id(case) not in used_case_ids:
cases_to_be_scored.append(case)
used_case_ids.add(id(case))
self.score(cases_to_be_scored, ignore_previous_utterances, caching)
def score_breakdown(self, cases, ignore_previous_utterances, caching):
"""Return the logits for all (parse case, choice, scorer) tuples.
Args:
cases (list[ParseCase])
ignore_previous_utterances (bool): if True, pretend like the previous utterances were not uttered
caching (bool)
Returns:
attention_on_utterance:
np.array of shape (len(cases), max len(utterance))
containing the attention score of each token.
sublogits:
np.array of shape (len(cases), max len(choices), number of scorers)
containing the logits of each scorer on each choice.
By default there are 3 scorers: basic, attention, and soft copy.
"""
if len(cases) == 0:
return []
return self.compute([self._attention_on_utterance, self._sublogits], cases, ignore_previous_utterances, caching)
class CrossEntropyLossModel(Feedable):
"""Defines a standard cross entropy loss on the decision of a ParseCase."""
def __init__(self, logits):
"""Define the loss model.
Args:
logits (Tensor): a tensor of shape (batch_size, max_choices)
"""
with tf.name_scope('LossModel'):
self._labels = tf.placeholder(tf.int32, shape=[None], name='labels')
self._losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, self._labels, name='losses')
def inputs_to_feed_dict(self, cases):
"""For each ParseCase, map case.decision to the appropriate placeholder.
Args:
cases (list[ParseCase])
"""
labels = [c.choices.index(c.decision) for c in cases]
return {self._labels: np.array(labels)}
@property
def losses(self):
return self._losses
class LogitLossModel(Feedable):
"""Defines a loss based on the logit."""
def __init__(self, logits):
"""Define the loss model.
Args:
logits (Tensor): a tensor of shape (batch_size, max_choices)
"""
with tf.name_scope('LossModel'):
self._labels = tf.placeholder(tf.int32, shape=[None], name='labels')
# Pick out the correct logit terms using gather
shape = tf.shape(logits)
flattened_logits = tf.reshape(logits, [-1])
self._losses = - tf.gather(flattened_logits,
tf.range(shape[0]) * shape[1] + self._labels)
def inputs_to_feed_dict(self, cases):
"""For each ParseCase, map case.decision to the appropriate placeholder.
Args:
cases (list[ParseCase])
"""
labels = [c.choices.index(c.decision) for c in cases]
return {self._labels: np.array(labels)}
@property
def losses(self):
return self._losses
class TrainParseModel(Optimizable, Feedable):
"""A wrapper around a ParseModel for training."""
def __init__(self, parse_model, loss_model_factory, learning_rate,
optimizer_opt, max_batch_size=None):
loss_model = loss_model_factory(parse_model.logits)
losses = loss_model.losses
with tf.name_scope('TrainParseModel'):
weights = tf.placeholder(tf.float32, [None])
weighted_losses = losses * weights
loss = tf.reduce_sum(weighted_losses)
step = tf.get_variable('step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
increment_step_op = tf.assign_add(step, 1)
optimizer = get_optimizer(optimizer_opt)(learning_rate)
take_step = optimizer.minimize(loss, global_step=step)
self._weights = weights
self._loss = loss
self._parse_model = parse_model
self._loss_model = loss_model
self._step = step
self._increment_step = increment_step_op
self._take_step = take_step
# For batched computation
self._max_batch_size = max_batch_size
if max_batch_size is not None:
self._grads_and_vars = optimizer.compute_gradients(loss)
self._grad_tensors = []
self._combined_grad_placeholders = []
for grad, var in self._grads_and_vars:
self._grad_tensors.append(tf.convert_to_tensor(grad))
self._combined_grad_placeholders.append(tf.placeholder(tf.float32))
self._apply_gradients = optimizer.apply_gradients(
list(zip(self._combined_grad_placeholders,
[var for (_, var) in self._grads_and_vars])))
@property
def loss(self):
return self._loss
@property
def parse_model(self):
return self._parse_model
@property
def logits(self):
return self.parse_model.logits
def score(self, cases, ignore_previous_utterances, caching):
return self.parse_model.score(cases, ignore_previous_utterances, caching)
def score_breakdown(self, cases, ignore_previous_utterances, caching):
return self.parse_model.score_breakdown(cases, ignore_previous_utterances, caching)
def inputs_to_feed_dict(self, cases, weights, caching):
"""Convert a batch of ParseCases and their corresponding weights into a feed_dict.
Args:
cases (list[ParseCase])
weights (list[float])
Returns:
feed_dict
"""
feed = {}
feed.update(self._loss_model.inputs_to_feed_dict(cases))
feed.update(self._parse_model.inputs_to_feed_dict(cases, ignore_previous_utterances=False,
caching=caching))
# when updating the model, we always acknowledge previous utterances
feed[self._weights] = np.array(weights)
return feed
def train_step(self, cases, weights, caching):
if len(cases) != len(weights):
raise ValueError('cases and weights must have the same length.')
if len(cases) == 0:
#logging.warn('Training on zero cases.')
print(" WARNING: Zero cases \033[F", file=sys.stderr)
# still increment the step
sess = tf.get_default_session()
sess.run(self._increment_step)
elif not self._max_batch_size or len(cases) <= self._max_batch_size:
print(" Updating ({} cases) \033[F".format(len(cases)), file=sys.stderr)
self.compute(self._take_step, cases, weights, caching)
else:
print(" Updating ({} cases) \033[F".format(len(cases)), file=sys.stderr)
assert not caching
grads = None
slices = list(range(0, len(cases), self._max_batch_size))
for i in verboserate(slices, desc='Computing gradients ({} cases)'.format(len(cases))):
cases_slice = cases[i:i + self._max_batch_size]
weights_slice = weights[i:i + self._max_batch_size]
grads_slice = self.compute(self._grad_tensors,
cases_slice, weights_slice, False)
if grads is None:
grads = grads_slice
else:
for i in range(len(self._grad_tensors)):
grads[i] += grads_slice[i]
sess = tf.get_default_session()
feed_dict = dict(list(zip(self._combined_grad_placeholders, grads)))
sess.run(self._apply_gradients, feed_dict)
sess.run(self._increment_step)
@property
def step(self):
return self._step.eval()
@property
def objective_tensor(self):
return self.loss
def stack_element_category(elem):
if isinstance(elem, (str, int)):
return 'PRIMITIVE'
elif isinstance(elem, RLongObject):
return 'OBJECT'
elif isinstance(elem, list):
return 'LIST'
else:
raise ValueError('Stack element of unknown category: {}'.format(elem))
class StackObjectEmbedder(Feedable, metaclass=ABCMeta):
@abstractproperty
def embeds(self):
"""A Tensor of shape [batch_size, max_stack_size, object_embed_dim].
Elements of each stack MUST be right-aligned!
i.e. zero-padding should be on the left side.
We want the topmost (rightmost) element of the stack to always appear in the same position.
"""
pass
@property
def embed_dim(self):
return self.embeds.get_shape().as_list()[2]
class StackOfAttributesEmbedder(Feedable):
"""Embed a batch of stacks, where each stack element is a list of attributes.
Lists of attributes are embedded as the concatenation of their elements, with right padding.
Lists that exceed max_list_size are truncated on the right.
"""
def __init__(self, attribute_embedder, extract_attribute, max_stack_size, max_list_size):
"""
Args:
attribute_embedder (TokenEmbedder)
extract_attribute (Callable[RLongObject, str]): extract a particular attribute from an RLongObject
max_stack_size (int)
max_list_size (int)
"""
list_embedder = ConcatSequenceEmbedder(attribute_embedder.embeds, seq_length=max_list_size)
list_embeds_flat = list_embedder.embeds # (batch_size * max_stack_size, list_embed_dim)
# where list_embed_dim = attribute_embed_dim * max_list_size
list_embeds_flat_shape = tf.shape(list_embeds_flat)
batch_size = list_embeds_flat_shape[0] / max_stack_size # a scalar Tensor, dynamically determined
list_embed_dim = list_embeds_flat.get_shape().as_list()[1] # a Python int, statically known
# (batch_size, max_stack_size, list_embed_dim)
self._embeds = tf.reshape(list_embeds_flat, shape=[batch_size, max_stack_size, list_embed_dim])
self._attribute_embedder = attribute_embedder
self._extract_attribute = extract_attribute
self._list_embedder = list_embedder
self._max_stack_size = max_stack_size
@property
def embeds(self):
return self._embeds # (batch_size, max_stack_size, list_embed_dim)
def _pad_stack(self, stack):
extra = self._max_stack_size - len(stack)
assert extra >= 0 # stack should never exceed max_stack-size
# always apply left-padding of the stack
empty_list = []
return [empty_list] * extra + stack
def convert_to_attribute_stacks(self, exec_stacks):
"""Convert a batch of execution stacks into a batch of attribute stacks.
Stack elements are converted as follows:
A list of RLongObjects is converted into a list of attributes.
A single RLongObject is converted into a single-item list.
A primitive stack element is converted into an empty list.
Args:
exec_stacks (list[list[basestring|int|RLongObject|list[RLongObject]]]): a batch of execution stacks,
where each stack element is either a primitive, RLongObject, or list of RLongObjects.
Returns:
attribute_stacks (list[list[list[str]]]): a batch of stacks, where each stack element is a list of
attributes (as strings).
"""
extract_attribute = self._extract_attribute
attribute_stacks = []
for stack in exec_stacks:
attribute_stack = []
for elem in stack:
category = stack_element_category(elem)
if category == 'PRIMITIVE':
attribute_list = []
elif category == 'OBJECT':
attribute_list = [extract_attribute(elem)]
elif category == 'LIST':
attribute_list = [extract_attribute(o) for o in elem] # assume that list is a list of objects
else:
raise ValueError('Cannot embed: {}'.format(elem))
attribute_stack.append(attribute_list)
attribute_stacks.append(attribute_stack)
return attribute_stacks
def inputs_to_feed_dict(self, exec_stacks):
"""Feed inputs.
Args:
attribute_stacks (list[list[list[str]]]): a batch of stacks, where each stack element is a list of
attributes (as strings).
Returns:
feed_dict
"""
attribute_stacks = self.convert_to_attribute_stacks(exec_stacks)
sequences = []
for stack in attribute_stacks:
padded_stack = self._pad_stack(stack)
for attribute_list in padded_stack:
sequences.append(attribute_list)
assert len(sequences) == len(exec_stacks) * self._max_stack_size
return self._list_embedder.inputs_to_feed_dict(sequences, self._attribute_embedder.vocab)
class RLongObjectEmbedder(StackObjectEmbedder):
def __init__(self, attribute_extractors, primitive_embedder, max_stack_size, max_list_size):
embedders = [StackOfAttributesEmbedder(primitive_embedder, attribute_extractor, max_stack_size, max_list_size)
for attribute_extractor in attribute_extractors]
# (batch_size, max_stack_size, max_list_size * primitive_embed_dim * len(embedders))
self._embeds = tf.concat(2, [embedder.embeds for embedder in embedders])
self._embedders = embedders
@property
def embeds(self):
return self._embeds
def inputs_to_feed_dict(self, exec_stacks):
feed = {}
for embedder in self._embedders:
feed.update(embedder.inputs_to_feed_dict(exec_stacks))
return feed
class ExecutionStackEmbedder(Embedder):
def __init__(self, primitive_embedder, object_embedder, max_stack_size, max_list_size,
project_object_embeds=True, abstract_objects=False):
"""ExecutionStackEmbedder.
Args:
primitive_embedder (TokenEmbedder)
object_embedder (StackObjectEmbedder)
max_stack_size (int)
max_list_size (int)
project_object_embeds (bool): defaults to True. If True, project object embeddings into
dimension of primitive embeddings.
abstract_objects (bool): defaults to False. If True, just embed all objects using the
same generic "object" token.
"""
# get primitive and object embeds
primitive_indices = FeedSequenceBatch(align='right', seq_length=max_stack_size) # (batch_size, max_stack_size)
primitive_embeds = embed(primitive_indices, primitive_embedder.embeds).values # (batch_size, max_stack_size, embed_dim)
object_embeds = object_embedder.embeds # (batch_size, max_stack_size, object_embed_dim)
# get Tensor shapes
primitive_embed_dim = primitive_embedder.embed_dim
object_embed_dim = object_embedder.embed_dim
batch_size = tf.shape(primitive_indices.values)[0]
# project object embeds into same dimension as primitive embeds
if project_object_embeds:
object_projection_layer = Dense(primitive_embed_dim, activation='linear')
object_embeds_flat = tf.reshape(object_embeds, [batch_size * max_stack_size, object_embed_dim])
projected_object_embeds_flat = object_projection_layer(object_embeds_flat)
projected_object_embeds = tf.reshape(projected_object_embeds_flat, [batch_size, max_stack_size, primitive_embed_dim])
else:
object_projection_layer = None
projected_object_embeds = object_embeds
# combine primitive and object embeds
is_object_feed = FeedSequenceBatch(align='right', seq_length=max_stack_size, dtype=tf.float32)
is_object = is_object_feed.values # (batch_size, max_stack_size)
is_object = expand_dims_for_broadcast(is_object, primitive_embeds) # (batch_size, max_stack_size, embed_dim)
stack_embeds = is_object * projected_object_embeds + (1 - is_object) * primitive_embeds # (batch_size, max_stack_size, embed_dim)
# make sure to mask out empty stack positions
stack_embeds = stack_embeds * expand_dims_for_broadcast(primitive_indices.mask, stack_embeds)
flat_stack_embeds = tf.reshape(stack_embeds, [batch_size, max_stack_size * primitive_embed_dim])
self._build_embeds_hash(stack_embeds, primitive_embed_dim)
self._primitive_embedder = primitive_embedder
self._object_embedder = object_embedder
self._max_stack_size = max_stack_size
self._max_list_size = max_list_size
self._abstract_objects = abstract_objects
self._object_projection_layer = object_projection_layer
self._embeds = flat_stack_embeds
self._primitive_indices = primitive_indices
self._is_object_feed = is_object_feed
def _build_embeds_hash(self, stack_embeds, embed_dim):
# stack_embeds is (batch_size, max_stack_size, embed_dim)
# random vector, initialized once and never trained
hash_vector = tf.get_variable('exec_stack_hash_vector', shape=[embed_dim], dtype=tf.float32,
initializer=tf.random_normal_initializer(seed=0), trainable=False)
# inner product every stack embedding with the hash vector
hash = tf.reshape(hash_vector, [1, 1, embed_dim]) # expand dims for broadcast
self._embeds_hash = tf.reduce_sum(stack_embeds * hash, axis=2, keep_dims=False) # (batch_size, max_stack_size)
@property
def embeds_hash(self):
return self._embeds_hash
def inputs_to_feed_dict(self, cases):
OBJECT = self._primitive_embedder.vocab.OBJECT
LIST = self._primitive_embedder.vocab.LIST
abstract_objects = self._abstract_objects
# collect batch of execution stacks
exec_stacks = []
for case in cases:
previous_cases = case._previous_cases
if len(previous_cases) == 0:
exec_stack = [] # TODO(kelvin): always safe to assume stack starts out empty?
else:
latest_case = previous_cases[-1]
exec_stack = latest_case.denotation.execution_stack # use the denotation up until this point
exec_stacks.append(exec_stack)
is_object_batch = []
primitive_stack_batch = []
for exec_stack in exec_stacks:
primitive_stack = []
is_object = []
for elem in exec_stack:
category = stack_element_category(elem)
if category == 'PRIMITIVE':
is_obj = 0.
primitive_val = elem
elif category == 'OBJECT':
is_obj = 0. if abstract_objects else 1. # if abstract_objects, embed it as a primitive instead
primitive_val = OBJECT
elif category == 'LIST':
is_obj = 0. if abstract_objects else 1. # if abstract_objects, embed it as a primitive instead
primitive_val = LIST if len(elem) != 1 else OBJECT # singleton list treated as object
else:
raise ValueError('Cannot embed: {}'.format(elem))
is_object.append(is_obj)
primitive_stack.append(primitive_val)
primitive_stack_batch.append(primitive_stack)
is_object_batch.append(is_object)
primitive_feed = self._primitive_indices.inputs_to_feed_dict(primitive_stack_batch,
self._primitive_embedder.vocab)
object_feed = self._object_embedder.inputs_to_feed_dict(exec_stacks)
is_object_feed = self._is_object_feed.inputs_to_feed_dict(is_object_batch)
feed = {}
feed.update(primitive_feed)
feed.update(object_feed)
feed.update(is_object_feed)
return feed
@property
def embeds(self):
"""Tensor of shape [batch_size, max_stack_size * primitive_embed_dim]."""
return self._embeds
class DummyStackObjectEmbedder(StackObjectEmbedder):
"""Just embeds every object as a vector of all ones.
This is really just used as a placeholder model when we set abstract_objects=True in ExecutionStackEmbedder.
In that scenario, the outputs of this embedder do not actually get used in the final embedding of the stack.
"""
def __init__(self, max_stack_size, object_embed_dim):
self._batch_size = tf.placeholder(tf.int32, shape=[])
self._embeds = tf.ones([self._batch_size, max_stack_size, object_embed_dim])
@property
def embeds(self):
return self._embeds
def inputs_to_feed_dict(self, exec_stacks):
return {self._batch_size: len(exec_stacks)}
|
ContextualSP/lemon/executor/strongsup/parse_model.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/parse_model.py",
"repo_id": "ContextualSP",
"token_count": 21417
}
| 241 |
from strongsup.path_checker import PathChecker
class RLongPathChecker(PathChecker):
def __init__(self, config):
PathChecker.__init__(self, config)
self._max_stack_size = config.get('max_stack_size')
self._action_must_clear_beam = config.get('action_must_clear_beam')
def __call__(self, path):
"""Check whether the path should be added to the beam.
Args:
path (ParsePath)
Returns:
boolean
"""
if (self._max_stack_size
and len(path.denotation.execution_stack) > self._max_stack_size):
return False
if (self._action_must_clear_beam
and path.denotation.execution_stack
and path[-1].decision.name[0] == 'A'):
return False
return True
|
ContextualSP/lemon/executor/strongsup/rlong/path_checker.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/rlong/path_checker.py",
"repo_id": "ContextualSP",
"token_count": 381
}
| 242 |
# -*- coding: utf-8 -*-
import re
import unicodedata
def tsv_unescape(x):
"""Unescape strings in the TSV file.
Escaped characters include:
newline (0x10) -> backslash + n
vertical bar (0x7C) -> backslash + p
backslash (0x5C) -> backslash + backslash
Args:
x (str or unicode)
Returns:
a unicode
"""
x = x.replace(r'\n', '\n').replace(r'\p', '|').replace('\\\\', '\\')
if not isinstance(x, str):
x = x.decode('utf-8', errors='ignore')
return x
def tsv_unescape_list(x):
"""Unescape a list in the TSV file.
List items are joined with vertical bars (0x5C)
Args:
x (str or unicode)
Returns:
a list of unicodes
"""
return [tsv_unescape(y) for y in x.split('|')]
# From the official evaluator
def normalize(x):
if not isinstance(x, str):
x = x.decode('utf8', errors='ignore')
# Remove diacritics
x = ''.join(c for c in unicodedata.normalize('NFKD', x)
if unicodedata.category(c) != 'Mn')
# Normalize quotes and dashes
x = re.sub(r"[‘’´`]", "'", x)
x = re.sub(r"[“”]", "\"", x)
x = re.sub(r"[‐‑‒–—−]", "-", x)
while True:
old_x = x
# Remove citations
x = re.sub(r"((?<!^)\[[^\]]*\]|\[\d+\]|[•♦†‡*#+])*$", "", x.strip())
# Remove details in parenthesis
x = re.sub(r"(?<!^)( \([^)]*\))*$", "", x.strip())
# Remove outermost quotation mark
x = re.sub(r'^"([^"]*)"$', r'\1', x.strip())
if x == old_x:
break
# Remove final '.'
if x and x[-1] == '.':
x = x[:-1]
# Collapse whitespaces and convert to lower case
x = re.sub(r'\s+', ' ', x, flags=re.U).lower().strip()
return x
PTB_BRACKETS = {
'-lrb-': '(', '-rrb-': ')',
'-lsb-': '[', '-rsb-': ']',
'-lcb-': '{', '-rcb-': '}',
}
def resolve_ptb_brackets(tokens):
"""Convert Penn Tree Bank escaped brackets to actual brackets."""
if isinstance(tokens, str):
tokens = tokens.split()
if len(tokens) == 1:
tokens = tsv_unescape_list(tokens[0])
return [PTB_BRACKETS.get(x, x) for x in tokens]
|
ContextualSP/lemon/executor/strongsup/tables/utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tables/utils.py",
"repo_id": "ContextualSP",
"token_count": 1093
}
| 243 |
import math
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_array_almost_equal
from gtd.ml.framework import Feedable
from gtd.ml.model import TokenEmbedder
from gtd.ml.seq_batch import SequenceBatch
from gtd.ml.utils import guarantee_initialized_variables
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
from gtd.tests.ml.test_framework import FeedableTester, clean_test_session
from gtd.utils import Bunch
from strongsup.embeddings import RLongPrimitiveEmbeddings
from strongsup.parse_case import ParseCase
from strongsup.parse_model import PredicateScorer, CrossEntropyLossModel, TrainParseModel, \
CombinedPredicateEmbedder, Embedder, ExecutionStackEmbedder, DummyStackObjectEmbedder, HistoryEmbedder
from strongsup.rlong.state import RLongObject
from strongsup.tests.utils import PredicateGenerator
@pytest.fixture
def cases():
context = 'nothing'
p = PredicateGenerator(context)
c0 = ParseCase.initial(context, [p('a'), p('c'), p('d')])
c1 = ParseCase.initial(context, [p('a'), p('b'), p('c'), p('d'), p('e')])
c2 = ParseCase.initial(context, []) # empty
c0.decision = p('d')
c1.decision = p('c')
# can't decide for c2, since no options
return [c0, c1, c2]
class TestCrossEntropyLossModel(FeedableTester):
@pytest.fixture
def inputs(self, cases):
return self.as_args_kwargs(cases[:2]) # don't use the last one, because it has no decision set
@pytest.fixture
def logits(self):
# some made up logit scores
ninf = float('-inf')
arr = [
[1., 2., 3., ninf],
[1., 2., 3.5, 4.],
]
return np.array(arr)
@pytest.fixture
def model(self, logits):
logits_tensor = tf.constant(logits, dtype=tf.float32)
return CrossEntropyLossModel(logits_tensor)
@pytest.fixture
def feed_dict(self, model):
return {model._labels: np.array([2, 2])}
# for both cases, the selected decision is at index 2 of case.choices
@pytest.fixture
def outputs(self, logits):
e = math.exp(1.)
p0 = e**3 / (e + e**2 + e**3)
p1 = e**3.5 / (e + e**2 + e**3.5 + e**4)
probs = np.array([p0, p1])
nll = -np.log(probs)
return [nll]
@pytest.fixture
def output_tensors(self, model):
return [model.losses]
class DummyParseModel(Feedable):
@property
def logits(self):
return tf.get_variable('logits', shape=[4], initializer=tf.constant_initializer([1, 1, 1, 1]))
# not actually used
# just needed because TrainParseModel needs some variables to optimize.
def inputs_to_feed_dict(self, *args, **kwargs):
return {}
class DummyLossModel(Feedable):
def __init__(self, logits):
# logits are not actually used to compute loss
self.losses = tf.get_variable('losses', shape=[4],
initializer=tf.constant_initializer([1, 2, 6, 3.5], dtype=tf.float32))
def inputs_to_feed_dict(self, *args, **kwargs):
return {}
class TestTrainParseModel(FeedableTester):
@pytest.fixture
def model(self):
parse_model = DummyParseModel()
loss_model_factory = lambda logits: DummyLossModel(logits)
return TrainParseModel(parse_model, loss_model_factory, learning_rate=2.0)
@pytest.fixture
def inputs(self):
# A list of (ParseCase, train_weight) pairs.
# ParseCases can be None, because DummyLossModel doesn't look at them anyway to produce losses.
return self.as_args_kwargs([None, None, None, None], [1, 8, 0, 2], caching=False)
@pytest.fixture
def feed_dict(self, model):
return {
model._weights: np.array([1., 8., 0., 2.])
}
@pytest.fixture
def outputs(self):
loss = 1 + (2 * 8) + (6 * 0) + (3.5 * 2)
return [loss]
@pytest.fixture
def output_tensors(self, model):
return [model.loss]
class DummyEmbedder(Embedder):
"""Doesn't actually compute embeddings and vocabs dynamically, but sufficient for testing."""
def __init__(self, tokens, embeds):
"""
Args:
tokens (list[unicode])
embeds (np.array)
"""
self.vocab = SimpleVocab(tokens)
self._embeds = tf.constant(embeds, dtype=tf.float32)
self._embed_dim = embeds.shape[1]
@property
def embeds(self):
return self._embeds
def dynamic_vocab(self, batch):
return self.vocab
def inputs_to_feed_dict(self, *args, **kwargs):
return {}
class TestCombinedPredicateEmbedder(FeedableTester):
@pytest.fixture
def base_pred_embeddings(self):
array = np.array([
[0, 0, 0, 0],
[1, 2, 3, 4],
[0, 2, 0, 8],
], dtype=np.float32)
vocab = SimpleVocab('<unk> b0 b1'.split())
return SimpleEmbeddings(array, vocab)
@pytest.fixture
def model(self, base_pred_embeddings):
ent_embeds = np.array([
[10, 20, 30, 40],
[11, 21, 31, 41],
], dtype=np.float32)
rel_embeds = ent_embeds
ent_model = DummyEmbedder(['ent0', 'ent1'], ent_embeds)
rel_model = DummyEmbedder(['rel0', 'rel1'], rel_embeds)
return CombinedPredicateEmbedder(base_pred_embeddings, ent_model, rel_model)
@pytest.fixture
def inputs(self):
return self.as_args_kwargs([])
@pytest.fixture
def feed_dict(self):
return {}
@pytest.fixture
def outputs(self):
embeds = np.array([
[0, 0, 0, 0],
[1, 2, 3, 4],
[0, 2, 0, 8],
[10, 20, 30, 40],
[11, 21, 31, 41],
[10, 20, 30, 40],
[11, 21, 31, 41],
], dtype=np.float32)
return [embeds]
@pytest.fixture
def output_tensors(self, model):
return [model.embeds]
# TODO: This test is obsolete
#class TestHistoryEmbedder(object):
# @pytest.fixture
# def model(self):
# pred_embeds_tensor = tf.constant([
# [1, 2, 3],
# [4, 5, 6],
# ], dtype=tf.float32)
# class DummyPredicateEmbedder(object):
# @property
# def embeds(self):
# return pred_embeds_tensor
# pred_embeds = DummyPredicateEmbedder()
# return HistoryEmbedder(pred_embeds, 3)
#
# @pytest.fixture
# def cases(self):
# pred_names = [
# ['a', 'b', 'c'],
# ['c', 'b', 'c', 'd', 'e'],
# [],
# ]
#
# preds = [[Bunch(name=name) for name in name_list] for name_list in pred_names]
# cases = [Bunch(previous_decisions=pred_list) for pred_list in preds]
# return cases
#
# @pytest.mark.usefixtures('clean_test_session')
# def test_cases_to_histories(self, model, cases):
# histories = model._cases_to_histories(cases)
# assert histories == {
# 0: ['a', 'b', 'c'],
# 1: ['c', 'd', 'e'],
# 2: [],
# }
class TestPredicateScorer(object):
@pytest.fixture
def model(self):
ninf = -float('inf')
simple_scores = tf.constant([
[1, 2, 3, ninf],
[4, 5, ninf, ninf],
[1, 1, 2, 2]
], dtype=tf.float32)
soft_copy_scores = tf.constant([
[8, -2, 10, 0],
[0, 1, 0, 0],
[11, 0.5, 1.4, -1.6],
], dtype=tf.float32)
mask = tf.constant([
[1, 1, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
], dtype=tf.float32)
# scores don't actually depend on cases
simple_scorer = Bunch(scores=SequenceBatch(simple_scores, mask), inputs_to_feed_dict=lambda cases: {})
soft_copy_scorer = Bunch(scores=SequenceBatch(soft_copy_scores, mask), inputs_to_feed_dict=lambda cases: {})
return PredicateScorer(simple_scorer, soft_copy_scorer)
@pytest.fixture
def cases(self):
context = 'nothing'
p = PredicateGenerator(context)
c0 = ParseCase.initial(context, [p('a'), p('c'), p('d')])
c1 = ParseCase.initial(context, [p('a'), p('b')])
c2 = ParseCase.initial(context, [p('a'), p('b'), p('d'), p('c')]) # empty
return [c0, c1, c2]
@pytest.fixture
def correct_scores(self):
ninf = -float('inf')
return np.array([
[9, 0, 13, ninf],
[4, 6, ninf, ninf],
[12, 1.5, 3.4, 0.4]
], dtype=np.float32)
@pytest.mark.usefixtures('clean_test_session')
def test(self, model, cases, correct_scores):
scores = model.compute(model.scores.values, cases)
assert_array_almost_equal(correct_scores, scores)
class DummyRLongObject(RLongObject):
pass
class TestExecutionStackEmbedder(object):
@pytest.fixture
def model(self):
max_stack_size = 3
max_list_size = 7
primitive_embed_dim = 6
object_embed_dim = 10
primitive_embeddings = RLongPrimitiveEmbeddings(primitive_embed_dim)
primitive_embedder = TokenEmbedder(primitive_embeddings, 'primitive_embeds', trainable=True)
object_embedder = DummyStackObjectEmbedder(max_stack_size, object_embed_dim)
return ExecutionStackEmbedder(primitive_embedder, object_embedder, max_stack_size, max_list_size,
project_object_embeds=True, abstract_objects=False)
@pytest.fixture
def cases(self):
make_case = lambda stack: Bunch(_previous_cases=[Bunch(denotation=Bunch(execution_stack=stack))])
some_obj = DummyRLongObject()
empty_list = []
return [
make_case(['r', -1]),
make_case(['X1/1']),
make_case(['b', some_obj, empty_list]),
]
@pytest.mark.usefixtures('clean_test_session')
def test_inputs_to_feed_dict(self, model, cases):
feed = model.inputs_to_feed_dict(cases)
assert_array_almost_equal(
feed[model._primitive_indices.values],
np.array([
[0, 2, 19],
[0, 0, 20],
[7, 0, 1],
], dtype=np.float32)
)
assert_array_almost_equal(
feed[model._primitive_indices.mask],
np.array([
[0, 1, 1],
[0, 0, 1],
[1, 1, 1],
], dtype=np.float32)
)
assert_array_almost_equal(
feed[model._is_object_feed.values],
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 1, 1],
], dtype=np.float32)
)
@pytest.mark.usefixtures('clean_test_session')
def test(self, model, cases):
sess = tf.get_default_session()
guarantee_initialized_variables(sess)
embeds = model.compute(model.embeds, cases)
primitive_embeddings = RLongPrimitiveEmbeddings(6)
# compute object embedding after applying projection
object_projection_layer = model._object_projection_layer
W, b = object_projection_layer.get_weights() # shapes [10, 6] and [6]
object_embed = np.ones(10).dot(W) + b
assert_array_almost_equal(embeds[0],
np.concatenate((np.zeros(6), primitive_embeddings['r'], primitive_embeddings[-1]))
)
assert_array_almost_equal(embeds[1],
np.concatenate((np.zeros(6), np.zeros(6), primitive_embeddings['X1/1']))
)
assert_array_almost_equal(embeds[2],
np.concatenate((primitive_embeddings['b'], object_embed, object_embed))
)
class TestHistoryEmbedder(object):
def test_previous_decisions_for_this_utterance(self):
prev_cases = [Bunch(current_utterance_idx=1, decision='a'), Bunch(current_utterance_idx=1, decision='b'),
Bunch(current_utterance_idx=2, decision='c'), Bunch(current_utterance_idx=2, decision='d')]
case = Bunch(current_utterance_idx=2, _previous_cases=prev_cases)
prev_decisions = HistoryEmbedder.previous_decisions_for_this_utterance(case)
assert prev_decisions == ['c', 'd']
bad_cases = [Bunch(current_utterance_idx=2, decision='a'), Bunch(current_utterance_idx=1, decision='b'),
Bunch(current_utterance_idx=2, decision='c'), Bunch(current_utterance_idx=2, decision='d')]
bad_case = Bunch(current_utterance_idx=2, _previous_cases=bad_cases)
with pytest.raises(AssertionError):
_ = HistoryEmbedder.previous_decisions_for_this_utterance(bad_case)
|
ContextualSP/lemon/executor/strongsup/tests/test_parse_model.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/test_parse_model.py",
"repo_id": "ContextualSP",
"token_count": 6157
}
| 244 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
from argparse import ArgumentParser
from fairseq_cli.train import cli_main as fairseq_train
from fairseq_cli.generate import cli_main as fairseq_generate
import logging
import shlex
import re
import os
sys.path.append('../')
# from model_interface import TAPEXModelInterface
from model_eval import evaluate_generate_file
logger = logging.getLogger(__name__)
def set_train_parser(parser_group):
train_parser = parser_group.add_parser("train")
train_parser.add_argument("--dataset-dir", type=str, required=True, default="",
help="dataset directory where train.src is located in")
train_parser.add_argument("--exp-dir", type=str, default="checkpoints",
help="experiment directory which stores the checkpoint weights")
train_parser.add_argument("--model-path", type=str, default="tapex.base/model.pt",
help="the directory of pre-trained model path")
train_parser.add_argument("--model-arch", type=str, default="bart_base", choices=["bart_large", "bart_base"],
help="tapex large should correspond to bart_large, and tapex base should be bart_base")
train_parser.add_argument("--max-tokens", type=int, default=1536,
help="if you train a large model on 16GB memory, max-tokens should be empirically "
"set as 1536, and can be near-linearly increased according to your GPU memory.")
train_parser.add_argument("--gradient-accumulation", type=int, default=8,
help="the accumulation steps to arrive a equal batch size, the default value can be used"
"to reproduce our results. And you can also reduce it to a proper value for you.")
train_parser.add_argument("--total-num-update", type=int, default=10000,
help="the total optimization training steps")
train_parser.add_argument("--learning-rate", type=float, default=3e-5,
help="the peak learning rate for model training")
train_parser.add_argument("--warmup-steps", type=int, default=1500,
help="warmup steps")
train_parser.add_argument("--wandb-project", type=str, default='universal_pretrain_bart',
help="wandb-project")
def set_eval_parser(parser_group):
eval_parser = parser_group.add_parser("eval")
eval_parser.add_argument("--dataset-dir", type=str, required=True, default="",
help="dataset directory where train.src is located in")
eval_parser.add_argument("--model-path", type=str, default="tapex.base.wikisql/model.pt",
help="the directory of fine-tuned model path such as tapex.base.wikisql/model.pt")
eval_parser.add_argument("--sub-dir", type=str, default="valid", choices=["train", "valid", "test"],
help="the directory of pre-trained model path, and the default should be in"
"{bart.base, bart.large, tapex.base, tapex.large}.")
eval_parser.add_argument("--max-tokens", type=int, default=1536 * 4,
help="the max tokens can be larger than training when in inference.")
eval_parser.add_argument("--predict-dir", type=str, default="predict",
help="the predict folder of generated result.")
def set_predict_parser(parser_group):
predict_parser = parser_group.add_parser("predict")
predict_parser.add_argument("--resource-dir", type=str, required=True, default="./tapex.base",
help="the resource dir which contains the model weights, vocab.bpe, "
"dict.src.txt, dict.tgt.txt and encoder.json.")
predict_parser.add_argument("--checkpoint-name", type=str, default="model.pt",
help="the model weight's name in the resource directory")
def train_fairseq_model(args):
cmd = f"""
fairseq-train {args.dataset_dir} \
--save-dir {args.exp_dir} \
--restore-file {args.model_path} \
--arch {args.model_arch} \
--memory-efficient-fp16 \
--task translation \
--criterion label_smoothed_cross_entropy \
--source-lang src \
--target-lang tgt \
--truncate-source \
--label-smoothing 0.1 \
--max-source-positions 1024 \
--max-tokens {args.max_tokens} \
--update-freq {args.gradient_accumulation} \
--max-update {args.total_num_update} \
--required-batch-size-multiple 1 \
--dropout 0.1 \
--attention-dropout 0.1 \
--relu-dropout 0.0 \
--weight-decay 0.01 \
--optimizer adam \
--adam-eps 1e-08 \
--clip-norm 0.1 \
--lr-scheduler polynomial_decay \
--lr {args.learning_rate} \
--total-num-update {args.total_num_update} \
--warmup-updates {args.warmup_steps} \
--ddp-backend no_c10d \
--num-workers 20 \
--reset-meters \
--reset-optimizer \
--reset-dataloader \
--share-all-embeddings \
--layernorm-embedding \
--share-decoder-input-output-embed \
--skip-invalid-size-inputs-valid-test \
--log-format json \
--log-interval 10 \
--save-interval-updates 2000 \
--validate-interval 50 \
--save-interval 50 \
--patience 200 \
--report-accuracy \
--wandb-project {args.wandb_project}
"""
sys.argv = shlex.split(cmd)
logger.info("Begin to train model for dataset {}".format(args.dataset_dir))
logger.info("Running command {}".format(re.sub("\s+", " ", cmd.replace("\n", " "))))
fairseq_train()
def evaluate_fairseq_model(args):
cmd = f"""
fairseq-generate
--path {args.model_path} \
{args.dataset_dir} \
--truncate-source \
--gen-subset {args.sub_dir} \
--max-tokens {args.max_tokens} \
--nbest 1 \
--source-lang src \
--target-lang tgt \
--results-path {args.predict_dir} \
--beam 5 \
--bpe gpt2 \
--remove-bpe \
--num-workers 20 \
--skip-invalid-size-inputs-valid-test
"""
sys.argv = shlex.split(cmd)
logger.info("Begin to evaluate model on the {} subset of dataset {}".format(args.sub_dir, args.dataset_dir))
logger.info("Running command {}".format(re.sub("\s+", " ", cmd.replace("\n", " "))))
fairseq_generate()
# after generation, we should call TAPEX evaluate function to evaluate the result
generate_file = os.path.join(args.predict_dir, "generate-{}.txt".format(args.sub_dir))
# the delimiter is the answer delimiter used in training, which by default is a comma
evaluate_generate_file(generate_file, target_delimiter=", ")
# def predict_demo(args):
# demo_interface = TAPEXModelInterface(resource_dir=args.resource_dir,
# checkpoint_name=args.checkpoint_name)
# question = "Greece held its last Summer Olympics in which year?"
# table_context = {
# "header": ["Year", "City", "Country", "Nations"],
# "rows": [
# [1896, "Athens", "Greece", 14],
# [1900, "Paris", "France", 24],
# [1904, "St. Louis", "USA", 12],
# [2004, "Athens", "Greece", 201],
# [2008, "Beijing", "China", 204],
# [2012, "London", "UK", 204]
# ]
# }
# answer = demo_interface.predict(question=question,
# table_context=table_context)
# logger.info("Receive question as : {}".format(question))
# logger.info("The answer should be : {}".format(answer))
if __name__ == '__main__':
parser = ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand")
set_train_parser(subparsers)
set_eval_parser(subparsers)
set_predict_parser(subparsers)
args = parser.parse_args()
if args.subcommand == "train":
train_fairseq_model(args)
elif args.subcommand == "eval":
evaluate_fairseq_model(args)
elif args.subcommand == "predict":
predict_demo(args)
|
ContextualSP/lemon/lemon/run_model_pretrain.py/0
|
{
"file_path": "ContextualSP/lemon/lemon/run_model_pretrain.py",
"repo_id": "ContextualSP",
"token_count": 3752
}
| 245 |
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import *
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import *
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/__init__.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/__init__.py",
"repo_id": "ContextualSP",
"token_count": 50
}
| 246 |
{"id":"8-343","answerKey":"B"}
{"id":"1129","answerKey":"A"}
{"id":"880","answerKey":"C"}
{"id":"7-999","answerKey":"C"}
{"id":"8-464","answerKey":"C"}
{"id":"9-794","answerKey":"C"}
{"id":"9-1163","answerKey":"C"}
{"id":"9-322","answerKey":"B"}
{"id":"7-1140","answerKey":"D"}
{"id":"7-903","answerKey":"B"}
{"id":"7-511","answerKey":"C"}
{"id":"9-937","answerKey":"B"}
{"id":"8-201","answerKey":"C"}
{"id":"1618","answerKey":"A"}
{"id":"758","answerKey":"C"}
{"id":"7-414","answerKey":"D"}
{"id":"9-675","answerKey":"C"}
{"id":"9-163","answerKey":"C"}
{"id":"1032","answerKey":"A"}
{"id":"889","answerKey":"B"}
{"id":"1160","answerKey":"A"}
{"id":"9-298","answerKey":"A"}
{"id":"1189","answerKey":"A"}
{"id":"8-395","answerKey":"B"}
{"id":"7-238","answerKey":"B"}
{"id":"7-372","answerKey":"B"}
{"id":"8-35","answerKey":"D"}
{"id":"9-271","answerKey":"A"}
{"id":"9-409","answerKey":"D"}
{"id":"530","answerKey":"B"}
{"id":"1426","answerKey":"C"}
{"id":"8-466","answerKey":"D"}
{"id":"1577","answerKey":"B"}
{"id":"8-257","answerKey":"A"}
{"id":"378","answerKey":"B"}
{"id":"8-41","answerKey":"A"}
{"id":"9-540","answerKey":"A"}
{"id":"266","answerKey":"D"}
{"id":"1309","answerKey":"D"}
{"id":"7-1197","answerKey":"A"}
{"id":"7-891","answerKey":"D"}
{"id":"1180","answerKey":"D"}
{"id":"1204","answerKey":"A"}
{"id":"7-52","answerKey":"D"}
{"id":"1759","answerKey":"A"}
{"id":"9-655","answerKey":"A"}
{"id":"132","answerKey":"A"}
{"id":"8-79","answerKey":"A"}
{"id":"1835","answerKey":"B"}
{"id":"9-149","answerKey":"C"}
{"id":"695","answerKey":"A"}
{"id":"8-179","answerKey":"D"}
{"id":"7-50","answerKey":"A"}
{"id":"508","answerKey":"D"}
{"id":"1674","answerKey":"C"}
{"id":"163","answerKey":"A"}
{"id":"7-49","answerKey":"C"}
{"id":"8-393","answerKey":"D"}
{"id":"788","answerKey":"B"}
{"id":"9-29","answerKey":"A"}
{"id":"9-368","answerKey":"C"}
{"id":"7-671","answerKey":"C"}
{"id":"1272","answerKey":"A"}
{"id":"648","answerKey":"B"}
{"id":"9-1180","answerKey":"B"}
{"id":"9-227","answerKey":"C"}
{"id":"1582","answerKey":"A"}
{"id":"8-125","answerKey":"A"}
{"id":"1923","answerKey":"A"}
{"id":"9-229","answerKey":"D"}
{"id":"1702","answerKey":"B"}
{"id":"8-260","answerKey":"C"}
{"id":"9-491","answerKey":"C"}
{"id":"75","answerKey":"A"}
{"id":"1215","answerKey":"C"}
{"id":"8-93","answerKey":"C"}
{"id":"7-988","answerKey":"A"}
{"id":"9-1139","answerKey":"C"}
{"id":"1545","answerKey":"B"}
{"id":"7-664","answerKey":"C"}
{"id":"8-53","answerKey":"C"}
{"id":"7-1044","answerKey":"A"}
{"id":"7-1122","answerKey":"A"}
{"id":"9-79","answerKey":"B"}
{"id":"7-157","answerKey":"D"}
{"id":"9-1164","answerKey":"B"}
{"id":"8-63","answerKey":"D"}
{"id":"8-308","answerKey":"D"}
{"id":"326","answerKey":"C"}
{"id":"1184","answerKey":"A"}
{"id":"359","answerKey":"A"}
{"id":"9-350","answerKey":"D"}
{"id":"7-140","answerKey":"C"}
{"id":"591","answerKey":"A"}
{"id":"7-391","answerKey":"C"}
{"id":"1672","answerKey":"C"}
{"id":"9-464","answerKey":"C"}
{"id":"9-983","answerKey":"B"}
{"id":"9-179","answerKey":"A"}
{"id":"7-942","answerKey":"C"}
{"id":"7-100","answerKey":"B"}
{"id":"9-30","answerKey":"B"}
{"id":"1709","answerKey":"A"}
{"id":"8-491","answerKey":"D"}
{"id":"44","answerKey":"B"}
{"id":"1023","answerKey":"D"}
{"id":"1911","answerKey":"B"}
{"id":"429","answerKey":"A"}
{"id":"8-49","answerKey":"D"}
{"id":"520","answerKey":"C"}
{"id":"7-1128","answerKey":"C"}
{"id":"7-394","answerKey":"B"}
{"id":"9-1166","answerKey":"B"}
{"id":"7-884","answerKey":"C"}
{"id":"9-501","answerKey":"A"}
{"id":"9-757","answerKey":"C"}
{"id":"7-725","answerKey":"D"}
{"id":"1300","answerKey":"D"}
{"id":"9-230","answerKey":"A"}
{"id":"9-988","answerKey":"B"}
{"id":"9-393","answerKey":"A"}
{"id":"7-823","answerKey":"D"}
{"id":"9-24","answerKey":"A"}
{"id":"570","answerKey":"D"}
{"id":"9-124","answerKey":"B"}
{"id":"9-199","answerKey":"A"}
{"id":"767","answerKey":"A"}
{"id":"28","answerKey":"D"}
{"id":"9-1134","answerKey":"B"}
{"id":"9-1030","answerKey":"C"}
{"id":"9-18","answerKey":"B"}
{"id":"8-378","answerKey":"D"}
{"id":"7-677","answerKey":"C"}
{"id":"9-786","answerKey":"A"}
{"id":"9-463","answerKey":"D"}
{"id":"7-71","answerKey":"A"}
{"id":"9-1053","answerKey":"D"}
{"id":"9-437","answerKey":"B"}
{"id":"1787","answerKey":"B"}
{"id":"7-107","answerKey":"A"}
{"id":"769","answerKey":"C"}
{"id":"9-73","answerKey":"A"}
{"id":"9-1194","answerKey":"D"}
{"id":"9-416","answerKey":"D"}
{"id":"470","answerKey":"A"}
{"id":"1297","answerKey":"D"}
{"id":"8-346","answerKey":"A"}
{"id":"7-807","answerKey":"A"}
{"id":"8-463","answerKey":"A"}
{"id":"9-110","answerKey":"A"}
{"id":"1611","answerKey":"C"}
{"id":"9-942","answerKey":"D"}
{"id":"9-1102","answerKey":"B"}
{"id":"9-774","answerKey":"C"}
{"id":"8-333","answerKey":"A"}
{"id":"9-573","answerKey":"B"}
{"id":"1955","answerKey":"A"}
{"id":"8-45","answerKey":"C"}
{"id":"9-674","answerKey":"C"}
{"id":"898","answerKey":"B"}
{"id":"7-1159","answerKey":"C"}
{"id":"568","answerKey":"A"}
{"id":"9-877","answerKey":"B"}
{"id":"406","answerKey":"C"}
{"id":"7-1132","answerKey":"A"}
{"id":"7-479","answerKey":"C"}
{"id":"609","answerKey":"A"}
{"id":"1568","answerKey":"C"}
{"id":"9-418","answerKey":"C"}
{"id":"7-1050","answerKey":"B"}
{"id":"9-510","answerKey":"C"}
{"id":"9-519","answerKey":"A"}
{"id":"9-637","answerKey":"C"}
{"id":"473","answerKey":"A"}
{"id":"8-445","answerKey":"B"}
{"id":"9-575","answerKey":"B"}
{"id":"7-284","answerKey":"B"}
{"id":"8-135","answerKey":"A"}
{"id":"397","answerKey":"D"}
{"id":"9-32","answerKey":"B"}
{"id":"48","answerKey":"C"}
{"id":"8-69","answerKey":"C"}
{"id":"9-159","answerKey":"C"}
{"id":"9-317","answerKey":"C"}
{"id":"423","answerKey":"D"}
{"id":"8-304","answerKey":"A"}
{"id":"785","answerKey":"D"}
{"id":"9-1087","answerKey":"C"}
{"id":"485","answerKey":"D"}
{"id":"9-908","answerKey":"C"}
{"id":"1231","answerKey":"C"}
{"id":"810","answerKey":"B"}
{"id":"158","answerKey":"D"}
{"id":"7-445","answerKey":"A"}
{"id":"1502","answerKey":"C"}
{"id":"1200","answerKey":"B"}
{"id":"437","answerKey":"A"}
{"id":"8-205","answerKey":"A"}
{"id":"9-270","answerKey":"C"}
{"id":"8-130","answerKey":"C"}
{"id":"229","answerKey":"D"}
{"id":"9-390","answerKey":"D"}
{"id":"8-107","answerKey":"C"}
{"id":"7-527","answerKey":"A"}
{"id":"7-333","answerKey":"C"}
{"id":"9-44","answerKey":"C"}
{"id":"7-160","answerKey":"D"}
{"id":"1942","answerKey":"D"}
{"id":"9-597","answerKey":"B"}
{"id":"9-35","answerKey":"A"}
{"id":"1161","answerKey":"B"}
{"id":"7-171","answerKey":"B"}
{"id":"1139","answerKey":"D"}
{"id":"1924","answerKey":"B"}
{"id":"9-440","answerKey":"B"}
{"id":"9-528","answerKey":"C"}
{"id":"170","answerKey":"C"}
{"id":"395","answerKey":"D"}
{"id":"9-633","answerKey":"D"}
{"id":"9-504","answerKey":"A"}
{"id":"8-192","answerKey":"A"}
{"id":"7-1108","answerKey":"D"}
{"id":"7-852","answerKey":"A"}
{"id":"761","answerKey":"D"}
{"id":"8-318","answerKey":"B"}
{"id":"636","answerKey":"C"}
{"id":"7-444","answerKey":"D"}
{"id":"8-57","answerKey":"B"}
{"id":"9-187","answerKey":"B"}
{"id":"1345","answerKey":"B"}
{"id":"8-59","answerKey":"D"}
{"id":"178","answerKey":"B"}
{"id":"9-1186","answerKey":"A"}
{"id":"82","answerKey":"C"}
{"id":"8-165","answerKey":"C"}
{"id":"404","answerKey":"B"}
{"id":"279","answerKey":"A"}
{"id":"9-532","answerKey":"A"}
{"id":"268","answerKey":"C"}
{"id":"7-1018","answerKey":"D"}
{"id":"1756","answerKey":"A"}
{"id":"1137","answerKey":"D"}
{"id":"7-203","answerKey":"D"}
{"id":"745","answerKey":"B"}
{"id":"7-902","answerKey":"A"}
{"id":"1095","answerKey":"A"}
{"id":"7-163","answerKey":"A"}
{"id":"9-858","answerKey":"C"}
{"id":"1530","answerKey":"D"}
{"id":"9-993","answerKey":"D"}
{"id":"8-340","answerKey":"D"}
{"id":"3","answerKey":"B"}
{"id":"1074","answerKey":"C"}
{"id":"9-431","answerKey":"A"}
{"id":"9-638","answerKey":"B"}
{"id":"9-352","answerKey":"D"}
{"id":"226","answerKey":"D"}
{"id":"9-132","answerKey":"A"}
{"id":"9-222","answerKey":"D"}
{"id":"9-105","answerKey":"D"}
{"id":"7-459","answerKey":"B"}
{"id":"9-881","answerKey":"B"}
{"id":"280","answerKey":"B"}
{"id":"187","answerKey":"B"}
{"id":"8-253","answerKey":"C"}
{"id":"9-482","answerKey":"D"}
{"id":"496","answerKey":"A"}
{"id":"630","answerKey":"C"}
{"id":"9-16","answerKey":"D"}
{"id":"7-986","answerKey":"D"}
{"id":"7-787","answerKey":"C"}
{"id":"9-181","answerKey":"A"}
{"id":"1240","answerKey":"B"}
{"id":"474","answerKey":"D"}
{"id":"1274","answerKey":"D"}
{"id":"1531","answerKey":"D"}
{"id":"8-321","answerKey":"B"}
{"id":"1321","answerKey":"C"}
{"id":"9-51","answerKey":"D"}
{"id":"7-685","answerKey":"B"}
{"id":"7-59","answerKey":"A"}
{"id":"7-270","answerKey":"D"}
{"id":"7-736","answerKey":"C"}
{"id":"8-186","answerKey":"D"}
{"id":"224","answerKey":"B"}
{"id":"8-206","answerKey":"A"}
{"id":"8-190","answerKey":"B"}
{"id":"7-334","answerKey":"B"}
{"id":"9-853","answerKey":"B"}
{"id":"8-367","answerKey":"A"}
{"id":"1047","answerKey":"A"}
{"id":"9-454","answerKey":"C"}
{"id":"1572","answerKey":"C"}
{"id":"8-373","answerKey":"A"}
{"id":"9-772","answerKey":"A"}
{"id":"1852","answerKey":"A"}
{"id":"9-1090","answerKey":"D"}
{"id":"7-769","answerKey":"C"}
{"id":"9-478","answerKey":"C"}
{"id":"448","answerKey":"A"}
{"id":"7-417","answerKey":"B"}
{"id":"7-108","answerKey":"D"}
{"id":"1506","answerKey":"D"}
{"id":"1712","answerKey":"A"}
{"id":"8-312","answerKey":"C"}
{"id":"9-776","answerKey":"A"}
{"id":"8-279","answerKey":"C"}
{"id":"9-621","answerKey":"B"}
{"id":"1823","answerKey":"C"}
{"id":"9-735","answerKey":"B"}
{"id":"7-1170","answerKey":"B"}
{"id":"1500","answerKey":"A"}
{"id":"342","answerKey":"B"}
{"id":"7-356","answerKey":"D"}
{"id":"78","answerKey":"B"}
{"id":"9-520","answerKey":"C"}
{"id":"7-653","answerKey":"C"}
{"id":"1112","answerKey":"B"}
{"id":"9-152","answerKey":"B"}
{"id":"9-552","answerKey":"B"}
{"id":"7-262","answerKey":"A"}
{"id":"7-683","answerKey":"D"}
{"id":"276","answerKey":"B"}
{"id":"7-855","answerKey":"C"}
{"id":"664","answerKey":"D"}
{"id":"9-883","answerKey":"C"}
{"id":"9-550","answerKey":"A"}
{"id":"8-493","answerKey":"D"}
{"id":"9-257","answerKey":"C"}
{"id":"1239","answerKey":"D"}
{"id":"869","answerKey":"A"}
{"id":"7-1105","answerKey":"A"}
{"id":"597","answerKey":"D"}
{"id":"385","answerKey":"D"}
{"id":"1301","answerKey":"B"}
{"id":"9-893","answerKey":"D"}
{"id":"9-369","answerKey":"B"}
{"id":"9-1026","answerKey":"A"}
{"id":"7-424","answerKey":"B"}
{"id":"9-259","answerKey":"A"}
{"id":"9-783","answerKey":"C"}
{"id":"1088","answerKey":"B"}
{"id":"1387","answerKey":"C"}
{"id":"7-1062","answerKey":"A"}
{"id":"676","answerKey":"D"}
{"id":"1998","answerKey":"B"}
{"id":"1698","answerKey":"A"}
{"id":"490","answerKey":"A"}
{"id":"844","answerKey":"A"}
{"id":"1795","answerKey":"C"}
{"id":"1508","answerKey":"B"}
{"id":"9-289","answerKey":"D"}
{"id":"9-668","answerKey":"C"}
{"id":"7-364","answerKey":"A"}
{"id":"1271","answerKey":"B"}
{"id":"9-1117","answerKey":"A"}
{"id":"35","answerKey":"A"}
{"id":"1660","answerKey":"B"}
{"id":"7-710","answerKey":"B"}
{"id":"8-52","answerKey":"B"}
{"id":"9-1167","answerKey":"D"}
{"id":"8-43","answerKey":"D"}
{"id":"9-57","answerKey":"B"}
{"id":"1411","answerKey":"A"}
{"id":"9-206","answerKey":"C"}
{"id":"7-740","answerKey":"D"}
{"id":"1774","answerKey":"B"}
{"id":"7-93","answerKey":"C"}
{"id":"8-97","answerKey":"B"}
{"id":"9-813","answerKey":"B"}
{"id":"9-686","answerKey":"B"}
{"id":"9-799","answerKey":"C"}
{"id":"1179","answerKey":"B"}
{"id":"1954","answerKey":"A"}
{"id":"8-403","answerKey":"C"}
{"id":"9-576","answerKey":"A"}
{"id":"9-866","answerKey":"B"}
{"id":"7-208","answerKey":"C"}
{"id":"9-771","answerKey":"D"}
{"id":"998","answerKey":"C"}
{"id":"433","answerKey":"B"}
{"id":"9-508","answerKey":"A"}
{"id":"7-561","answerKey":"D"}
{"id":"7-976","answerKey":"A"}
{"id":"1635","answerKey":"C"}
{"id":"7-875","answerKey":"A"}
{"id":"7-1053","answerKey":"B"}
{"id":"9-957","answerKey":"D"}
{"id":"1150","answerKey":"C"}
{"id":"8-240","answerKey":"A"}
{"id":"9-554","answerKey":"C"}
{"id":"9-135","answerKey":"B"}
{"id":"7-1096","answerKey":"B"}
{"id":"841","answerKey":"C"}
{"id":"7-146","answerKey":"A"}
{"id":"1554","answerKey":"D"}
{"id":"9-731","answerKey":"A"}
{"id":"1780","answerKey":"A"}
{"id":"7-1077","answerKey":"D"}
{"id":"8-494","answerKey":"C"}
{"id":"936","answerKey":"A"}
{"id":"8-478","answerKey":"B"}
{"id":"9-669","answerKey":"C"}
{"id":"7-732","answerKey":"A"}
{"id":"7-658","answerKey":"D"}
{"id":"1003","answerKey":"B"}
{"id":"8-62","answerKey":"A"}
{"id":"7-386","answerKey":"B"}
{"id":"257","answerKey":"D"}
{"id":"147","answerKey":"D"}
{"id":"7-599","answerKey":"D"}
{"id":"8-92","answerKey":"B"}
{"id":"354","answerKey":"A"}
{"id":"9-966","answerKey":"B"}
{"id":"9-612","answerKey":"B"}
{"id":"9-548","answerKey":"A"}
{"id":"9-429","answerKey":"A"}
{"id":"7-95","answerKey":"C"}
{"id":"1560","answerKey":"A"}
{"id":"9-461","answerKey":"C"}
{"id":"9-490","answerKey":"C"}
{"id":"9-301","answerKey":"C"}
{"id":"60","answerKey":"C"}
{"id":"9-894","answerKey":"C"}
{"id":"9-895","answerKey":"A"}
{"id":"9-281","answerKey":"A"}
{"id":"202","answerKey":"C"}
{"id":"1937","answerKey":"C"}
{"id":"620","answerKey":"A"}
{"id":"8-142","answerKey":"C"}
{"id":"7-1138","answerKey":"A"}
{"id":"8-471","answerKey":"B"}
{"id":"9-433","answerKey":"B"}
{"id":"1458","answerKey":"C"}
{"id":"57","answerKey":"B"}
{"id":"605","answerKey":"C"}
{"id":"9-889","answerKey":"A"}
{"id":"1890","answerKey":"A"}
{"id":"9-618","answerKey":"A"}
{"id":"9-523","answerKey":"A"}
{"id":"1126","answerKey":"C"}
{"id":"644","answerKey":"C"}
{"id":"8-365","answerKey":"A"}
{"id":"9-727","answerKey":"D"}
{"id":"7-461","answerKey":"D"}
{"id":"9-1071","answerKey":"B"}
{"id":"1918","answerKey":"B"}
{"id":"1038","answerKey":"D"}
{"id":"9-197","answerKey":"D"}
{"id":"1393","answerKey":"B"}
{"id":"7-244","answerKey":"A"}
{"id":"9-916","answerKey":"B"}
{"id":"9-1046","answerKey":"A"}
{"id":"167","answerKey":"A"}
{"id":"9-566","answerKey":"B"}
{"id":"8-28","answerKey":"D"}
{"id":"7-179","answerKey":"B"}
{"id":"389","answerKey":"B"}
{"id":"1528","answerKey":"C"}
{"id":"1457","answerKey":"D"}
{"id":"1208","answerKey":"B"}
{"id":"1170","answerKey":"C"}
{"id":"8-409","answerKey":"C"}
{"id":"8-307","answerKey":"A"}
{"id":"1948","answerKey":"C"}
{"id":"661","answerKey":"C"}
{"id":"7-435","answerKey":"C"}
{"id":"8-332","answerKey":"C"}
{"id":"948","answerKey":"B"}
{"id":"381","answerKey":"A"}
{"id":"9-759","answerKey":"B"}
{"id":"8-350","answerKey":"B"}
{"id":"7-727","answerKey":"C"}
{"id":"850","answerKey":"B"}
{"id":"970","answerKey":"D"}
{"id":"7-381","answerKey":"D"}
{"id":"9-436","answerKey":"C"}
{"id":"9-411","answerKey":"C"}
{"id":"9-692","answerKey":"B"}
{"id":"1334","answerKey":"A"}
{"id":"9-1160","answerKey":"D"}
{"id":"9-89","answerKey":"A"}
{"id":"9-1034","answerKey":"D"}
{"id":"8-293","answerKey":"B"}
{"id":"9-652","answerKey":"C"}
{"id":"1391","answerKey":"C"}
{"id":"9-948","answerKey":"B"}
{"id":"8-213","answerKey":"D"}
{"id":"162","answerKey":"B"}
{"id":"1359","answerKey":"B"}
{"id":"9-743","answerKey":"A"}
{"id":"9-645","answerKey":"A"}
{"id":"8-250","answerKey":"C"}
{"id":"283","answerKey":"C"}
{"id":"8-183","answerKey":"A"}
{"id":"9-284","answerKey":"A"}
{"id":"7-1186","answerKey":"C"}
{"id":"926","answerKey":"C"}
{"id":"7-519","answerKey":"B"}
{"id":"7-7","answerKey":"C"}
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/data/question-answers.jsonl/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/data/question-answers.jsonl",
"repo_id": "ContextualSP",
"token_count": 6310
}
| 247 |
from typing import Dict, NamedTuple
class Metric(NamedTuple):
precision: float
recall: float
def F1(self):
if self.precision + self.recall == 0:
return 0.0
return 2 * self.precision * self.recall / (self.precision + self.recall)
def diagnostics(self) -> Dict[str, float]:
return {
"precision": self.precision,
"recall": self.recall
}
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluation/metric.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/evaluation/metric.py",
"repo_id": "ContextualSP",
"token_count": 192
}
| 248 |
## Test case: All ProPara answers
* answers.tsv is a sorted copy of the all answers of all [ProPara data sets](../../data/).
This is intended for exercising and checking Action File loading.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-0/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-0/README.md",
"repo_id": "ContextualSP",
"token_count": 52
}
| 249 |
import numpy as np
import pandas as pd
import json
import re
import string
import os
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--start', type=int)
parser.add_argument('--end', type=int)
parser.add_argument('--indicator_type')
def read_indicators(indicator_type):
assert indicator_type in ["reverse", "premise", "conclusion"]
indicators = []
with open(f"../data/Indicators/{indicator_type}.txt", "r") as f:
for line in f.readlines():
word = line.strip()
indicators.append(word)
return indicators
def locate_logic_indicator_sentences(indicators, text):
rexp = re.compile("|".join([r"(\b{}\b)".format(ind) for ind in indicators]))
return list(re.finditer(rexp, text))
def find_next_period(current_index, text, forward=True):
if current_index <= 0: return 0
if current_index >= len(text) - 1: return len(text) - 1
stride = 1 if forward else -1
while True:
current_index = current_index + stride
if text[current_index] in [".", "!", "?"]:
return current_index + 1 # used directly for subscription
if current_index >= len(text) - 1: return len(text) - 1
if current_index <= 0: return 0
def is_good_conclusion_indicator(indicator, following_words):
if indicator == "so":
following_first_word = following_words.strip().split()[0]
word_list = [following_first_word]
for word in word_list:
pos = nlp(word)[0].pos_
return not pos in ["ADJ", "ADV", "INTJ", "VERB"] # "so bad", "so many", "so loaded" etc.
return True
def find_next_char(current_index, text): # non punc, non space
while text[current_index] not in string.ascii_lowercase + string.digits:
current_index = current_index + 1
return current_index
if __name__ == "__main__":
args = parser.parse_args()
book_start_index = max(args.start, 0)
book_end_index = None if args.end == -1 or args.end >= len(os.listdir("../data/BookCorpus/epubtxt/")) else args.end
indicator_type = args.indicator_type
indicators = read_indicators(indicator_type)
all_books = os.listdir("../data/BookCorpus/epubtxt/")
fw = open(f"./bookcorpus_{indicator_type}/mlm_bookcorpus_{book_start_index}_{book_end_index}.jsonl", "w")
for book in tqdm(all_books[book_start_index:book_end_index]):
with open(f"../data/BookCorpus/epubtxt/{book}", "r") as f:
book = " ".join(f.read().lower().split())
occurances = locate_logic_indicator_sentences(indicators, book)
for span in occurances:
try:
ind_start, ind_end = span.start(), span.end()
mask_start = find_next_char(ind_end, book)
mask_end = find_next_period(mask_start + 1, book, True)
if book[ind_end] in [".", "!", "?", "'", '"'] or book[ind_end + 1] in [".", "!", "?", "'", '"']: continue # indicator on eos
if mask_end - mask_start <= 25 or mask_end - mask_start >= 150: # drop overlong or overshort setence
continue
if not is_good_conclusion_indicator(book[ind_start: ind_end], book[mask_start:mask_end]):
continue
pre_len = min(600 + np.random.geometric(p=1/25) * 5, 1000)
post_len = min(250 + np.random.geometric(p=1/25) * 5, 600)
input_start = find_next_period(ind_start - pre_len, book, False)
input_end = find_next_period(ind_end + post_len, book, True)
input_p1 = book[input_start: mask_start]
input_p2 = "[SEP] [MASK] [SEP]"
input_p3 = book[mask_end : input_end]
input_ = " ".join([input_p1, input_p2, input_p3]).strip()
output_ = book[mask_start : mask_end].strip()
# print(input_,)
# print(output_, "\n")
dic = {"input":input_, "output":output_}
json.dump(dic, fw)
fw.write("\n")
except:
continue
fw.close()
|
ContextualSP/logigan/corpus_construction/mlm_corpus/corpus_construction.py/0
|
{
"file_path": "ContextualSP/logigan/corpus_construction/mlm_corpus/corpus_construction.py",
"repo_id": "ContextualSP",
"token_count": 1967
}
| 250 |
import re
import os
from torchtext import data
from torchtext.data import Iterator, BucketIterator
from torchtext.vocab import GloVe
from torch.utils.data import Dataset, DataLoader
import torch
from collections import defaultdict
import string
from utils import Trie, Tree
class Dictionary:
def __init__(self, words):
self.SPECIAL_SYMBOLS = 4
self.PAD, self.OOV, self.EOS, self.SOS = 0, 1, 2, 3
self.id2word = [None] + words
self.word2id = {word: 1 + i for i, word in enumerate(words)}
def tokenize(self, sentence):
return sentence.split()
def sentence2ids(self, sentence, eos=False, sos=False):
tokens = self.tokenize(sentence)
ids = [self.SPECIAL_SYMBOLS + self.word2id[word] - 1 if word in self.word2id else self.OOV for word in tokens]
if eos:
ids = ids + [self.EOS]
if sos:
ids = [self.SOS] + ids
return ids
def sentences2ids(self, sentences, eos=False, sos=False):
ids = [self.sentence2ids(sentence, eos=eos, sos=sos) for sentence in sentences]
lengths = [len(s) for s in ids]
ids = [s + [self.PAD]*(max(lengths)-len(s)) for s in ids] # Padding
# ids = [[ids[i][j] for i in range(len(ids))] for j in range(max(lengths))] # batch*len -> len*batch
return ids, lengths
def sentences2ids_for_multilabel(self, sentences, eos=False, sos = False):
ids = [[self.sentence2ids(sentence, eos=False, sos=False) for sentence in group] for group in sentences]
if sos:
ids = [[[self.SOS]] + s for s in ids]
if eos:
ids = [s + [[self.EOS]] for s in ids]
lengths = [len(s) for s in ids]
ids = [s + [[self.PAD]]*(max(lengths)-len(s)) for s in ids]
max_label_length = 0
for id in ids:
for iid in id:
max_label_length = max(len(iid), max_label_length)
ids = torch.LongTensor([[s + [self.PAD] * (max_label_length - len(s)) for s in id] for id in ids])# Padding
targets = torch.zeros(len(ids), max(lengths) , self.size())
temp = torch.ones(ids.shape)
targets = targets.scatter_(2 , ids, 1)
targets[:,:,0] = 0
return targets, torch.tensor(lengths)
def ids2sentence(self, ids):
return ' '.join(['<OOV>' if i == self.OOV else self.id2word[i - self.SPECIAL_SYMBOLS + 1] for i in ids if i != self.EOS and i != self.PAD and i != self.SOS])
def ids2sentences(self, ids):
return [self.ids2sentence(i) for i in ids]
def size(self):
return len(self.id2word) + self.SPECIAL_SYMBOLS - 1
class treeDataset(Dataset):
def __init__(self, src_path, trg_path):
self.src_sents = []
self.trg_sents = []
self.candidate_token_sents = []
with open(src_path) as fs, open(trg_path) as ft:
for src_sent, trg_sent in zip(fs, ft):
src_sent = src_sent.strip()
trg_sent = trg_sent.strip()
self.src_sents.append(src_sent)
self.trg_sents.append(trg_sent)
def __getitem__(self, index):
src_sent = self.src_sents[index]
trg_sent = self.trg_sents[index].split(" ### ")
label_list = []
sparql_tree = Trie(sos_token='<SOS>')
for sub_path in trg_sent:
sparql_tree.add_path(sub_path)
for sub_path in trg_sent:
label_list.append(sparql_tree.get_label(sub_path))
return [src_sent] * len(trg_sent), trg_sent, label_list, len(trg_sent)
@classmethod
def transform(cls, src_dictionary, trg_dictionary, src_sents, trg_sents, label_sents, device):
src_ids, src_lengths = src_dictionary.sentences2ids(src_sents, sos=False, eos=True)
src_lengths, idx_sort = torch.sort(torch.tensor(src_lengths), dim=0, descending=True)
src_lengths = src_lengths.to(device)
_, original_idx = torch.sort(idx_sort.data, dim=0)
original_idx = original_idx.to(device)
src_ids = torch.LongTensor(src_ids).index_select(0, idx_sort).transpose(0,1).to(device)
trg_ids, trg_lengths = trg_dictionary.sentences2ids(trg_sents, eos=True, sos=True)
trg_ids = torch.LongTensor(trg_ids).index_select(0, idx_sort).transpose(0,1).to(device)
trg_lengths = torch.tensor(trg_lengths).index_select(0, idx_sort).to(device)
label_ids, label_lengths = trg_dictionary.sentences2ids_for_multilabel(label_sents, eos = True, sos =False)
label_lengths = label_lengths.index_select(0, idx_sort).to(device)
label_ids = label_ids.index_select(0, idx_sort).transpose(0,1).to(device)
return (src_ids, src_lengths), (trg_ids, trg_lengths), (label_ids, label_lengths) , original_idx
def __len__(self):
return len(self.src_sents)
class customDataset(Dataset):
def __init__(self, src_path, trg_path):
self.src_sents = []
self.trg_sents = []
self.candidate_token_sents = []
with open(src_path) as fs, open(trg_path) as ft:
for src_sent, trg_sent in zip(fs, ft):
src_sent = src_sent.strip()
trg_sent = trg_sent.strip()
self.src_sents.append(src_sent)
self.trg_sents.append(trg_sent)
def __getitem__(self, index):
src_sent = self.src_sents[index]
trg_sent = self.trg_sents[index]
return src_sent, trg_sent
@classmethod
def transform(cls, src_dictionary, trg_dictionary, src_sents, device='cpu'):
src_ids, src_lengths = src_dictionary.sentences2ids(src_sents, sos=False, eos=True)
src_lengths, idx_sort = torch.sort(torch.tensor(src_lengths), dim=0, descending=True)
src_ids = torch.LongTensor(src_ids).index_select(0, idx_sort).transpose(0,1).to(device)
src_lengths = src_lengths.to(device)
return (src_ids, src_lengths)
def __len__(self):
return len(self.src_sents)
|
ContextualSP/poset_decoding/sketch_prediction/data.py/0
|
{
"file_path": "ContextualSP/poset_decoding/sketch_prediction/data.py",
"repo_id": "ContextualSP",
"token_count": 2389
}
| 251 |
from .tuner import Tuner
from .tune import tune
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/__init__.py",
"repo_id": "ContextualSP",
"token_count": 15
}
| 252 |
from . import toy
from . import wiki_qa
from . import embeddings
from . import snli
from . import quora_qp
from . import cfq
from pathlib import Path
def list_available():
return [p.name for p in Path(__file__).parent.iterdir()
if p.is_dir() and not p.name.startswith('_')]
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/__init__.py",
"repo_id": "ContextualSP",
"token_count": 108
}
| 253 |
"""Parameters table class."""
import typing
import pandas as pd
import collections.abc
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class ParamTable(object):
"""
Parameter table class.
Example:
>>> params = ParamTable()
>>> params.add(Param('ham', 'Parma Ham'))
>>> params.add(Param('egg', 'Over Easy'))
>>> params['ham']
'Parma Ham'
>>> params['egg']
'Over Easy'
>>> print(params)
ham Parma Ham
egg Over Easy
>>> params.add(Param('egg', 'Sunny side Up'))
Traceback (most recent call last):
...
ValueError: Parameter named egg already exists.
To re-assign parameter egg value, use `params["egg"] = value` instead.
"""
def __init__(self):
"""Parameter table constrctor."""
self._params = {}
def add(self, param: Param):
""":param param: parameter to add."""
if not isinstance(param, Param):
raise TypeError("Only accepts a Param instance.")
if param.name in self._params:
msg = f"Parameter named {param.name} already exists.\n" \
f"To re-assign parameter {param.name} value, " \
f"use `params[\"{param.name}\"] = value` instead."
raise ValueError(msg)
self._params[param.name] = param
def get(self, key) -> Param:
""":return: The parameter in the table named `key`."""
return self._params[key]
def set(self, key, param: Param):
"""Set `key` to parameter `param`."""
if not isinstance(param, Param):
raise ValueError("Only accepts a Param instance.")
self._params[key] = param
@property
def hyper_space(self) -> dict:
""":return: Hyper space of the table, a valid `hyperopt` graph."""
full_space = {}
for param in self:
if param.hyper_space is not None:
param_space = param.hyper_space
if isinstance(param_space, hyper_spaces.HyperoptProxy):
param_space = param_space.convert(param.name)
full_space[param.name] = param_space
return full_space
def to_frame(self) -> pd.DataFrame:
"""
Convert the parameter table into a pandas data frame.
:return: A `pandas.DataFrame`.
Example:
>>> import matchzoo as mz
>>> table = mz.ParamTable()
>>> table.add(mz.Param(name='x', value=10, desc='my x'))
>>> table.add(mz.Param(name='y', value=20, desc='my y'))
>>> table.to_frame()
Name Description Value Hyper-Space
0 x my x 10 None
1 y my y 20 None
"""
df = pd.DataFrame(data={
'Name': [p.name for p in self],
'Description': [p.desc for p in self],
'Value': [p.value for p in self],
'Hyper-Space': [p.hyper_space for p in self]
}, columns=['Name', 'Description', 'Value', 'Hyper-Space'])
return df
def __getitem__(self, key: str) -> typing.Any:
""":return: The value of the parameter in the table named `key`."""
return self._params[key].value
def __setitem__(self, key: str, value: typing.Any):
"""
Set the value of the parameter named `key`.
:param key: Name of the parameter.
:param value: New value of the parameter to set.
"""
self._params[key].value = value
def __str__(self):
""":return: Pretty formatted parameter table."""
return '\n'.join(param.name.ljust(30) + str(param.value)
for param in self._params.values())
def __iter__(self) -> typing.Iterator:
""":return: A iterator that iterates over all parameter instances."""
yield from self._params.values()
def completed(self, exclude: typing.Optional[list] = None) -> bool:
"""
Check if all params are filled.
:param exclude: List of names of parameters that was excluded
from being computed.
:return: `True` if all params are filled, `False` otherwise.
Example:
>>> import matchzoo
>>> model = matchzoo.models.DenseBaseline()
>>> model.params.completed(
... exclude=['task', 'out_activation_func', 'embedding',
... 'embedding_input_dim', 'embedding_output_dim']
... )
True
"""
return all(param for param in self if param.name not in exclude)
def keys(self) -> collections.abc.KeysView:
""":return: Parameter table keys."""
return self._params.keys()
def __contains__(self, item):
""":return: `True` if parameter in parameters."""
return item in self._params
def update(self, other: dict):
"""
Update `self`.
Update `self` with the key/value pairs from other, overwriting
existing keys. Notice that this does not add new keys to `self`.
This method is usually used by models to obtain useful information
from a preprocessor's context.
:param other: The dictionary used update.
Example:
>>> import matchzoo as mz
>>> model = mz.models.DenseBaseline()
>>> prpr = model.get_default_preprocessor()
>>> _ = prpr.fit(mz.datasets.toy.load_data(), verbose=0)
>>> model.params.update(prpr.context)
"""
for key in other:
if key in self:
self[key] = other[key]
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/param_table.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/param_table.py",
"repo_id": "ContextualSP",
"token_count": 2556
}
| 254 |
"""An implementation of MatchPyramid Model."""
import typing
import torch
import torch.nn as nn
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import Matching
from matchzoo.dataloader import callbacks
from matchzoo.utils import parse_activation
class MatchPyramid(BaseModel):
"""
MatchPyramid Model.
Examples:
>>> model = MatchPyramid()
>>> model.params['embedding_output_dim'] = 300
>>> model.params['kernel_count'] = [16, 32]
>>> model.params['kernel_size'] = [[3, 3], [3, 3]]
>>> model.params['dpool_size'] = [3, 10]
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(name='kernel_count', value=[32],
desc="The kernel count of the 2D convolution "
"of each block."))
params.add(Param(name='kernel_size', value=[[3, 3]],
desc="The kernel size of the 2D convolution "
"of each block."))
params.add(Param(name='activation', value='relu',
desc="The activation function."))
params.add(Param(name='dpool_size', value=[3, 10],
desc="The max-pooling size of each block."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""
Build model structure.
MatchPyramid text matching as image recognition.
"""
self.embedding = self._make_default_embedding_layer()
# Interaction
self.matching = Matching(matching_type='dot')
# Build conv
activation = parse_activation(self._params['activation'])
in_channel_2d = [
1,
*self._params['kernel_count'][:-1]
]
conv2d = [
self._make_conv_pool_block(ic, oc, ks, activation)
for ic, oc, ks, in zip(in_channel_2d,
self._params['kernel_count'],
self._params['kernel_size'])
]
self.conv2d = nn.Sequential(*conv2d)
# Dynamic Pooling
self.dpool_layer = nn.AdaptiveAvgPool2d(self._params['dpool_size'])
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
left_length = self._params['dpool_size'][0]
right_length = self._params['dpool_size'][1]
# Build output
self.out = self._make_output_layer(
left_length * right_length * self._params['kernel_count'][-1]
)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# F = number of filters
# P = pool size
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
input_left, input_right = inputs['text_left'], inputs['text_right']
# Process left and right input.
# shape = [B, L, D]
# shape = [B, R, D]
embed_left = self.embedding(input_left.long())
embed_right = self.embedding(input_right.long())
# Compute matching signal
# shape = [B, 1, L, R]
embed_cross = self.matching(embed_left, embed_right).unsqueeze(dim=1)
# Convolution
# shape = [B, F, L, R]
conv = self.conv2d(embed_cross)
# Dynamic Pooling
# shape = [B, F, P1, P2]
embed_pool = self.dpool_layer(conv)
# shape = [B, F * P1 * P2]
embed_flat = self.dropout(torch.flatten(embed_pool, start_dim=1))
# shape = [B, *]
out = self.out(embed_flat)
return out
@classmethod
def _make_conv_pool_block(
cls,
in_channels: int,
out_channels: int,
kernel_size: tuple,
activation: nn.Module
) -> nn.Module:
"""Make conv pool block."""
return nn.Sequential(
# Same padding
nn.ConstantPad2d(
(0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
),
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size
),
activation
)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/match_pyramid.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/match_pyramid.py",
"repo_id": "ContextualSP",
"token_count": 2362
}
| 255 |
"""Spatial GRU module."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo.utils import parse_activation
class SpatialGRU(nn.Module):
"""
Spatial GRU Module.
:param channels: Number of word interaction tensor channels.
:param units: Number of SpatialGRU units.
:param activation: Activation function to use, one of:
- String: name of an activation
- Torch Modele subclass
- Torch Module instance
Default: hyperbolic tangent (`tanh`).
:param recurrent_activation: Activation function to use for
the recurrent step, one of:
- String: name of an activation
- Torch Modele subclass
- Torch Module instance
Default: sigmoid activation (`sigmoid`).
:param direction: Scanning direction. `lt` (i.e., left top)
indicates the scanning from left top to right bottom, and
`rb` (i.e., right bottom) indicates the scanning from
right bottom to left top.
Examples:
>>> import matchzoo as mz
>>> channels, units= 4, 10
>>> spatial_gru = mz.modules.SpatialGRU(channels, units)
"""
def __init__(
self,
channels: int = 4,
units: int = 10,
activation: typing.Union[str, typing.Type[nn.Module], nn.Module] = 'tanh',
recurrent_activation: typing.Union[
str, typing.Type[nn.Module], nn.Module] = 'sigmoid',
direction: str = 'lt'
):
""":class:`SpatialGRU` constructor."""
super().__init__()
self._units = units
self._activation = parse_activation(activation)
self._recurrent_activation = parse_activation(recurrent_activation)
self._direction = direction
self._channels = channels
if self._direction not in ('lt', 'rb'):
raise ValueError(f"Invalid direction. "
f"`{self._direction}` received. "
f"Must be in `lt`, `rb`.")
self._input_dim = self._channels + 3 * self._units
self._wr = nn.Linear(self._input_dim, self._units * 3)
self._wz = nn.Linear(self._input_dim, self._units * 4)
self._w_ij = nn.Linear(self._channels, self._units)
self._U = nn.Linear(self._units * 3, self._units, bias=False)
self.reset_parameters()
def reset_parameters(self):
"""Initialize parameters."""
nn.init.xavier_normal_(self._wr.weight)
nn.init.xavier_normal_(self._wz.weight)
nn.init.orthogonal_(self._w_ij.weight)
nn.init.orthogonal_(self._U.weight)
def softmax_by_row(self, z: torch.tensor) -> tuple:
"""Conduct softmax on each dimension across the four gates."""
# z_transform: [B, 4, U]
z_transform = z.reshape((-1, 4, self._units))
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = F.softmax(z_transform, dim=1).unbind(dim=1)
return zi, zl, zt, zd
def calculate_recurrent_unit(
self,
inputs: torch.tensor,
states: list,
i: int,
j: int
):
"""
Calculate recurrent unit.
:param inputs: A tensor which contains interaction
between left text and right text.
:param states: An array of tensors which stores the hidden state
of every step.
:param i: Recurrent row index.
:param j: Recurrent column index.
"""
# Get hidden state h_diag, h_top, h_left
# h = [B, U]
h_diag = states[i][j]
h_top = states[i][j + 1]
h_left = states[i + 1][j]
# Get interaction between word i, j: s_ij
# s = [B, C]
s_ij = inputs[i][j]
# Concatenate h_top, h_left, h_diag, s_ij
# q = [B, 3*U+C]
q = torch.cat([torch.cat([h_top, h_left], 1), torch.cat([h_diag, s_ij], 1)], 1)
# Calculate reset gate
# r = [B, 3*U]
r = self._recurrent_activation(self._wr(q))
# Calculate updating gate
# z: [B, 4*U]
z = self._wz(q)
# Perform softmax
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = self.softmax_by_row(z)
# Get h_ij_
# h_ij_ = [B, U]
h_ij_l = self._w_ij(s_ij)
h_ij_r = self._U(r * (torch.cat([h_left, h_top, h_diag], 1)))
h_ij_ = self._activation(h_ij_l + h_ij_r)
# Calculate h_ij
# h_ij = [B, U]
h_ij = zl * h_left + zt * h_top + zd * h_diag + zi * h_ij_
return h_ij
def forward(self, inputs):
"""
Perform SpatialGRU on word interation matrix.
:param inputs: input tensors.
"""
batch_size, channels, left_length, right_length = inputs.shape
# inputs = [L, R, B, C]
inputs = inputs.permute([2, 3, 0, 1])
if self._direction == 'rb':
# inputs = [R, L, B, C]
inputs = torch.flip(inputs, [0, 1])
# states = [L+1, R+1, B, U]
states = [
[torch.zeros([batch_size, self._units]).type_as(inputs)
for j in range(right_length + 1)] for i in range(left_length + 1)
]
# Calculate h_ij
# h_ij = [B, U]
for i in range(left_length):
for j in range(right_length):
states[i + 1][j + 1] = self.calculate_recurrent_unit(inputs, states, i, j)
return states[left_length][right_length]
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/spatial_gru.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/spatial_gru.py",
"repo_id": "ContextualSP",
"token_count": 2607
}
| 256 |
from .unit import Unit
class NgramLetter(Unit):
"""
Process unit for n-letter generation.
Triletter is used in :class:`DSSMModel`.
This processor is expected to execute before `Vocab`
has been created.
Examples:
>>> triletter = NgramLetter()
>>> rv = triletter.transform(['hello', 'word'])
>>> len(rv)
9
>>> rv
['#he', 'hel', 'ell', 'llo', 'lo#', '#wo', 'wor', 'ord', 'rd#']
>>> triletter = NgramLetter(reduce_dim=False)
>>> rv = triletter.transform(['hello', 'word'])
>>> len(rv)
2
>>> rv
[['#he', 'hel', 'ell', 'llo', 'lo#'], ['#wo', 'wor', 'ord', 'rd#']]
"""
def __init__(self, ngram: int = 3, reduce_dim: bool = True):
"""
Class initialization.
:param ngram: By default use 3-gram (tri-letter).
:param reduce_dim: Reduce to 1-D list for sentence representation.
"""
self._ngram = ngram
self._reduce_dim = reduce_dim
def transform(self, input_: list) -> list:
"""
Transform token into tri-letter.
For example, `word` should be represented as `#wo`,
`wor`, `ord` and `rd#`.
:param input_: list of tokens to be transformed.
:return n_letters: generated n_letters.
"""
n_letters = []
if len(input_) == 0:
token_ngram = []
if self._reduce_dim:
n_letters.extend(token_ngram)
else:
n_letters.append(token_ngram)
else:
for token in input_:
token = '#' + token + '#'
token_ngram = []
while len(token) >= self._ngram:
token_ngram.append(token[:self._ngram])
token = token[1:]
if self._reduce_dim:
n_letters.extend(token_ngram)
else:
n_letters.append(token_ngram)
return n_letters
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/ngram_letter.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/ngram_letter.py",
"repo_id": "ContextualSP",
"token_count": 1008
}
| 257 |
from .one_hot import one_hot
from .tensor_type import TensorType
from .list_recursive_subclasses import list_recursive_concrete_subclasses
from .parse import parse_loss, parse_activation, parse_metric, parse_optimizer
from .average_meter import AverageMeter
from .timer import Timer
from .early_stopping import EarlyStopping
from .get_file import get_file, _hash_file
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/__init__.py",
"repo_id": "ContextualSP",
"token_count": 107
}
| 258 |
import matchzoo as mz
from matchzoo import preprocessors
from matchzoo.dataloader import Dataset
def test_dataset():
data_pack = mz.datasets.toy.load_data('train', task='ranking')
preprocessor = mz.preprocessors.BasicPreprocessor()
data_processed = preprocessor.fit_transform(data_pack)
dataset_point = mz.dataloader.Dataset(
data_processed,
mode='point',
batch_size=1,
resample=False,
shuffle=True,
sort=False
)
dataset_point.batch_size = 10
dataset_point.shuffle = not dataset_point.shuffle
dataset_point.sort = not dataset_point.sort
assert len(dataset_point.batch_indices) == 10
dataset_pair = mz.dataloader.Dataset(
data_processed,
mode='pair',
num_dup=1,
num_neg=1,
batch_size=1,
resample=True,
shuffle=False,
sort=False
)
assert len(dataset_pair) == 5
dataset_pair.num_dup = dataset_pair.num_dup + 1
assert len(dataset_pair) == 10
dataset_pair.num_neg = dataset_pair.num_neg + 2
assert len(dataset_pair) == 10
dataset_pair.batch_size = dataset_pair.batch_size + 1
assert len(dataset_pair) == 5
dataset_pair.resample = not dataset_pair.resample
assert len(dataset_pair) == 5
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/dataloader/test_dataset.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/dataloader/test_dataset.py",
"repo_id": "ContextualSP",
"token_count": 560
}
| 259 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.models.Bert.get_default_preprocessor()
train_pack_processed = preprocessor.transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='point'
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
mode='point'
)
padding_callback = mz.models.Bert.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
sort=False,
callback=padding_callback
)
model = mz.models.Bert()
model.params['task'] = classification_task
model.params['mode'] = 'bert-base-uncased'
model.params['dropout_rate'] = 0.2
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 5e-5},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
from pytorch_transformers import AdamW, WarmupLinearSchedule
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, betas=(0.9, 0.98), eps=1e-8)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=6, t_total=-1)
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
scheduler=scheduler,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/bert.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/bert.ipynb",
"repo_id": "ContextualSP",
"token_count": 740
}
| 260 |
<jupyter_start><jupyter_code>import torch
import numpy as np
import pandas as pd
import matchzoo as mz
print('matchzoo version', mz.__version__)
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=1))
ranking_task.metrics = [
mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
mz.metrics.MeanAveragePrecision()
]
print("`ranking_task` initialized with metrics", ranking_task.metrics)
print('data loading ...')
train_pack_raw = mz.datasets.wiki_qa.load_data('train', task=ranking_task)
dev_pack_raw = mz.datasets.wiki_qa.load_data('dev', task=ranking_task, filtered=True)
test_pack_raw = mz.datasets.wiki_qa.load_data('test', task=ranking_task, filtered=True)
print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`')
preprocessor = mz.models.MatchPyramid.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1,
batch_size=20,
resample=True,
sort=False,
shuffle=True
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
batch_size=20,
sort=False,
shuffle=False
)
padding_callback = mz.models.MatchPyramid.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
stage='train',
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
stage='dev',
callback=padding_callback
)
model = mz.models.MatchPyramid()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['kernel_count'] = [16, 32]
model.params['kernel_size'] = [[3, 3], [3, 3]]
model.params['dpool_size'] = [3, 10]
model.params['dropout_rate'] = 0.1
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=5
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/match_pyramid.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/match_pyramid.ipynb",
"repo_id": "ContextualSP",
"token_count": 1044
}
| 261 |
#!/usr/bin/env bash
export seed=1
export config_file=train_configs/concat.none.jsonnet
export model_file=checkpoints_sparc/sparc_concat_none_model
export tables_file=dataset_sparc/tables.json
export database_path=dataset_sparc/database
export dataset_path=dataset_sparc
export train_data_path=dataset_sparc/train.json
export validation_data_path=dataset_sparc/dev.json
export pretrained_file=glove/glove.twitter.27B.100d.txt
allennlp train -s ${model_file} ${config_file} \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
-o "{\"model.serialization_dir\":\"${model_file}\",\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\",\"dataset_reader.tables_file\":\"${tables_file}\",\"dataset_reader.database_path\":\"${database_path}\",\"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.text_embedder.tokens.pretrained_file\":\"${pretrained_file}\",\"model.dataset_path\":\"${dataset_path}\"}"
|
ContextualSP/semantic_parsing_in_context/bash_files/linux/train_sparc.bash/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/linux/train_sparc.bash",
"repo_id": "ContextualSP",
"token_count": 374
}
| 262 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Tuple, Dict
from allennlp.common.util import pad_sequence_to_length
from context.converter import SQLConverter
from context.db_context import SparcDBContext
from context.grammar import Grammar, Action, C, T, Segment
class SparcWorld:
"""
World representation for spider dataset.
"""
def __init__(self, db_context: SparcDBContext, sql_clause, sql_query):
"""
:param sql_clause: structural SQL clause(parsed)
:param sql_query: plain SQL query for evaluation
"""
self.db_id = db_context.db_id
self.db_context = db_context
self.sql_clause = sql_clause
self.sql_query = sql_query
self.sql_converter = SQLConverter(db_context=self.db_context)
# keep a list of entities names as they are given in sql queries
self.entities_indexer = {}
for i, entity in enumerate(self.db_context.knowledge_graph.entities):
parts = entity.split(':')
if parts[0] in ['table', 'string']:
self.entities_indexer[parts[1]] = i
else:
# TODO: here we assume the same column name always map into the same text
_, _, column_name = parts
self.entities_indexer[f'{column_name}'] = i
self.valid_actions: Dict[str, List[str]] = {}
self.valid_actions_flat: List[Action] = []
# to support precedent SQL query copy in token-level or segment-level
# this attribute will be assigned in runtime.
self.precedent_action_seq: List[int] = []
# the action is exactly Segment Action
self.precedent_segment_seq: List[Segment] = []
def update_copy_valid_action(self):
"""
For grammar-based decoding method, the copy action will also be constrained under the nonterminal.
Therefore, we should update the valid_actions for considering the copyed action
:return:
"""
for action in self.precedent_segment_seq:
action_key = action.nonterminal
if action_key not in self.valid_actions:
self.valid_actions[action_key] = []
# record action
self.valid_actions[action_key].append(str(action))
self.valid_actions_flat.append(action)
def clear_precedent_state(self, copy_action_ids):
# clear all precedent state
for action in self.precedent_segment_seq:
action_key = action.nonterminal
if action_key in self.valid_actions and str(action) in self.valid_actions[action_key]:
self.valid_actions[action_key].remove(str(action))
copy_action_ids = sorted(copy_action_ids, reverse=True)
for action_idx in copy_action_ids:
del self.valid_actions_flat[action_idx]
self.precedent_action_seq = []
self.precedent_segment_seq = []
def update_precedent_state(self, precedent_sql_query, extract_tree=True):
"""
Receiving string input (in training), or idx input (in testing), convert them into action sequence.
Furthermore, build it as a parsing tree.
**Note this function must be called after `get_action_sequence_and_all_actions` ! **
:param precedent_sql_query: `Dict` or `List[int]`, `Dict` is used in pre-processing,
`List[int]` is used in real-time testing.
:return:
"""
def sub_finder(cus_list, pattern):
indices = []
for i in range(len(cus_list)):
if cus_list[i] == pattern[0] and cus_list[i:i + len(pattern)] == pattern:
indices.append((i, i + len(pattern)))
return indices
# translate string sql query into action sequence
if isinstance(precedent_sql_query, Dict):
precedent_action_seq = self.sql_converter.translate_to_intermediate(precedent_sql_query)
elif isinstance(precedent_sql_query, List):
if isinstance(precedent_sql_query[0], Action):
# convert idx into action string
precedent_action_seq = precedent_sql_query
elif isinstance(precedent_sql_query[0], int):
precedent_action_seq = [self.valid_actions_flat[ind]
for ind in precedent_sql_query]
else:
raise Exception("No support for input format for precedent_sql_query")
else:
precedent_action_seq = []
# Type: List[int]
self.precedent_action_seq = [self.valid_actions_flat.index(action)
for action in precedent_action_seq]
# build AST tree
if extract_tree:
precedent_tree_action = Grammar.extract_all_subtree(precedent_action_seq)
else:
precedent_tree_action = [[action] for action in precedent_action_seq]
# we should convert the action into ids as `text_to_instance` do
precedent_tree_idx = [[self.valid_actions_flat.index(action) for action in action_seq]
for action_seq in precedent_tree_action]
# default action ind is -1
max_len = max([len(sub_tree) for sub_tree in precedent_tree_idx])
precedent_tree_idx = [pad_sequence_to_length(sub_tree, default_value=lambda: -1, desired_length=max_len)
for sub_tree in precedent_tree_idx]
# add to self's action
self.precedent_segment_seq = []
for tree_str, tree_idx in zip(precedent_tree_action, precedent_tree_idx):
self.precedent_segment_seq.append(Segment(tree_str, tree_idx))
def get_action_sequence_and_all_actions(self) -> Tuple[List[str], List[Action], List[Action]]:
"""
Translate the sql clause when initialization into action sequence corresponding to their SemQL.
And return the instantiated local grammars and global grammars.
:return: action sequence corresponding to the sql clause, all valid actions(which has been sorted)
"""
# build global grammar and local grammar
grammar = Grammar(db_context=self.db_context)
global_grammar = grammar.global_grammar
local_grammar = grammar.local_grammar
all_actions = global_grammar + local_grammar
# the sorted actions must follow the same order and
# global grammar will be converted into tensor automatically in allennlp
self.valid_actions_flat = [action for action in all_actions]
# add every action into nonterminal key
for action in self.valid_actions_flat:
action_key = action.nonterminal
if action_key not in self.valid_actions:
self.valid_actions[action_key] = []
# record action
self.valid_actions[action_key].append(str(action))
if self.sql_clause is not None:
action_sequence = self.sql_converter.translate_to_intermediate(self.sql_clause)
# validate action sequence
else:
action_sequence = None
# fetch action_non_terminal
action_non_terminal = None
if action_sequence is not None:
action_non_terminal = [action.__class__.__name__ for action in action_sequence]
return action_non_terminal, action_sequence, all_actions
def get_action_entity_mapping(self) -> Dict[str, int]:
"""
Get the entity index of every local grammar(also named after linked action)
:return:
"""
mapping = {}
for action in self.valid_actions_flat:
# default is padding
mapping[str(action)] = -1
# lowercase for all entities
ins_id = action.ins_id
if isinstance(ins_id, str):
ins_id = ins_id.lower()
# only instance class should apply entity map
if type(action) not in [C, T] or ins_id not in self.entities_indexer:
continue
# record the entity id
mapping[str(action)] = self.entities_indexer[ins_id]
return mapping
|
ContextualSP/semantic_parsing_in_context/context/world.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/context/world.py",
"repo_id": "ContextualSP",
"token_count": 3493
}
| 263 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
import torch
from allennlp.nn import util
class RnnStatelet:
"""
This class keeps track of all of decoder-RNN-related variables that you need during decoding.
This includes things like the current decoder hidden state, the memory cell (for LSTM
decoders), the encoder output that you need for computing attentions, and so on.
This is intended to be used `inside` a ``State``, which likely has other things it has to keep
track of for doing constrained decoding.
Parameters
----------
hidden_state : ``torch.Tensor``
This holds the LSTM hidden state, with shape ``(decoder_output_dim,)`` if the decoder
has 1 layer and ``(num_layers, decoder_output_dim)`` otherwise.
memory_cell : ``torch.Tensor``
This holds the LSTM memory cell, with shape ``(decoder_output_dim,)`` if the decoder has
1 layer and ``(num_layers, decoder_output_dim)`` otherwise.
previous_action_embedding : ``torch.Tensor``
This holds the embedding for the action we took at the last timestep (which gets input to
the decoder). Has shape ``(action_embedding_dim,)``.
attended_input : ``torch.Tensor``
This holds the attention-weighted sum over the input representations that we computed in
the previous timestep. We keep this as part of the state because we use the previous
attention as part of our decoder cell update. Has shape ``(encoder_output_dim,)``.
encoder_outputs : ``List[torch.Tensor]``
A list of variables, each of shape ``(input_sequence_length, encoder_output_dim)``,
containing the encoder outputs at each timestep. The list is over batch elements, and we
do the input this way so we can easily do a ``torch.cat`` on a list of indices into this
batched list.
Note that all of the above parameters are single tensors, while the encoder outputs and
mask are lists of length ``batch_size``. We always pass around the encoder outputs and
mask unmodified, regardless of what's in the grouping for this state. We'll use the
``batch_indices`` for the group to pull pieces out of these lists when we're ready to
actually do some computation.
encoder_output_mask : ``List[torch.Tensor]``
A list of variables, each of shape ``(input_sequence_length,)``, containing a mask over
question tokens for each batch instance. This is a list over batch elements, for the same
reasons as above.
"""
def __init__(self,
hidden_state: torch.Tensor,
memory_cell: torch.Tensor,
previous_action_embedding: torch.Tensor,
attended_input: torch.Tensor,
encoder_outputs: List[torch.Tensor],
encoder_output_mask: List[torch.Tensor],
attended_sql_input: Optional[torch.Tensor] = None,
sql_outputs: List[torch.Tensor] = None,
sql_output_mask: List[torch.Tensor] = None,
decoder_outputs: Optional[torch.Tensor] = None) -> None:
self.hidden_state = hidden_state
self.memory_cell = memory_cell
self.previous_action_embedding = previous_action_embedding
self.attended_input = attended_input
self.encoder_outputs = encoder_outputs
self.encoder_output_mask = encoder_output_mask
self.decoder_outputs = decoder_outputs
# extra attended input
self.attended_sql_input = attended_sql_input
self.sql_outputs = sql_outputs
self.sql_output_mask = sql_output_mask
def __eq__(self, other):
if isinstance(self, other.__class__):
return all([
util.tensors_equal(self.hidden_state, other.hidden_state, tolerance=1e-5),
util.tensors_equal(self.memory_cell, other.memory_cell, tolerance=1e-5),
util.tensors_equal(self.previous_action_embedding, other.previous_action_embedding,
tolerance=1e-5),
util.tensors_equal(self.attended_input, other.attended_input, tolerance=1e-5)
])
return NotImplemented
|
ContextualSP/semantic_parsing_in_context/models/states_machine/rnn_statelet.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/states_machine/rnn_statelet.py",
"repo_id": "ContextualSP",
"token_count": 1688
}
| 264 |
import argparse
import stanza
from unisar.api import UnisarAPI
class Interactive(object):
def __init__(self, Unisar: UnisarAPI):
self.unisar = Unisar
def ask_any_question(self, question, db_id):
results = self.unisar.infer_query(question, db_id)
print('input:', results['slml_question'])
print(f'"pred:" {results["predict_sql"]} ({results["score"]})')
# try:
# results = self.unisar.execute(results['query'])
# print(results)
# except Exception as e:
# print(str(e))
def show_schema(self, db_id):
for table in self.unisar.schema[db_id].values():
print("Table", f"{table.name}")
for column in table.columns:
print(" Column", f"{column.name}")
def run(self, db_id):
self.show_schema(db_id)
# self.ask_any_question('Tell me the name about organization', db_id)
while True:
question = input("Ask a question: ")
self.ask_any_question(question, db_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--logdir", default='./models/spider_sl')
parser.add_argument("--db_id", default='student_1')
parser.add_argument(
"--db-path", default='./data/spider/database',
help="The path to the sqlite database or csv file"
)
parser.add_argument(
"--schema-path", default='./data/spider/tables.json',
help="The path to the tables.json file with human-readable database schema."
)
args = parser.parse_args()
stanza_model = stanza.Pipeline(lang='en', processors='tokenize,pos,lemma')
interactive = Interactive(UnisarAPI(args.logdir, args.db_path, args.schema_path, stanza_model))
interactive.run(args.db_id)
|
ContextualSP/unified_parser_text_to_sql/interactive.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/interactive.py",
"repo_id": "ContextualSP",
"token_count": 782
}
| 265 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.module.Linear_super import LinearSuper
from model.module.layernorm_super import LayerNormSuper
from model.module.multihead_super import AttentionSuper
from model.module.embedding_super import PatchembedSuper
from model.utils import trunc_normal_
from model.utils import DropPath
import numpy as np
def gelu(x: torch.Tensor) -> torch.Tensor:
if hasattr(torch.nn.functional, 'gelu'):
return torch.nn.functional.gelu(x.float()).type_as(x)
else:
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class Vision_TransformerSuper(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., pre_norm=True, scale=False, gp=False, relative_position=False, change_qkv=False, abs_pos = True, max_relative_position=14):
super(Vision_TransformerSuper, self).__init__()
# the configs of super arch
self.super_embed_dim = embed_dim
# self.super_embed_dim = args.embed_dim
self.super_mlp_ratio = mlp_ratio
self.super_layer_num = depth
self.super_num_heads = num_heads
self.super_dropout = drop_rate
self.super_attn_dropout = attn_drop_rate
self.num_classes = num_classes
self.pre_norm=pre_norm
self.scale=scale
self.patch_embed_super = PatchembedSuper(img_size=img_size, patch_size=patch_size,
in_chans=in_chans, embed_dim=embed_dim)
self.gp = gp
# configs for the sampled subTransformer
self.sample_embed_dim = None
self.sample_mlp_ratio = None
self.sample_layer_num = None
self.sample_num_heads = None
self.sample_dropout = None
self.sample_output_dim = None
self.blocks = nn.ModuleList()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
for i in range(depth):
self.blocks.append(TransformerEncoderLayer(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, dropout=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i],
pre_norm=pre_norm, scale=self.scale,
change_qkv=change_qkv, relative_position=relative_position,
max_relative_position=max_relative_position))
# parameters for vision transformer
num_patches = self.patch_embed_super.num_patches
self.abs_pos = abs_pos
if self.abs_pos:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
trunc_normal_(self.cls_token, std=.02)
# self.pos_drop = nn.Dropout(p=drop_rate)
if self.pre_norm:
self.norm = LayerNormSuper(super_embed_dim=embed_dim)
# classifier head
self.head = LinearSuper(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'rel_pos_embed'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def set_sample_config(self, config: dict):
self.sample_embed_dim = config['embed_dim']
self.sample_mlp_ratio = config['mlp_ratio']
self.sample_layer_num = config['layer_num']
self.sample_num_heads = config['num_heads']
self.sample_dropout = calc_dropout(self.super_dropout, self.sample_embed_dim[0], self.super_embed_dim)
self.patch_embed_super.set_sample_config(self.sample_embed_dim[0])
self.sample_output_dim = [out_dim for out_dim in self.sample_embed_dim[1:]] + [self.sample_embed_dim[-1]]
for i, blocks in enumerate(self.blocks):
# not exceed sample layer number
if i < self.sample_layer_num:
sample_dropout = calc_dropout(self.super_dropout, self.sample_embed_dim[i], self.super_embed_dim)
sample_attn_dropout = calc_dropout(self.super_attn_dropout, self.sample_embed_dim[i], self.super_embed_dim)
blocks.set_sample_config(is_identity_layer=False,
sample_embed_dim=self.sample_embed_dim[i],
sample_mlp_ratio=self.sample_mlp_ratio[i],
sample_num_heads=self.sample_num_heads[i],
sample_dropout=sample_dropout,
sample_out_dim=self.sample_output_dim[i],
sample_attn_dropout=sample_attn_dropout)
# exceeds sample layer number
else:
blocks.set_sample_config(is_identity_layer=True)
if self.pre_norm:
self.norm.set_sample_config(self.sample_embed_dim[-1])
self.head.set_sample_config(self.sample_embed_dim[-1], self.num_classes)
def get_sampled_params_numel(self, config):
self.set_sample_config(config)
numels = []
for name, module in self.named_modules():
if hasattr(module, 'calc_sampled_param_num'):
if name.split('.')[0] == 'blocks' and int(name.split('.')[1]) >= config['layer_num']:
continue
numels.append(module.calc_sampled_param_num())
return sum(numels) + self.sample_embed_dim[0]* (2 +self.patch_embed_super.num_patches)
def get_complexity(self, sequence_length):
total_flops = 0
total_flops += self.patch_embed_super.get_complexity(sequence_length)
total_flops += np.prod(self.pos_embed[..., :self.sample_embed_dim[0]].size()) / 2.0
for blk in self.blocks:
total_flops += blk.get_complexity(sequence_length+1)
total_flops += self.head.get_complexity(sequence_length+1)
return total_flops
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed_super(x)
cls_tokens = self.cls_token[..., :self.sample_embed_dim[0]].expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.abs_pos:
x = x + self.pos_embed[..., :self.sample_embed_dim[0]]
x = F.dropout(x, p=self.sample_dropout, training=self.training)
# start_time = time.time()
for blk in self.blocks:
x = blk(x)
# print(time.time()-start_time)
if self.pre_norm:
x = self.norm(x)
if self.gp:
return torch.mean(x[:, 1:] , dim=1)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
Args:
args (argparse.Namespace): parsed command-line arguments which
"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, dropout=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, pre_norm=True, scale=False,
relative_position=False, change_qkv=False, max_relative_position=14):
super().__init__()
# the configs of super arch of the encoder, three dimension [embed_dim, mlp_ratio, and num_heads]
self.super_embed_dim = dim
self.super_mlp_ratio = mlp_ratio
self.super_ffn_embed_dim_this_layer = int(mlp_ratio * dim)
self.super_num_heads = num_heads
self.normalize_before = pre_norm
self.super_dropout = attn_drop
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.scale = scale
self.relative_position = relative_position
# self.super_activation_dropout = getattr(args, 'activation_dropout', 0)
# the configs of current sampled arch
self.sample_embed_dim = None
self.sample_mlp_ratio = None
self.sample_ffn_embed_dim_this_layer = None
self.sample_num_heads_this_layer = None
self.sample_scale = None
self.sample_dropout = None
self.sample_attn_dropout = None
self.is_identity_layer = None
self.attn = AttentionSuper(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
proj_drop=dropout, scale=self.scale, relative_position=self.relative_position, change_qkv=change_qkv,
max_relative_position=max_relative_position
)
self.attn_layer_norm = LayerNormSuper(self.super_embed_dim)
self.ffn_layer_norm = LayerNormSuper(self.super_embed_dim)
# self.dropout = dropout
self.activation_fn = gelu
# self.normalize_before = args.encoder_normalize_before
self.fc1 = LinearSuper(super_in_dim=self.super_embed_dim, super_out_dim=self.super_ffn_embed_dim_this_layer)
self.fc2 = LinearSuper(super_in_dim=self.super_ffn_embed_dim_this_layer, super_out_dim=self.super_embed_dim)
def set_sample_config(self, is_identity_layer, sample_embed_dim=None, sample_mlp_ratio=None, sample_num_heads=None, sample_dropout=None, sample_attn_dropout=None, sample_out_dim=None):
if is_identity_layer:
self.is_identity_layer = True
return
self.is_identity_layer = False
self.sample_embed_dim = sample_embed_dim
self.sample_out_dim = sample_out_dim
self.sample_mlp_ratio = sample_mlp_ratio
self.sample_ffn_embed_dim_this_layer = int(sample_embed_dim*sample_mlp_ratio)
self.sample_num_heads_this_layer = sample_num_heads
self.sample_dropout = sample_dropout
self.sample_attn_dropout = sample_attn_dropout
self.attn_layer_norm.set_sample_config(sample_embed_dim=self.sample_embed_dim)
self.attn.set_sample_config(sample_q_embed_dim=self.sample_num_heads_this_layer*64, sample_num_heads=self.sample_num_heads_this_layer, sample_in_embed_dim=self.sample_embed_dim)
self.fc1.set_sample_config(sample_in_dim=self.sample_embed_dim, sample_out_dim=self.sample_ffn_embed_dim_this_layer)
self.fc2.set_sample_config(sample_in_dim=self.sample_ffn_embed_dim_this_layer, sample_out_dim=self.sample_out_dim)
self.ffn_layer_norm.set_sample_config(sample_embed_dim=self.sample_embed_dim)
def forward(self, x):
"""
Args:
x (Tensor): input to the layer of shape `(batch, patch_num , sample_embed_dim)`
Returns:
encoded output of shape `(batch, patch_num, sample_embed_dim)`
"""
if self.is_identity_layer:
return x
# compute attn
# start_time = time.time()
residual = x
x = self.maybe_layer_norm(self.attn_layer_norm, x, before=True)
x = self.attn(x)
x = F.dropout(x, p=self.sample_attn_dropout, training=self.training)
x = self.drop_path(x)
x = residual + x
x = self.maybe_layer_norm(self.attn_layer_norm, x, after=True)
# print("attn :", time.time() - start_time)
# compute the ffn
# start_time = time.time()
residual = x
x = self.maybe_layer_norm(self.ffn_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.sample_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.sample_dropout, training=self.training)
if self.scale:
x = x * (self.super_mlp_ratio / self.sample_mlp_ratio)
x = self.drop_path(x)
x = residual + x
x = self.maybe_layer_norm(self.ffn_layer_norm, x, after=True)
# print("ffn :", time.time() - start_time)
return x
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def get_complexity(self, sequence_length):
total_flops = 0
if self.is_identity_layer:
return total_flops
total_flops += self.attn_layer_norm.get_complexity(sequence_length+1)
total_flops += self.attn.get_complexity(sequence_length+1)
total_flops += self.ffn_layer_norm.get_complexity(sequence_length+1)
total_flops += self.fc1.get_complexity(sequence_length+1)
total_flops += self.fc2.get_complexity(sequence_length+1)
return total_flops
def calc_dropout(dropout, sample_embed_dim, super_embed_dim):
return dropout * 1.0 * sample_embed_dim / super_embed_dim
|
Cream/AutoFormer/model/supernet_transformer.py/0
|
{
"file_path": "Cream/AutoFormer/model/supernet_transformer.py",
"repo_id": "Cream",
"token_count": 6478
}
| 266 |
# flake8: noqa
from .arraymisc import *
from .utils import *
from .fileio import *
from .opencv_info import *
from .image import *
from .video import *
from .visualization import *
from .version import __version__
# The following modules are not imported to this level, so mmcv may be used
# without PyTorch.
# - runner
# - parallel
|
Cream/CDARTS/CDARTS_detection/mmcv/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/__init__.py",
"repo_id": "Cream",
"token_count": 97
}
| 267 |
def list_from_file(filename, prefix='', offset=0, max_num=0):
"""Load a text file and parse the content as a list of strings.
Args:
filename (str): Filename.
prefix (str): The prefix to be inserted to the begining of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
Returns:
list[str]: A list of strings.
"""
cnt = 0
item_list = []
with open(filename, 'r') as f:
for _ in range(offset):
f.readline()
for line in f:
if max_num > 0 and cnt >= max_num:
break
item_list.append(prefix + line.rstrip('\n'))
cnt += 1
return item_list
def dict_from_file(filename, key_type=str):
"""Load a text file and parse the content as a dict.
Each line of the text file will be two or more columns splited by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Args:
filename(str): Filename.
key_type(type): Type of the dict's keys. str is user by default and
type conversion will be performed if specified.
Returns:
dict: The parsed contents.
"""
mapping = {}
with open(filename, 'r') as f:
for line in f:
items = line.rstrip('\n').split()
assert len(items) >= 2
key = key_type(items[0])
val = items[1:] if len(items) > 2 else items[1]
mapping[key] = val
return mapping
|
Cream/CDARTS/CDARTS_detection/mmcv/fileio/parse.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/fileio/parse.py",
"repo_id": "Cream",
"token_count": 685
}
| 268 |
from .runner import Runner
from .log_buffer import LogBuffer
from .dist_utils import get_dist_info, init_dist, master_only
from .hooks import (Hook, CheckpointHook, ClosureHook, LrUpdaterHook,
OptimizerHook, OptimizerArchHook, IterTimerHook, DistSamplerSeedHook,
LoggerHook, TextLoggerHook, PaviLoggerHook,
TensorboardLoggerHook)
from .checkpoint import (load_state_dict, load_checkpoint, weights_to_cpu,
save_checkpoint)
from .parallel_test import parallel_test
from .priority import Priority, get_priority
from .utils import (get_host_info, get_dist_info, master_only, get_time_str,
obj_from_dict)
__all__ = [
'Runner', 'LogBuffer', 'Hook', 'CheckpointHook', 'ClosureHook',
'LrUpdaterHook', 'OptimizerHook', 'OptimizerArchHook', 'IterTimerHook', 'DistSamplerSeedHook',
'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook',
'load_state_dict', 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint',
'parallel_test', 'Priority', 'get_priority', 'get_host_info',
'get_dist_info', 'master_only', 'get_time_str', 'obj_from_dict',
'init_dist', 'get_dist_info', 'master_only'
]
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/__init__.py",
"repo_id": "Cream",
"token_count": 521
}
| 269 |
from .hook import Hook
class DistSamplerSeedHook(Hook):
def before_epoch(self, runner):
runner.data_loader.sampler.set_epoch(runner.epoch)
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/sampler_seed.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/sampler_seed.py",
"repo_id": "Cream",
"token_count": 61
}
| 270 |
from .version import __version__, short_version
__all__ = ['__version__', 'short_version']
|
Cream/CDARTS/CDARTS_detection/mmdet/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/__init__.py",
"repo_id": "Cream",
"token_count": 28
}
| 271 |
import torch
from .base_assigner import BaseAssigner
from .assign_result import AssignResult
from ..geometry import bbox_overlaps
class MaxIoUAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposals will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to bboxes.
This method assign a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, 0, or a positive number. -1 means don't care,
0 means negative sample, positive number is the index (1-based) of
assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to -1
2. assign proposals whose iou with all gts < neg_iou_thr to 0
3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
assign it to that bbox
4. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
if bboxes.shape[0] == 0 or gt_bboxes.shape[0] == 0:
raise ValueError('No gt or bboxes')
bboxes = bboxes[:, :4]
overlaps = bbox_overlaps(gt_bboxes, bboxes)
if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
gt_bboxes_ignore.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = bbox_overlaps(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = bbox_overlaps(
gt_bboxes_ignore, bboxes, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
return assign_result
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
if overlaps.numel() == 0:
raise ValueError('No gt or proposals')
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full(
(num_bboxes, ), -1, dtype=torch.long)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
# 4. assign fg: for each gt, proposals with highest IoU
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, ))
pos_inds = torch.nonzero(assigned_gt_inds > 0).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py",
"repo_id": "Cream",
"token_count": 3164
}
| 272 |
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .recall import eval_recalls
def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in [
'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
]
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
if result_types == ['proposal_fast']:
ar = fast_eval_recall(result_files, coco, np.array(max_dets))
for i, num in enumerate(max_dets):
print('AR@{}\t= {:.4f}'.format(num, ar[i]))
return
for res_type in result_types:
result_file = result_files[res_type]
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
def fast_eval_recall(results,
coco,
max_dets,
iou_thrs=np.arange(0.5, 0.96, 0.05)):
if mmcv.is_str(results):
assert results.endswith('.pkl')
results = mmcv.load(results)
elif not isinstance(results, list):
raise TypeError(
'results must be a list of numpy arrays or a filename, not {}'.
format(type(results)))
gt_bboxes = []
img_ids = coco.getImgIds()
for i in range(len(img_ids)):
ann_ids = coco.getAnnIds(imgIds=img_ids[i])
ann_info = coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
def segm2json(dataset, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if len(seg) == 2:
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
result_files = dict()
if isinstance(results[0], list):
json_results = det2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
|
Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/coco_utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/coco_utils.py",
"repo_id": "Cream",
"token_count": 3202
}
| 273 |
from functools import partial
import mmcv
import numpy as np
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds, :] = data
return ret
|
Cream/CDARTS/CDARTS_detection/mmdet/core/utils/misc.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/utils/misc.py",
"repo_id": "Cream",
"token_count": 500
}
| 274 |
from mmdet.utils import Registry
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/registry.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/registry.py",
"repo_id": "Cream",
"token_count": 34
}
| 275 |
from .resnet import ResNet, make_res_layer
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .hrnet import HRNet
from .mobilenetv2 import MobileNetV2
from .detnas import DetNas
from .fbnet import FBNet
from .mnasnet import MnasNet
from .mobilenetv3 import SSDMobilenetV3
from .efficientnet import SSDEFFB0
__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet', 'MobileNetV2', 'DetNas', 'FBNet', 'MnasNet', 'SSDMobilenetV3', 'SSDEFFB0']
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/__init__.py",
"repo_id": "Cream",
"token_count": 181
}
| 276 |
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (VGG, xavier_init, constant_init, kaiming_init,
normal_init)
from mmcv.runner import load_checkpoint
from ..registry import BACKBONES
@BACKBONES.register_module
class SSDVGG(VGG):
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
input_size,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20.):
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
assert input_size in (300, 512)
self.input_size = input_size
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
self.inplanes = 1024
self.extra = self._make_extra_layers(self.extra_setting[input_size])
self.l2_norm = L2Norm(
self.features[out_feature_indices[0] - 1].out_channels,
l2_norm_scale)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.features.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
else:
raise TypeError('pretrained must be a str or None')
for m in self.extra.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
constant_init(self.l2_norm, self.l2_norm.scale)
def forward(self, x):
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
for i, layer in enumerate(self.extra):
x = F.relu(layer(x), inplace=True)
if i % 2 == 1:
outs.append(x)
outs[0] = self.l2_norm(outs[0])
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _make_extra_layers(self, outplanes):
layers = []
kernel_sizes = (1, 3)
num_layers = 0
outplane = None
for i in range(len(outplanes)):
if self.inplanes == 'S':
self.inplanes = outplane
continue
k = kernel_sizes[num_layers % 2]
if outplanes[i] == 'S':
outplane = outplanes[i + 1]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=2, padding=1)
else:
outplane = outplanes[i]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=1, padding=0)
layers.append(conv)
self.inplanes = outplanes[i]
num_layers += 1
if self.input_size == 512:
layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
return nn.Sequential(*layers)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
# normalization layer convert to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/ssd_vgg.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/ssd_vgg.py",
"repo_id": "Cream",
"token_count": 2468
}
| 277 |
from .two_stage import TwoStageDetector
from ..registry import DETECTORS
@DETECTORS.register_module
class FasterRCNN(TwoStageDetector):
def __init__(self,
backbone,
rpn_head,
bbox_roi_extractor,
bbox_head,
train_cfg,
test_cfg,
neck=None,
shared_head=None,
cls_roi_scale_factor=None,
reg_roi_scale_factor=None,
pretrained=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
shared_head=shared_head,
rpn_head=rpn_head,
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
cls_roi_scale_factor = cls_roi_scale_factor,
reg_roi_scale_factor = reg_roi_scale_factor,
pretrained=pretrained)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/faster_rcnn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/faster_rcnn.py",
"repo_id": "Cream",
"token_count": 575
}
| 278 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
"Gradient Harmonized Single-stage Detector".
https://arxiv.org/abs/1811.05181
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
"""
def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
self.edges = torch.arange(bins + 1).float().cuda() / bins
self.edges[-1] += 1e-6
if momentum > 0:
self.acc_sum = torch.zeros(bins).cuda()
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
def forward(self, pred, target, label_weight, *args, **kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
Returns:
The gradient harmonized loss.
"""
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_binary_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, weights, reduction='sum') / tot
return loss * self.loss_weight
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
"Gradient Harmonized Single-stage Detector"
https://arxiv.org/abs/1811.05181
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
"""
def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
self.edges = torch.arange(bins + 1).float().cuda() / bins
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
self.acc_sum = torch.zeros(bins).cuda()
self.loss_weight = loss_weight
# TODO: support reduction parameter
def forward(self, pred, target, label_weight, avg_factor=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
Returns:
The gradient harmonized loss.
"""
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = loss * weights
loss = loss.sum() / tot
return loss * self.loss_weight
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/ghm_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/ghm_loss.py",
"repo_id": "Cream",
"token_count": 2855
}
| 279 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from ..plugins import NonLocal2D
from ..registry import NECKS
from ..utils import ConvModule
@NECKS.register_module
class BFP(nn.Module):
"""BFP (Balanced Feature Pyrmamids)
BFP takes multi-level features as inputs and gather them into a single one,
then refine the gathered feature and scatter the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
https://arxiv.org/pdf/1904.02701.pdf for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None):
super(BFP, self).__init__()
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2D(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/bfp.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/bfp.py",
"repo_id": "Cream",
"token_count": 1712
}
| 280 |
import torch.nn as nn
import torch.nn.functional as F
def conv_ws_2d(input,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
eps=1e-5):
c_in = weight.size(0)
weight_flat = weight.view(c_in, -1)
mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
weight = (weight - mean) / (std + eps)
return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
class ConvWS2d(nn.Conv2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
eps=1e-5):
super(ConvWS2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
self.eps = eps
def forward(self, x):
return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
self.dilation, self.groups, self.eps)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/utils/conv_ws.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/utils/conv_ws.py",
"repo_id": "Cream",
"token_count": 786
}
| 281 |
// modify from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/modulated_dcn_cuda.c
// based on
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
#include <torch/extension.h>
#include <cmath>
#include <vector>
void DeformablePSROIPoolForward(
const at::Tensor data, const at::Tensor bbox, const at::Tensor trans,
at::Tensor out, at::Tensor top_count, const int batch, const int channels,
const int height, const int width, const int num_bbox,
const int channels_trans, const int no_trans, const float spatial_scale,
const int output_dim, const int group_size, const int pooled_size,
const int part_size, const int sample_per_part, const float trans_std);
void DeformablePSROIPoolBackwardAcc(
const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox,
const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad,
at::Tensor trans_grad, const int batch, const int channels,
const int height, const int width, const int num_bbox,
const int channels_trans, const int no_trans, const float spatial_scale,
const int output_dim, const int group_size, const int pooled_size,
const int part_size, const int sample_per_part, const float trans_std);
void deform_psroi_pooling_cuda_forward(
at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out,
at::Tensor top_count, const int no_trans, const float spatial_scale,
const int output_dim, const int group_size, const int pooled_size,
const int part_size, const int sample_per_part, const float trans_std) {
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_trans = no_trans ? 2 : trans.size(1);
const int num_bbox = bbox.size(0);
if (num_bbox != out.size(0))
AT_ERROR("Output shape and bbox number wont match: (%d vs %d).",
out.size(0), num_bbox);
DeformablePSROIPoolForward(
input, bbox, trans, out, top_count, batch, channels, height, width,
num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size,
pooled_size, part_size, sample_per_part, trans_std);
}
void deform_psroi_pooling_cuda_backward(
at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans,
at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad,
const int no_trans, const float spatial_scale, const int output_dim,
const int group_size, const int pooled_size, const int part_size,
const int sample_per_part, const float trans_std) {
TORCH_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous");
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_trans = no_trans ? 2 : trans.size(1);
const int num_bbox = bbox.size(0);
if (num_bbox != out_grad.size(0))
AT_ERROR("Output shape and bbox number wont match: (%d vs %d).",
out_grad.size(0), num_bbox);
DeformablePSROIPoolBackwardAcc(
out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch,
channels, height, width, num_bbox, channels_trans, no_trans,
spatial_scale, output_dim, group_size, pooled_size, part_size,
sample_per_part, trans_std);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("deform_psroi_pooling_cuda_forward", &deform_psroi_pooling_cuda_forward,
"deform psroi pooling forward(CUDA)");
m.def("deform_psroi_pooling_cuda_backward",
&deform_psroi_pooling_cuda_backward,
"deform psroi pooling backward(CUDA)");
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp",
"repo_id": "Cream",
"token_count": 1451
}
| 282 |
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <torch/extension.h>
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);
at::Tensor nms(const at::Tensor& dets, const float threshold) {
CHECK_CUDA(dets);
if (dets.numel() == 0)
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
return nms_cuda(dets, threshold);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("nms", &nms, "non-maximum suppression");
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp",
"repo_id": "Cream",
"token_count": 236
}
| 283 |
import torch
from torch.autograd import Function
from .. import roi_pool_cuda
class RoIPoolFunction(Function):
@staticmethod
def forward(ctx, features, rois, out_size, spatial_scale):
if isinstance(out_size, int):
out_h = out_size
out_w = out_size
elif isinstance(out_size, tuple):
assert len(out_size) == 2
assert isinstance(out_size[0], int)
assert isinstance(out_size[1], int)
out_h, out_w = out_size
else:
raise TypeError(
'"out_size" must be an integer or tuple of integers')
assert features.is_cuda
ctx.save_for_backward(rois)
num_channels = features.size(1)
num_rois = rois.size(0)
out_size = (num_rois, num_channels, out_h, out_w)
output = features.new_zeros(out_size)
argmax = features.new_zeros(out_size, dtype=torch.int)
roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale,
output, argmax)
ctx.spatial_scale = spatial_scale
ctx.feature_size = features.size()
ctx.argmax = argmax
return output
@staticmethod
def backward(ctx, grad_output):
assert grad_output.is_cuda
spatial_scale = ctx.spatial_scale
feature_size = ctx.feature_size
argmax = ctx.argmax
rois = ctx.saved_tensors[0]
assert feature_size is not None
grad_input = grad_rois = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.new_zeros(feature_size)
roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
spatial_scale, grad_input)
return grad_input, grad_rois, None, None
roi_pool = RoIPoolFunction.apply
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py",
"repo_id": "Cream",
"token_count": 886
}
| 284 |
import os.path as osp
import subprocess
import sys
from collections import defaultdict
import cv2
import mmcv
import torch
import torchvision
import mmdet
def collect_env():
env_info = {}
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
if cuda_available:
from torch.utils.cpp_extension import CUDA_HOME
env_info['CUDA_HOME'] = CUDA_HOME
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output(
'"{}" -V | tail -n1'.format(nvcc), shell=True)
nvcc = nvcc.decode('utf-8').strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, devids in devices.items():
env_info['GPU ' + ','.join(devids)] = name
gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
gcc = gcc.decode('utf-8').strip()
env_info['GCC'] = gcc
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = torch.__config__.show()
env_info['TorchVision'] = torchvision.__version__
env_info['OpenCV'] = cv2.__version__
env_info['MMCV'] = mmcv.__version__
env_info['MMDetection'] = mmdet.__version__
from mmdet.ops import get_compiler_version, get_compiling_cuda_version
env_info['MMDetection Compiler'] = get_compiler_version()
env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version()
return env_info
if __name__ == "__main__":
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
|
Cream/CDARTS/CDARTS_detection/mmdet/utils/collect_env.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/collect_env.py",
"repo_id": "Cream",
"token_count": 855
}
| 285 |
import argparse
import os
import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
class MultipleKVAction(argparse.Action):
"""
argparse action to split an argument into KEY=VALUE form
on the first = and append to a dictionary.
"""
def _is_int(self, val):
try:
_ = int(val)
return True
except Exception:
return False
def _is_float(self, val):
try:
_ = float(val)
return True
except Exception:
return False
def _is_bool(self, val):
return val.lower() in ['true', 'false']
def __call__(self, parser, namespace, values, option_string=None):
options = {}
for val in values:
parts = val.split('=')
key = parts[0].strip()
if len(parts) > 2:
val = '='.join(parts[1:])
else:
val = parts[1].strip()
# try parsing val to bool/int/float first
if self._is_bool(val):
import json
val = json.loads(val.lower())
elif self._is_int(val):
val = int(val)
elif self._is_float(val):
val = float(val)
options[key] = val
setattr(namespace, self.dest, options)
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format_only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--gpu_collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu_collect is not specified')
parser.add_argument(
'--options', nargs='+', action=MultipleKVAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show, \
('Please specify at least one operation (save/eval/format/show the '
'results) with the argument "--out", "--eval", "--format_only" '
'or "--show"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
# old versions did not save class info in checkpoints, this walkaround is
# for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
kwargs = {} if args.options is None else args.options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_detection/tools/test.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/test.py",
"repo_id": "Cream",
"token_count": 4690
}
| 286 |
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from dataloaders import custom_transforms as tr
class VOCSegmentation(Dataset):
"""
PascalVoc dataset
"""
NUM_CLASSES = 21
def __init__(self,
args,
base_dir,
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.args = args
_splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation')
self.im_ids = []
self.images = []
self.categories = []
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
_image = os.path.join(self._image_dir, line + ".jpg")
_cat = os.path.join(self._cat_dir, line + ".png")
assert os.path.isfile(_image)
assert os.path.isfile(_cat)
self.im_ids.append(line)
self.images.append(_image)
self.categories.append(_cat)
assert (len(self.images) == len(self.categories))
# Display stats
print('Number of images in {}: {:d}'.format(split, len(self.images)))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
for split in self.split:
if split == "train":
return self.transform_tr(sample)
elif split == 'val':
return self.transform_val(sample)
def _make_img_gt_point_pair(self, index):
_img = Image.open(self.images[index]).convert('RGB')
_target = Image.open(self.categories[index])
return _img, _target
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = transforms.Compose([
tr.FixScaleCrop(crop_size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def __str__(self):
return 'VOC2012(split=' + str(self.split) + ')'
if __name__ == '__main__':
from dataloaders.dataloader_utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
voc_train = VOCSegmentation(args, split='train')
dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/pascal.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/pascal.py",
"repo_id": "Cream",
"token_count": 2151
}
| 287 |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Create by Bin Xiao ([email protected])
# Modified by Ke Sun ([email protected]), Rainbowsecret ([email protected])
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
# configs for HRNet48
HRNET_48 = CN()
HRNET_48.FINAL_CONV_KERNEL = 1
HRNET_48.STAGE1 = CN()
HRNET_48.STAGE1.NUM_MODULES = 1
HRNET_48.STAGE1.NUM_BRANCHES = 1
HRNET_48.STAGE1.NUM_BLOCKS = [4]
HRNET_48.STAGE1.NUM_CHANNELS = [64]
HRNET_48.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_48.STAGE1.FUSE_METHOD = 'SUM'
HRNET_48.STAGE2 = CN()
HRNET_48.STAGE2.NUM_MODULES = 1
HRNET_48.STAGE2.NUM_BRANCHES = 2
HRNET_48.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_48.STAGE2.NUM_CHANNELS = [48, 96]
HRNET_48.STAGE2.BLOCK = 'BASIC'
HRNET_48.STAGE2.FUSE_METHOD = 'SUM'
HRNET_48.STAGE3 = CN()
HRNET_48.STAGE3.NUM_MODULES = 4
HRNET_48.STAGE3.NUM_BRANCHES = 3
HRNET_48.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_48.STAGE3.NUM_CHANNELS = [48, 96, 192]
HRNET_48.STAGE3.BLOCK = 'BASIC'
HRNET_48.STAGE3.FUSE_METHOD = 'SUM'
HRNET_48.STAGE4 = CN()
HRNET_48.STAGE4.NUM_MODULES = 3
HRNET_48.STAGE4.NUM_BRANCHES = 4
HRNET_48.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_48.STAGE4.NUM_CHANNELS = [48, 96, 192, 384]
HRNET_48.STAGE4.BLOCK = 'BASIC'
HRNET_48.STAGE4.FUSE_METHOD = 'SUM'
# configs for HRNet32
HRNET_32 = CN()
HRNET_32.FINAL_CONV_KERNEL = 1
HRNET_32.STAGE1 = CN()
HRNET_32.STAGE1.NUM_MODULES = 1
HRNET_32.STAGE1.NUM_BRANCHES = 1
HRNET_32.STAGE1.NUM_BLOCKS = [4]
HRNET_32.STAGE1.NUM_CHANNELS = [64]
HRNET_32.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_32.STAGE1.FUSE_METHOD = 'SUM'
HRNET_32.STAGE2 = CN()
HRNET_32.STAGE2.NUM_MODULES = 1
HRNET_32.STAGE2.NUM_BRANCHES = 2
HRNET_32.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_32.STAGE2.NUM_CHANNELS = [32, 64]
HRNET_32.STAGE2.BLOCK = 'BASIC'
HRNET_32.STAGE2.FUSE_METHOD = 'SUM'
HRNET_32.STAGE3 = CN()
HRNET_32.STAGE3.NUM_MODULES = 4
HRNET_32.STAGE3.NUM_BRANCHES = 3
HRNET_32.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_32.STAGE3.NUM_CHANNELS = [32, 64, 128]
HRNET_32.STAGE3.BLOCK = 'BASIC'
HRNET_32.STAGE3.FUSE_METHOD = 'SUM'
HRNET_32.STAGE4 = CN()
HRNET_32.STAGE4.NUM_MODULES = 3
HRNET_32.STAGE4.NUM_BRANCHES = 4
HRNET_32.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_32.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
HRNET_32.STAGE4.BLOCK = 'BASIC'
HRNET_32.STAGE4.FUSE_METHOD = 'SUM'
# configs for HRNet18
HRNET_18 = CN()
HRNET_18.FINAL_CONV_KERNEL = 1
HRNET_18.STAGE1 = CN()
HRNET_18.STAGE1.NUM_MODULES = 1
HRNET_18.STAGE1.NUM_BRANCHES = 1
HRNET_18.STAGE1.NUM_BLOCKS = [4]
HRNET_18.STAGE1.NUM_CHANNELS = [64]
HRNET_18.STAGE1.BLOCK = 'BOTTLENECK'
HRNET_18.STAGE1.FUSE_METHOD = 'SUM'
HRNET_18.STAGE2 = CN()
HRNET_18.STAGE2.NUM_MODULES = 1
HRNET_18.STAGE2.NUM_BRANCHES = 2
HRNET_18.STAGE2.NUM_BLOCKS = [4, 4]
HRNET_18.STAGE2.NUM_CHANNELS = [18, 36]
HRNET_18.STAGE2.BLOCK = 'BASIC'
HRNET_18.STAGE2.FUSE_METHOD = 'SUM'
HRNET_18.STAGE3 = CN()
HRNET_18.STAGE3.NUM_MODULES = 4
HRNET_18.STAGE3.NUM_BRANCHES = 3
HRNET_18.STAGE3.NUM_BLOCKS = [4, 4, 4]
HRNET_18.STAGE3.NUM_CHANNELS = [18, 36, 72]
HRNET_18.STAGE3.BLOCK = 'BASIC'
HRNET_18.STAGE3.FUSE_METHOD = 'SUM'
HRNET_18.STAGE4 = CN()
HRNET_18.STAGE4.NUM_MODULES = 3
HRNET_18.STAGE4.NUM_BRANCHES = 4
HRNET_18.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
HRNET_18.STAGE4.NUM_CHANNELS = [18, 36, 72, 144]
HRNET_18.STAGE4.BLOCK = 'BASIC'
HRNET_18.STAGE4.FUSE_METHOD = 'SUM'
MODEL_CONFIGS = {
'hrnet18': HRNET_18,
'hrnet32': HRNET_32,
'hrnet48': HRNET_48,
}
|
Cream/CDARTS/CDARTS_segmentation/segmentation/config/hrnet_config.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/config/hrnet_config.py",
"repo_id": "Cream",
"token_count": 1819
}
| 288 |
from .semantic import SemanticEvaluator
from .instance import CityscapesInstanceEvaluator
from .panoptic import CityscapesPanopticEvaluator
from .coco_instance import COCOInstanceEvaluator
from .coco_panoptic import COCOPanopticEvaluator
|
Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/__init__.py",
"repo_id": "Cream",
"token_count": 78
}
| 289 |
# ------------------------------------------------------------------------------
# Common modules.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
def basic_conv(in_planes, out_planes, kernel_size, stride=1, padding=1, groups=1,
with_bn=True, with_relu=True):
"""convolution with bn and relu"""
module = []
has_bias = not with_bn
module.append(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups,
bias=has_bias)
)
if with_bn:
module.append(nn.BatchNorm2d(out_planes))
if with_relu:
module.append(nn.ReLU())
return nn.Sequential(*module)
def depthwise_separable_conv(in_planes, out_planes, kernel_size, stride=1, padding=1, groups=1,
with_bn=True, with_relu=True):
"""depthwise separable convolution with bn and relu"""
del groups
module = []
module.extend([
basic_conv(in_planes, in_planes, kernel_size, stride, padding, groups=in_planes,
with_bn=True, with_relu=True),
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
])
if with_bn:
module.append(nn.BatchNorm2d(out_planes))
if with_relu:
module.append(nn.ReLU())
return nn.Sequential(*module)
def stacked_conv(in_planes, out_planes, kernel_size, num_stack, stride=1, padding=1, groups=1,
with_bn=True, with_relu=True, conv_type='basic_conv'):
"""stacked convolution with bn and relu"""
if num_stack < 1:
assert ValueError('`num_stack` has to be a positive integer.')
if conv_type == 'basic_conv':
conv = partial(basic_conv, out_planes=out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, with_bn=with_bn, with_relu=with_relu)
elif conv_type == 'depthwise_separable_conv':
conv = partial(depthwise_separable_conv, out_planes=out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, groups=1, with_bn=with_bn, with_relu=with_relu)
else:
raise ValueError('Unknown conv_type: {}'.format(conv_type))
module = []
module.append(conv(in_planes=in_planes))
for n in range(1, num_stack):
module.append(conv(in_planes=out_planes))
return nn.Sequential(*module)
if __name__ == '__main__':
import torch
model = stacked_conv(4, 2, 3, 3)
print(model)
data = torch.zeros(1, 4, 5, 5)
print(model.forward(data).shape)
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/conv_module.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/decoder/conv_module.py",
"repo_id": "Cream",
"token_count": 1127
}
| 290 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/solver/build.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
import torch
from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, WarmupPolyLR
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(config):
"""
Creates gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = config.clone()
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(optimizer_type, gradient_clipper):
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
def optimizer_wgc_step(self, closure=None):
for group in self.param_groups:
for p in group["params"]:
gradient_clipper(p)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer_type.__name__ + "WithGradientClip",
(optimizer_type,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(config, optimizer):
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer instance of some type OptimizerType to become an instance
of the new dynamically created class OptimizerTypeWithGradientClip
that inherits OptimizerType and overrides the `step` method to
include gradient clipping.
Args:
config: configuration options
optimizer: torch.optim.Optimizer
existing optimizer instance
Return:
optimizer: torch.optim.Optimizer
either the unmodified optimizer instance (if gradient clipping is
disabled), or the same instance with adjusted __class__ to override
the `step` method and include gradient clipping
"""
if not config.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
grad_clipper = _create_gradient_clipper(config.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
type(optimizer), grad_clipper
)
optimizer.__class__ = OptimizerWithGradientClip
return optimizer
def build_optimizer(config, model):
"""Build an optimizer from config.
Args:
config: configuration file.
model: nn.Module, the model.
Returns:
A torch Optimizer.
Raises:
ValueError: optimizer type has unexpected value.
"""
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
# A list of dict: List[Dict[str, Any]].
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for key, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = config.SOLVER.BASE_LR
weight_decay = config.SOLVER.WEIGHT_DECAY
if isinstance(module, norm_module_types):
weight_decay = config.SOLVER.WEIGHT_DECAY_NORM
elif key == "bias":
# NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0
# and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer
# hyperparameters are by default exactly the same as for regular
# weights.
lr = config.SOLVER.BASE_LR * config.SOLVER.BIAS_LR_FACTOR
weight_decay = config.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if config.SOLVER.OPTIMIZER == "sgd":
optimizer = torch.optim.SGD(params, config.SOLVER.BASE_LR, momentum=config.SOLVER.MOMENTUM)
elif config.SOLVER.OPTIMIZER == "adam":
optimizer = torch.optim.Adam(params, config.SOLVER.BASE_LR, betas=config.SOLVER.ADAM_BETAS,
eps=config.SOLVER.ADAM_EPS)
else:
raise ValueError('Unknown optimizer: {}'.format(config.SOLVER.OPTIMIZER))
optimizer = maybe_add_gradient_clipping(config, optimizer)
return optimizer
def build_lr_scheduler(config, optimizer):
"""Build a LR scheduler from config.
Args:
config: configuration file.
optimizer: torch optimizer.
Returns:
A torch LRScheduler.
Raises:
ValueError: LRScheduler type has unexpected value.
"""
name = config.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
return WarmupMultiStepLR(
optimizer,
config.SOLVER.STEPS,
config.SOLVER.GAMMA,
warmup_factor=config.SOLVER.WARMUP_FACTOR,
warmup_iters=config.SOLVER.WARMUP_ITERS,
warmup_method=config.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupCosineLR":
return WarmupCosineLR(
optimizer,
config.TRAIN.MAX_ITER,
warmup_factor=config.SOLVER.WARMUP_FACTOR,
warmup_iters=config.SOLVER.WARMUP_ITERS,
warmup_method=config.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupPolyLR":
return WarmupPolyLR(
optimizer,
config.TRAIN.MAX_ITER,
warmup_factor=config.SOLVER.WARMUP_FACTOR,
warmup_iters=config.SOLVER.WARMUP_ITERS,
warmup_method=config.SOLVER.WARMUP_METHOD,
power=config.SOLVER.POLY_LR_POWER,
constant_ending=config.SOLVER.POLY_LR_CONSTANT_ENDING,
)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
|
Cream/CDARTS/CDARTS_segmentation/segmentation/solver/build.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/solver/build.py",
"repo_id": "Cream",
"token_count": 3020
}
| 291 |
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
# voc cityscapes metric
def hist_info(n_cl, pred, gt):
assert (pred.shape == gt.shape)
k = (gt >= 0) & (gt < n_cl)
labeled = np.sum(k)
correct = np.sum((pred[k] == gt[k]))
return np.bincount(n_cl * gt[k].astype(int) + pred[k].astype(int),
minlength=n_cl ** 2).reshape(n_cl,
n_cl), labeled, correct
def compute_score(hist, correct, labeled):
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
mean_IU = np.nanmean(iu)
mean_IU_no_back = np.nanmean(iu[1:])
# freq = hist.sum(1) / hist.sum()
# freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
mean_pixel_acc = correct / labeled
return iu, mean_IU, mean_IU_no_back, mean_pixel_acc
# ade metric
def meanIoU(area_intersection, area_union):
iou = 1.0 * np.sum(area_intersection, axis=1) / np.sum(area_union, axis=1)
meaniou = np.nanmean(iou)
meaniou_no_back = np.nanmean(iou[1:])
return iou, meaniou, meaniou_no_back
def intersectionAndUnion(imPred, imLab, numClass):
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
imPred = np.asarray(imPred).copy()
imLab = np.asarray(imLab).copy()
imPred += 1
imLab += 1
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
imPred = imPred * (imLab > 0)
# imPred = imPred * (imLab >= 0)
# Compute area intersection:
intersection = imPred * (imPred == imLab)
(area_intersection, _) = np.histogram(intersection, bins=numClass,
range=(1, numClass))
# Compute area union:
(area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
(area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
area_union = area_pred + area_lab - area_intersection
return area_intersection, area_union
def mean_pixel_accuracy(pixel_correct, pixel_labeled):
mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (
np.spacing(1) + np.sum(pixel_labeled))
return mean_pixel_accuracy
def pixelAccuracy(imPred, imLab):
# Remove classes from unlabeled pixels in gt image.
# We should not penalize detections in unlabeled portions of the image.
pixel_labeled = np.sum(imLab >= 0)
pixel_correct = np.sum((imPred == imLab) * (imLab >= 0))
pixel_accuracy = 1.0 * pixel_correct / pixel_labeled
return pixel_accuracy, pixel_correct, pixel_labeled
def accuracy(preds, label):
valid = (label >= 0)
acc_sum = (valid * (preds == label)).sum()
valid_sum = valid.sum()
acc = float(acc_sum) / (valid_sum + 1e-10)
return acc, valid_sum
|
Cream/CDARTS/CDARTS_segmentation/tools/seg_opr/metric.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/seg_opr/metric.py",
"repo_id": "Cream",
"token_count": 1221
}
| 292 |
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from torch import nn, einsum
from einops import rearrange
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def rel_to_abs(x):
b, h, l, _, device, dtype = *x.shape, x.device, x.dtype
dd = {'device': device, 'dtype': dtype}
col_pad = torch.zeros((b, h, l, 1), **dd)
x = torch.cat((x, col_pad), dim = 3)
flat_x = rearrange(x, 'b h l c -> b h (l c)')
flat_pad = torch.zeros((b, h, l - 1), **dd)
flat_x_padded = torch.cat((flat_x, flat_pad), dim = 2)
final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)
final_x = final_x[:, :, :l, (l-1):]
return final_x
def relative_logits_1d(q, rel_k):
b, heads, h, w, dim = q.shape
logits = einsum('b h x y d, r d -> b h x y r', q, rel_k)
logits = rearrange(logits, 'b h x y r -> b (h x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, heads, h, w, w)
logits = expand_dim(logits, dim = 3, k = h)
return logits
# positional embeddings
class AbsPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.height = nn.Parameter(torch.randn(height, dim_head) * scale)
self.width = nn.Parameter(torch.randn(width, dim_head) * scale)
def forward(self, q):
emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d')
emb = rearrange(emb, ' h w d -> (h w) d')
logits = einsum('b h i d, j d -> b h i j', q, emb)
return logits
class RelPosEmb(nn.Module):
def __init__(
self,
fmap_size,
dim_head
):
super().__init__()
height, width = pair(fmap_size)
scale = dim_head ** -0.5
self.fmap_size = fmap_size
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
h, w = self.fmap_size
q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)')
q = rearrange(q, 'b h x y d -> b h y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)')
return rel_logits_w + rel_logits_h
# classes
class Attention(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
heads = 4,
dim_head = 128,
rel_pos_emb = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
rel_pos_class = AbsPosEmb if not rel_pos_emb else RelPosEmb
self.pos_emb = rel_pos_class(fmap_size, dim_head)
def forward(self, fmap):
heads, b, c, h, w = self.heads, *fmap.shape
q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))
q *= self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
sim += self.pos_emb(q)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return out
class Self_Attn(nn.Module):
def __init__(
self,
*,
dim,
fmap_size,
dim_out,
proj_factor,
downsample,
heads = 4,
dim_head = 128,
rel_pos_emb = False,
activation = nn.ReLU(inplace=True)
):
super().__init__()
# shortcut
proj_factor = 1
self.stride = 2 if downsample else 1
if dim != dim_out or downsample:
kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0)
self.shortcut = nn.Sequential(
nn.Conv2d(dim, dim_out, kernel_size, stride = stride, padding = padding, bias = False),
nn.BatchNorm2d(dim_out),
activation
)
else:
self.shortcut = nn.Identity()
# contraction and expansion
attn_dim_in = dim_out // proj_factor
# attn_dim_out = heads * dim_head
attn_dim_out = attn_dim_in
self.net = nn.Sequential(
nn.Conv2d(dim, attn_dim_in, 1, bias = False),
nn.BatchNorm2d(attn_dim_in),
activation,
ATT(attn_dim_in),
# Attention(
# dim = attn_dim_in,
# fmap_size = fmap_size,
# heads = heads,
# dim_head = dim_head,
# rel_pos_emb = rel_pos_emb
# ),
nn.AvgPool2d((2, 2)) if downsample else nn.Identity(),
nn.BatchNorm2d(attn_dim_out),
activation,
nn.Conv2d(attn_dim_out, dim_out, 1, bias = False),
nn.BatchNorm2d(dim_out)
)
# init last batch norm gamma to zero
nn.init.zeros_(self.net[-1].weight)
# final activation
self.activation = activation
def forward(self, x):
shortcut = self.shortcut(x)
out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=True)
out = self.net(out)
if self.stride == 1:
out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=True)
out += shortcut
return self.activation(out)
class ATT(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim):
super(ATT, self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1,width*height).permute(0,2,1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize,-1, width*height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0,2,1) )
out = out.view(m_batchsize, C, width,height)
out = self.gamma*out + x
return out
|
Cream/CDARTS/CDARTS_segmentation/train/att_sa.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/att_sa.py",
"repo_id": "Cream",
"token_count": 3832
}
| 293 |
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Modified by Xiyang for effortlessly launching on Azure ML
"""
r"""
`torch.distributed.launch` is a module that spawns up multiple distributed
training processes on each of the training nodes.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will especially be benefitial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
Node 2:
::
>>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
::
>>> torch.cuda.set_device(arg.local_rank) # before your code runs
or
::
>>> with torch.cuda.device(arg.local_rank):
>>> # your code to run
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. You need to make sure that
the init_method uses ``env://``, which is the only supported ``init_method``
by this module.
::
torch.distributed.init_process_group(backend='YOUR BACKEND',
init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[arg.local_rank],
output_device=arg.local_rank)
Please ensure that ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility
5. Another way to pass ``local_rank`` to the subprocesses via environment variable
``LOCAL_RANK``. This behavior is enabled when you launch the script with
``--use_env=True``. You must adjust the subprocess example above to replace
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
will not pass ``--local_rank`` when you specify this flag.
.. warning::
``local_rank`` is NOT globally unique: it is only unique per process
on a machine. Thus, don't use it to decide if you should, e.g.,
write to a networked filesystem. See
https://github.com/pytorch/pytorch/issues/12042 for an example of
how things can go wrong if you don't do this correctly.
"""
import sys
import subprocess
import os
from argparse import ArgumentParser, REMAINDER
NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] \
if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0
NODE_RANK = int(NODE_RANK)
MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') \
if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500)
MASTER_PORT = int(MASTER_PORT)
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=NODE_RANK,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default=MASTER_ADDR, type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=MASTER_PORT, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communication during distributed "
"training")
parser.add_argument("--use_env", default=False, action="store_true",
help="Use environment variable to pass "
"'local rank'. For legacy reasons, the default value is False. "
"If set to True, the script will not pass "
"--local_rank as argument, and will instead set LOCAL_RANK.")
parser.add_argument("-m", "--module", default=False, action="store_true",
help="Changes each process to interpret the launch script "
"as a python module, executing with the same behavior as"
"'python -m'.")
parser.add_argument("--no_python", default=False, action="store_true",
help="Do not prepend the training script with \"python\" - just exec "
"it directly. Useful when the script is not a Python script.")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
if 'OMP_NUM_THREADS' not in os.environ and args.nproc_per_node > 1:
current_env["OMP_NUM_THREADS"] = str(1)
print("*****************************************\n"
"Setting OMP_NUM_THREADS environment variable for each process "
"to be {} in default, to avoid your system being overloaded, "
"please further tune the variable for optimal performance in "
"your application as needed. \n"
"*****************************************".format(current_env["OMP_NUM_THREADS"]))
print('Launching Node', args.node_rank)
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
with_python = not args.no_python
cmd = []
if with_python:
cmd = [sys.executable, "-u"]
if args.module:
cmd.append("-m")
else:
if not args.use_env:
raise ValueError("When using the '--no_python' flag, you must also set the '--use_env' flag.")
if args.module:
raise ValueError("Don't use both the '--no_python' flag and the '--module' flag at the same time.")
cmd.append(args.training_script)
if not args.use_env:
cmd.append("--local_rank={}".format(local_rank))
cmd.extend(args.training_script_args)
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode,
cmd=cmd)
if __name__ == "__main__":
main()
|
Cream/CDARTS/CDARTS_segmentation/train/launch.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/launch.py",
"repo_id": "Cream",
"token_count": 4453
}
| 294 |
10/22 12:30:18 AM |
10/22 12:30:18 AM | Parameters:
10/22 12:30:18 AM | ALPHA_LR=0.0006
10/22 12:30:18 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:30:18 AM | AUX_WEIGHT=0.4
10/22 12:30:18 AM | BATCH_SIZE=128
10/22 12:30:18 AM | CELLS_NUM=3
10/22 12:30:18 AM | CLEAN_ARCH=False
10/22 12:30:18 AM | CUTOUT_LENGTH=16
10/22 12:30:18 AM | DATA_DIR=/data/cifar
10/22 12:30:18 AM | DATA_PATH=./data/
10/22 12:30:18 AM | DATASET=imagenet
10/22 12:30:18 AM | DIST_URL=tcp://127.0.0.1:23456
10/22 12:30:18 AM | DISTRIBUTED=False
10/22 12:30:18 AM | DROP_PATH_PROB=0.2
10/22 12:30:18 AM | ENSEMBLE=False
10/22 12:30:18 AM | GPUS=[0]
10/22 12:30:18 AM | INIT_CHANNELS=16
10/22 12:30:18 AM | INPUT_CHANNELS=3
10/22 12:30:18 AM | LAYER_NUM=3
10/22 12:30:18 AM | LOCAL_RANK=0
10/22 12:30:18 AM | LR_RATIO=0.5
10/22 12:30:18 AM | MODEL_TYPE=cifar
10/22 12:30:18 AM | N_CLASSES=10
10/22 12:30:18 AM | NAME=cifar10-search
10/22 12:30:18 AM | NO_REPEAT=False
10/22 12:30:18 AM | PATH=searchs/cifar10-search
10/22 12:30:18 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:30:18 AM | PRETRAIN_DECAY=5
10/22 12:30:18 AM | PRETRAIN_EPOCHS=5
10/22 12:30:18 AM | PRINT_FREQ=50
10/22 12:30:18 AM | RETRAIN_EPOCHS=25
10/22 12:30:18 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:30:18 AM | RETRAIN_SETTING=0
10/22 12:30:18 AM | RETRAIN_UPDATE_W=False
10/22 12:30:18 AM | SAME_STRUCTURE=False
10/22 12:30:18 AM | SAMPLE_RATIO=0.2
10/22 12:30:18 AM | SEARCH_ITER=5
10/22 12:30:18 AM | SEARCH_ITER_EPOCHS=5
10/22 12:30:18 AM | SEED=0
10/22 12:30:18 AM | SHORT_CONNECT=False
10/22 12:30:18 AM | SYNC_PARAM=False
10/22 12:30:18 AM | TEACHER2STUDENT=False
10/22 12:30:18 AM | TEST_DIR=/data/imagenet/val
10/22 12:30:18 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:30:18 AM | TRAIN_PORTION=0.5
10/22 12:30:18 AM | UNROLLED=False
10/22 12:30:18 AM | USE_BETA=False
10/22 12:30:18 AM | VAL_DIR=/data/imagenet/train
10/22 12:30:18 AM | W_GRAD_CLIP=5.0
10/22 12:30:18 AM | W_LR=0.05
10/22 12:30:18 AM | W_LR_MIN=0.001
10/22 12:30:18 AM | W_MOMENTUM=0.9
10/22 12:30:18 AM | W_WEIGHT_DECAY=0.0003
10/22 12:30:18 AM | WORKERS=4
10/22 12:30:18 AM | WORLD_SIZE=1
10/22 12:30:18 AM |
10/22 12:30:18 AM | Logger is set - training start
10/22 12:31:35 AM |
10/22 12:31:35 AM | Parameters:
10/22 12:31:35 AM | ALPHA_LR=0.0003
10/22 12:31:35 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:31:35 AM | AUX_WEIGHT=0.4
10/22 12:31:35 AM | BATCH_SIZE=64
10/22 12:31:35 AM | CELLS_NUM=3
10/22 12:31:35 AM | CLEAN_ARCH=True
10/22 12:31:35 AM | CUTOUT_LENGTH=16
10/22 12:31:35 AM | DATA_DIR=/data/cifar
10/22 12:31:35 AM | DATA_PATH=./data/
10/22 12:31:35 AM | DATASET=cifar10
10/22 12:31:35 AM | DIST_URL='tcp://127.0.0.1:23343'
10/22 12:31:35 AM | DISTRIBUTED=True
10/22 12:31:35 AM | DROP_PATH_PROB=0.2
10/22 12:31:35 AM | ENSEMBLE=True
10/22 12:31:35 AM | GPUS=[0]
10/22 12:31:35 AM | INIT_CHANNELS=16
10/22 12:31:35 AM | INPUT_CHANNELS=3
10/22 12:31:35 AM | LAYER_NUM=3
10/22 12:31:35 AM | LOCAL_RANK=0
10/22 12:31:35 AM | LR_RATIO=0.5
10/22 12:31:35 AM | MODEL_TYPE=cifar
10/22 12:31:35 AM | N_CLASSES=10
10/22 12:31:35 AM | NAME=cifar10-search
10/22 12:31:35 AM | NO_REPEAT=False
10/22 12:31:35 AM | PATH=searchs/cifar10-search
10/22 12:31:35 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:31:35 AM | PRETRAIN_DECAY=0
10/22 12:31:35 AM | PRETRAIN_EPOCHS=0
10/22 12:31:35 AM | PRINT_FREQ=10
10/22 12:31:35 AM | RETRAIN_EPOCHS=1
10/22 12:31:35 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:31:35 AM | RETRAIN_SETTING=0
10/22 12:31:35 AM | RETRAIN_UPDATE_W=True
10/22 12:31:35 AM | SAME_STRUCTURE=True
10/22 12:31:35 AM | SAMPLE_RATIO=0.2
10/22 12:31:35 AM | SEARCH_ITER=25
10/22 12:31:35 AM | SEARCH_ITER_EPOCHS=1
10/22 12:31:35 AM | SEED=0
10/22 12:31:35 AM | SHORT_CONNECT=False
10/22 12:31:35 AM | SYNC_PARAM=True
10/22 12:31:35 AM | TEACHER2STUDENT=True
10/22 12:31:35 AM | TEST_DIR=/data/imagenet/val
10/22 12:31:35 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:31:35 AM | TRAIN_PORTION=0.5
10/22 12:31:35 AM | UNROLLED=False
10/22 12:31:35 AM | USE_BETA=True
10/22 12:31:35 AM | VAL_DIR=/data/imagenet/train
10/22 12:31:35 AM | W_GRAD_CLIP=5.0
10/22 12:31:35 AM | W_LR=0.05
10/22 12:31:35 AM | W_LR_MIN=0.001
10/22 12:31:35 AM | W_MOMENTUM=0.9
10/22 12:31:35 AM | W_WEIGHT_DECAY=0.0003
10/22 12:31:35 AM | WORKERS=1
10/22 12:31:35 AM | WORLD_SIZE=2
10/22 12:31:35 AM |
10/22 12:31:35 AM | Logger is set - training start
10/22 12:31:48 AM |
10/22 12:31:48 AM | Parameters:
10/22 12:31:48 AM | ALPHA_LR=0.0003
10/22 12:31:48 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:31:48 AM | AUX_WEIGHT=0.4
10/22 12:31:48 AM | BATCH_SIZE=64
10/22 12:31:48 AM | CELLS_NUM=3
10/22 12:31:48 AM | CLEAN_ARCH=True
10/22 12:31:48 AM | CUTOUT_LENGTH=16
10/22 12:31:48 AM | DATA_DIR=/data/cifar
10/22 12:31:48 AM | DATA_PATH=./data/
10/22 12:31:48 AM | DATASET=cifar10
10/22 12:31:48 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:31:48 AM | DISTRIBUTED=True
10/22 12:31:48 AM | DROP_PATH_PROB=0.2
10/22 12:31:48 AM | ENSEMBLE=True
10/22 12:31:48 AM | GPUS=[0]
10/22 12:31:48 AM | INIT_CHANNELS=16
10/22 12:31:48 AM | INPUT_CHANNELS=3
10/22 12:31:48 AM | LAYER_NUM=3
10/22 12:31:48 AM | LOCAL_RANK=0
10/22 12:31:48 AM | LR_RATIO=0.5
10/22 12:31:48 AM | MODEL_TYPE=cifar
10/22 12:31:48 AM | N_CLASSES=10
10/22 12:31:48 AM | NAME=cifar10-search
10/22 12:31:48 AM | NO_REPEAT=False
10/22 12:31:48 AM | PATH=searchs/cifar10-search
10/22 12:31:48 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:31:48 AM | PRETRAIN_DECAY=0
10/22 12:31:48 AM | PRETRAIN_EPOCHS=0
10/22 12:31:48 AM | PRINT_FREQ=10
10/22 12:31:48 AM | RETRAIN_EPOCHS=1
10/22 12:31:48 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:31:48 AM | RETRAIN_SETTING=0
10/22 12:31:48 AM | RETRAIN_UPDATE_W=True
10/22 12:31:48 AM | SAME_STRUCTURE=True
10/22 12:31:48 AM | SAMPLE_RATIO=0.2
10/22 12:31:48 AM | SEARCH_ITER=25
10/22 12:31:48 AM | SEARCH_ITER_EPOCHS=1
10/22 12:31:48 AM | SEED=0
10/22 12:31:48 AM | SHORT_CONNECT=False
10/22 12:31:48 AM | SYNC_PARAM=True
10/22 12:31:48 AM | TEACHER2STUDENT=True
10/22 12:31:48 AM | TEST_DIR=/data/imagenet/val
10/22 12:31:48 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:31:48 AM | TRAIN_PORTION=0.5
10/22 12:31:48 AM | UNROLLED=False
10/22 12:31:48 AM | USE_BETA=True
10/22 12:31:48 AM | VAL_DIR=/data/imagenet/train
10/22 12:31:48 AM | W_GRAD_CLIP=5.0
10/22 12:31:48 AM | W_LR=0.05
10/22 12:31:48 AM | W_LR_MIN=0.001
10/22 12:31:48 AM | W_MOMENTUM=0.9
10/22 12:31:48 AM | W_WEIGHT_DECAY=0.0003
10/22 12:31:48 AM | WORKERS=1
10/22 12:31:48 AM | WORLD_SIZE=2
10/22 12:31:48 AM |
10/22 12:31:48 AM | Logger is set - training start
10/22 12:32:09 AM |
10/22 12:32:09 AM | Parameters:
10/22 12:32:09 AM | ALPHA_LR=0.0003
10/22 12:32:09 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:32:09 AM | AUX_WEIGHT=0.4
10/22 12:32:09 AM | BATCH_SIZE=64
10/22 12:32:09 AM | CELLS_NUM=3
10/22 12:32:09 AM | CLEAN_ARCH=True
10/22 12:32:09 AM | CUTOUT_LENGTH=16
10/22 12:32:09 AM | DATA_DIR=/data/cifar
10/22 12:32:09 AM | DATA_PATH=./data/
10/22 12:32:09 AM | DATASET=cifar10
10/22 12:32:09 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:32:09 AM | DISTRIBUTED=True
10/22 12:32:09 AM | DROP_PATH_PROB=0.2
10/22 12:32:09 AM | ENSEMBLE=True
10/22 12:32:09 AM | GPUS=[0]
10/22 12:32:09 AM | INIT_CHANNELS=16
10/22 12:32:09 AM | INPUT_CHANNELS=3
10/22 12:32:09 AM | LAYER_NUM=3
10/22 12:32:09 AM | LOCAL_RANK=0
10/22 12:32:09 AM | LR_RATIO=0.5
10/22 12:32:09 AM | MODEL_TYPE=cifar
10/22 12:32:09 AM | N_CLASSES=10
10/22 12:32:09 AM | NAME=cifar10-search
10/22 12:32:09 AM | NO_REPEAT=False
10/22 12:32:09 AM | PATH=searchs/cifar10-search
10/22 12:32:09 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:32:09 AM | PRETRAIN_DECAY=0
10/22 12:32:09 AM | PRETRAIN_EPOCHS=0
10/22 12:32:09 AM | PRINT_FREQ=10
10/22 12:32:09 AM | RETRAIN_EPOCHS=1
10/22 12:32:09 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:32:09 AM | RETRAIN_SETTING=0
10/22 12:32:09 AM | RETRAIN_UPDATE_W=True
10/22 12:32:09 AM | SAME_STRUCTURE=True
10/22 12:32:09 AM | SAMPLE_RATIO=0.2
10/22 12:32:09 AM | SEARCH_ITER=25
10/22 12:32:09 AM | SEARCH_ITER_EPOCHS=1
10/22 12:32:09 AM | SEED=0
10/22 12:32:09 AM | SHORT_CONNECT=False
10/22 12:32:09 AM | SYNC_PARAM=True
10/22 12:32:09 AM | TEACHER2STUDENT=True
10/22 12:32:09 AM | TEST_DIR=/data/imagenet/val
10/22 12:32:09 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:32:09 AM | TRAIN_PORTION=0.5
10/22 12:32:09 AM | UNROLLED=False
10/22 12:32:09 AM | USE_BETA=True
10/22 12:32:09 AM | VAL_DIR=/data/imagenet/train
10/22 12:32:09 AM | W_GRAD_CLIP=5.0
10/22 12:32:09 AM | W_LR=0.05
10/22 12:32:09 AM | W_LR_MIN=0.001
10/22 12:32:09 AM | W_MOMENTUM=0.9
10/22 12:32:09 AM | W_WEIGHT_DECAY=0.0003
10/22 12:32:09 AM | WORKERS=1
10/22 12:32:09 AM | WORLD_SIZE=2
10/22 12:32:09 AM |
10/22 12:32:09 AM | Logger is set - training start
10/22 12:32:53 AM |
10/22 12:32:53 AM | Parameters:
10/22 12:32:53 AM | ALPHA_LR=0.0003
10/22 12:32:53 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:32:53 AM | AUX_WEIGHT=0.4
10/22 12:32:53 AM | BATCH_SIZE=64
10/22 12:32:53 AM | CELLS_NUM=3
10/22 12:32:53 AM | CLEAN_ARCH=True
10/22 12:32:53 AM | CUTOUT_LENGTH=16
10/22 12:32:53 AM | DATA_DIR=./cifar
10/22 12:32:53 AM | DATA_PATH=./data/
10/22 12:32:53 AM | DATASET=cifar10
10/22 12:32:53 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:32:53 AM | DISTRIBUTED=True
10/22 12:32:53 AM | DROP_PATH_PROB=0.2
10/22 12:32:53 AM | ENSEMBLE=True
10/22 12:32:53 AM | GPUS=[0]
10/22 12:32:53 AM | INIT_CHANNELS=16
10/22 12:32:53 AM | INPUT_CHANNELS=3
10/22 12:32:53 AM | LAYER_NUM=3
10/22 12:32:53 AM | LOCAL_RANK=0
10/22 12:32:53 AM | LR_RATIO=0.5
10/22 12:32:53 AM | MODEL_TYPE=cifar
10/22 12:32:53 AM | N_CLASSES=10
10/22 12:32:53 AM | NAME=cifar10-search
10/22 12:32:53 AM | NO_REPEAT=False
10/22 12:32:53 AM | PATH=searchs/cifar10-search
10/22 12:32:53 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:32:53 AM | PRETRAIN_DECAY=0
10/22 12:32:53 AM | PRETRAIN_EPOCHS=0
10/22 12:32:53 AM | PRINT_FREQ=10
10/22 12:32:53 AM | RETRAIN_EPOCHS=1
10/22 12:32:53 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:32:53 AM | RETRAIN_SETTING=0
10/22 12:32:53 AM | RETRAIN_UPDATE_W=True
10/22 12:32:53 AM | SAME_STRUCTURE=True
10/22 12:32:53 AM | SAMPLE_RATIO=0.2
10/22 12:32:53 AM | SEARCH_ITER=25
10/22 12:32:53 AM | SEARCH_ITER_EPOCHS=1
10/22 12:32:53 AM | SEED=0
10/22 12:32:53 AM | SHORT_CONNECT=False
10/22 12:32:53 AM | SYNC_PARAM=True
10/22 12:32:53 AM | TEACHER2STUDENT=True
10/22 12:32:53 AM | TEST_DIR=/data/imagenet/val
10/22 12:32:53 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:32:53 AM | TRAIN_PORTION=0.5
10/22 12:32:53 AM | UNROLLED=False
10/22 12:32:53 AM | USE_BETA=True
10/22 12:32:53 AM | VAL_DIR=/data/imagenet/train
10/22 12:32:53 AM | W_GRAD_CLIP=5.0
10/22 12:32:53 AM | W_LR=0.05
10/22 12:32:53 AM | W_LR_MIN=0.001
10/22 12:32:53 AM | W_MOMENTUM=0.9
10/22 12:32:53 AM | W_WEIGHT_DECAY=0.0003
10/22 12:32:53 AM | WORKERS=1
10/22 12:32:53 AM | WORLD_SIZE=2
10/22 12:32:53 AM |
10/22 12:32:53 AM | Logger is set - training start
10/22 12:33:48 AM |
10/22 12:33:48 AM | Parameters:
10/22 12:33:48 AM | ALPHA_LR=0.0003
10/22 12:33:48 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:33:48 AM | AUX_WEIGHT=0.4
10/22 12:33:48 AM | BATCH_SIZE=64
10/22 12:33:48 AM | CELLS_NUM=3
10/22 12:33:48 AM | CLEAN_ARCH=True
10/22 12:33:48 AM | CUTOUT_LENGTH=16
10/22 12:33:48 AM | DATA_DIR=./cifar
10/22 12:33:48 AM | DATA_PATH=./data/
10/22 12:33:48 AM | DATASET=cifar10
10/22 12:33:48 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:33:48 AM | DISTRIBUTED=True
10/22 12:33:48 AM | DROP_PATH_PROB=0.2
10/22 12:33:48 AM | ENSEMBLE=True
10/22 12:33:48 AM | GPUS=[0]
10/22 12:33:48 AM | INIT_CHANNELS=16
10/22 12:33:48 AM | INPUT_CHANNELS=3
10/22 12:33:48 AM | LAYER_NUM=3
10/22 12:33:48 AM | LOCAL_RANK=0
10/22 12:33:48 AM | LR_RATIO=0.5
10/22 12:33:48 AM | MODEL_TYPE=cifar
10/22 12:33:48 AM | N_CLASSES=10
10/22 12:33:48 AM | NAME=cifar10-search
10/22 12:33:48 AM | NO_REPEAT=False
10/22 12:33:48 AM | PATH=searchs/cifar10-search
10/22 12:33:48 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:33:48 AM | PRETRAIN_DECAY=0
10/22 12:33:48 AM | PRETRAIN_EPOCHS=0
10/22 12:33:48 AM | PRINT_FREQ=10
10/22 12:33:48 AM | RETRAIN_EPOCHS=1
10/22 12:33:48 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:33:48 AM | RETRAIN_SETTING=0
10/22 12:33:48 AM | RETRAIN_UPDATE_W=True
10/22 12:33:48 AM | SAME_STRUCTURE=True
10/22 12:33:48 AM | SAMPLE_RATIO=0.2
10/22 12:33:48 AM | SEARCH_ITER=25
10/22 12:33:48 AM | SEARCH_ITER_EPOCHS=1
10/22 12:33:48 AM | SEED=0
10/22 12:33:48 AM | SHORT_CONNECT=False
10/22 12:33:48 AM | SYNC_PARAM=True
10/22 12:33:48 AM | TEACHER2STUDENT=True
10/22 12:33:48 AM | TEST_DIR=/data/imagenet/val
10/22 12:33:48 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:33:48 AM | TRAIN_PORTION=0.5
10/22 12:33:48 AM | UNROLLED=False
10/22 12:33:48 AM | USE_BETA=True
10/22 12:33:48 AM | VAL_DIR=/data/imagenet/train
10/22 12:33:48 AM | W_GRAD_CLIP=5.0
10/22 12:33:48 AM | W_LR=0.05
10/22 12:33:48 AM | W_LR_MIN=0.001
10/22 12:33:48 AM | W_MOMENTUM=0.9
10/22 12:33:48 AM | W_WEIGHT_DECAY=0.0003
10/22 12:33:48 AM | WORKERS=1
10/22 12:33:48 AM | WORLD_SIZE=1
10/22 12:33:48 AM |
10/22 12:33:48 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:34:41 AM |
10/22 12:34:41 AM | Parameters:
10/22 12:34:41 AM | ALPHA_LR=0.0003
10/22 12:34:41 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:34:41 AM | AUX_WEIGHT=0.4
10/22 12:34:41 AM | BATCH_SIZE=64
10/22 12:34:41 AM | CELLS_NUM=3
10/22 12:34:41 AM | CLEAN_ARCH=True
10/22 12:34:41 AM | CUTOUT_LENGTH=16
10/22 12:34:41 AM | DATA_DIR=./cifar
10/22 12:34:41 AM | DATA_PATH=./data/
10/22 12:34:41 AM | DATASET=cifar10
10/22 12:34:41 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:34:41 AM | DISTRIBUTED=True
10/22 12:34:41 AM | DROP_PATH_PROB=0.2
10/22 12:34:41 AM | ENSEMBLE=True
10/22 12:34:41 AM | GPUS=[0]
10/22 12:34:41 AM | INIT_CHANNELS=16
10/22 12:34:41 AM | INPUT_CHANNELS=3
10/22 12:34:41 AM | LAYER_NUM=3
10/22 12:34:41 AM | LOCAL_RANK=0
10/22 12:34:41 AM | LR_RATIO=0.5
10/22 12:34:41 AM | MODEL_TYPE=cifar
10/22 12:34:41 AM | N_CLASSES=10
10/22 12:34:41 AM | NAME=cifar10-search
10/22 12:34:41 AM | NO_REPEAT=False
10/22 12:34:41 AM | PATH=searchs/cifar10-search
10/22 12:34:41 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:34:41 AM | PRETRAIN_DECAY=0
10/22 12:34:41 AM | PRETRAIN_EPOCHS=0
10/22 12:34:41 AM | PRINT_FREQ=10
10/22 12:34:41 AM | RETRAIN_EPOCHS=1
10/22 12:34:41 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:34:41 AM | RETRAIN_SETTING=0
10/22 12:34:41 AM | RETRAIN_UPDATE_W=True
10/22 12:34:41 AM | SAME_STRUCTURE=True
10/22 12:34:41 AM | SAMPLE_RATIO=0.2
10/22 12:34:41 AM | SEARCH_ITER=25
10/22 12:34:41 AM | SEARCH_ITER_EPOCHS=1
10/22 12:34:41 AM | SEED=0
10/22 12:34:41 AM | SHORT_CONNECT=False
10/22 12:34:41 AM | SYNC_PARAM=True
10/22 12:34:41 AM | TEACHER2STUDENT=True
10/22 12:34:41 AM | TEST_DIR=/data/imagenet/val
10/22 12:34:41 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:34:41 AM | TRAIN_PORTION=0.5
10/22 12:34:41 AM | UNROLLED=False
10/22 12:34:41 AM | USE_BETA=True
10/22 12:34:41 AM | VAL_DIR=/data/imagenet/train
10/22 12:34:41 AM | W_GRAD_CLIP=5.0
10/22 12:34:41 AM | W_LR=0.05
10/22 12:34:41 AM | W_LR_MIN=0.001
10/22 12:34:41 AM | W_MOMENTUM=0.9
10/22 12:34:41 AM | W_WEIGHT_DECAY=0.0003
10/22 12:34:41 AM | WORKERS=1
10/22 12:34:41 AM | WORLD_SIZE=1
10/22 12:34:41 AM |
10/22 12:34:41 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:37:14 AM |
10/22 12:37:14 AM | Parameters:
10/22 12:37:14 AM | ALPHA_LR=0.0003
10/22 12:37:14 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:37:14 AM | AUX_WEIGHT=0.4
10/22 12:37:14 AM | BATCH_SIZE=64
10/22 12:37:14 AM | CELLS_NUM=3
10/22 12:37:14 AM | CLEAN_ARCH=True
10/22 12:37:14 AM | CUTOUT_LENGTH=16
10/22 12:37:14 AM | DATA_DIR=./cifar
10/22 12:37:14 AM | DATA_PATH=./data/
10/22 12:37:14 AM | DATASET=cifar10
10/22 12:37:14 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:37:14 AM | DISTRIBUTED=True
10/22 12:37:14 AM | DROP_PATH_PROB=0.2
10/22 12:37:14 AM | ENSEMBLE=True
10/22 12:37:14 AM | GPUS=[0]
10/22 12:37:14 AM | INIT_CHANNELS=16
10/22 12:37:14 AM | INPUT_CHANNELS=3
10/22 12:37:14 AM | LAYER_NUM=3
10/22 12:37:14 AM | LOCAL_RANK=0
10/22 12:37:14 AM | LR_RATIO=0.5
10/22 12:37:14 AM | MODEL_TYPE=cifar
10/22 12:37:14 AM | N_CLASSES=10
10/22 12:37:14 AM | NAME=cifar10-search
10/22 12:37:14 AM | NO_REPEAT=False
10/22 12:37:14 AM | PATH=searchs/cifar10-search
10/22 12:37:14 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:37:14 AM | PRETRAIN_DECAY=0
10/22 12:37:14 AM | PRETRAIN_EPOCHS=0
10/22 12:37:14 AM | PRINT_FREQ=10
10/22 12:37:14 AM | RETRAIN_EPOCHS=1
10/22 12:37:14 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:37:14 AM | RETRAIN_SETTING=0
10/22 12:37:14 AM | RETRAIN_UPDATE_W=True
10/22 12:37:14 AM | SAME_STRUCTURE=True
10/22 12:37:14 AM | SAMPLE_RATIO=0.2
10/22 12:37:14 AM | SEARCH_ITER=25
10/22 12:37:14 AM | SEARCH_ITER_EPOCHS=1
10/22 12:37:14 AM | SEED=0
10/22 12:37:14 AM | SHORT_CONNECT=False
10/22 12:37:14 AM | SYNC_PARAM=True
10/22 12:37:14 AM | TEACHER2STUDENT=True
10/22 12:37:14 AM | TEST_DIR=/data/imagenet/val
10/22 12:37:14 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:37:14 AM | TRAIN_PORTION=0.5
10/22 12:37:14 AM | UNROLLED=False
10/22 12:37:14 AM | USE_BETA=True
10/22 12:37:14 AM | VAL_DIR=/data/imagenet/train
10/22 12:37:14 AM | W_GRAD_CLIP=5.0
10/22 12:37:14 AM | W_LR=0.05
10/22 12:37:14 AM | W_LR_MIN=0.001
10/22 12:37:14 AM | W_MOMENTUM=0.9
10/22 12:37:14 AM | W_WEIGHT_DECAY=0.0003
10/22 12:37:14 AM | WORKERS=1
10/22 12:37:14 AM | WORLD_SIZE=1
10/22 12:37:14 AM |
10/22 12:37:14 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:37:47 AM |
10/22 12:37:47 AM | Parameters:
10/22 12:37:47 AM | ALPHA_LR=0.0003
10/22 12:37:47 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:37:47 AM | AUX_WEIGHT=0.4
10/22 12:37:47 AM | BATCH_SIZE=64
10/22 12:37:47 AM | CELLS_NUM=3
10/22 12:37:47 AM | CLEAN_ARCH=True
10/22 12:37:47 AM | CUTOUT_LENGTH=16
10/22 12:37:47 AM | DATA_DIR=./cifar
10/22 12:37:47 AM | DATA_PATH=./data/
10/22 12:37:47 AM | DATASET=cifar10
10/22 12:37:47 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:37:47 AM | DISTRIBUTED=True
10/22 12:37:47 AM | DROP_PATH_PROB=0.2
10/22 12:37:47 AM | ENSEMBLE=True
10/22 12:37:47 AM | GPUS=[0]
10/22 12:37:47 AM | INIT_CHANNELS=16
10/22 12:37:47 AM | INPUT_CHANNELS=3
10/22 12:37:47 AM | LAYER_NUM=3
10/22 12:37:47 AM | LOCAL_RANK=0
10/22 12:37:47 AM | LR_RATIO=0.5
10/22 12:37:47 AM | MODEL_TYPE=cifar
10/22 12:37:47 AM | N_CLASSES=10
10/22 12:37:47 AM | NAME=cifar10-search
10/22 12:37:47 AM | NO_REPEAT=False
10/22 12:37:47 AM | PATH=searchs/cifar10-search
10/22 12:37:47 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:37:47 AM | PRETRAIN_DECAY=0
10/22 12:37:47 AM | PRETRAIN_EPOCHS=0
10/22 12:37:47 AM | PRINT_FREQ=10
10/22 12:37:47 AM | RETRAIN_EPOCHS=1
10/22 12:37:47 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:37:47 AM | RETRAIN_SETTING=0
10/22 12:37:47 AM | RETRAIN_UPDATE_W=True
10/22 12:37:47 AM | SAME_STRUCTURE=True
10/22 12:37:47 AM | SAMPLE_RATIO=0.2
10/22 12:37:47 AM | SEARCH_ITER=25
10/22 12:37:47 AM | SEARCH_ITER_EPOCHS=1
10/22 12:37:47 AM | SEED=0
10/22 12:37:47 AM | SHORT_CONNECT=False
10/22 12:37:47 AM | SYNC_PARAM=True
10/22 12:37:47 AM | TEACHER2STUDENT=True
10/22 12:37:47 AM | TEST_DIR=/data/imagenet/val
10/22 12:37:47 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:37:47 AM | TRAIN_PORTION=0.5
10/22 12:37:47 AM | UNROLLED=False
10/22 12:37:47 AM | USE_BETA=True
10/22 12:37:47 AM | VAL_DIR=/data/imagenet/train
10/22 12:37:47 AM | W_GRAD_CLIP=5.0
10/22 12:37:47 AM | W_LR=0.05
10/22 12:37:47 AM | W_LR_MIN=0.001
10/22 12:37:47 AM | W_MOMENTUM=0.9
10/22 12:37:47 AM | W_WEIGHT_DECAY=0.0003
10/22 12:37:47 AM | WORKERS=1
10/22 12:37:47 AM | WORLD_SIZE=1
10/22 12:37:47 AM |
10/22 12:37:47 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:38:25 AM |
10/22 12:38:25 AM | Parameters:
10/22 12:38:25 AM | ALPHA_LR=0.0003
10/22 12:38:25 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:38:25 AM | AUX_WEIGHT=0.4
10/22 12:38:25 AM | BATCH_SIZE=64
10/22 12:38:25 AM | CELLS_NUM=3
10/22 12:38:25 AM | CLEAN_ARCH=True
10/22 12:38:25 AM | CUTOUT_LENGTH=16
10/22 12:38:25 AM | DATA_DIR=./cifar
10/22 12:38:25 AM | DATA_PATH=./data/
10/22 12:38:25 AM | DATASET=cifar10
10/22 12:38:25 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:38:25 AM | DISTRIBUTED=True
10/22 12:38:25 AM | DROP_PATH_PROB=0.2
10/22 12:38:25 AM | ENSEMBLE=True
10/22 12:38:25 AM | GPUS=[0]
10/22 12:38:25 AM | INIT_CHANNELS=16
10/22 12:38:25 AM | INPUT_CHANNELS=3
10/22 12:38:25 AM | LAYER_NUM=3
10/22 12:38:25 AM | LOCAL_RANK=0
10/22 12:38:25 AM | LR_RATIO=0.5
10/22 12:38:25 AM | MODEL_TYPE=cifar
10/22 12:38:25 AM | N_CLASSES=10
10/22 12:38:25 AM | NAME=cifar10-search
10/22 12:38:25 AM | NO_REPEAT=False
10/22 12:38:25 AM | PATH=searchs/cifar10-search
10/22 12:38:25 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:38:25 AM | PRETRAIN_DECAY=0
10/22 12:38:25 AM | PRETRAIN_EPOCHS=0
10/22 12:38:25 AM | PRINT_FREQ=10
10/22 12:38:25 AM | RETRAIN_EPOCHS=1
10/22 12:38:25 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:38:25 AM | RETRAIN_SETTING=0
10/22 12:38:25 AM | RETRAIN_UPDATE_W=True
10/22 12:38:25 AM | SAME_STRUCTURE=True
10/22 12:38:25 AM | SAMPLE_RATIO=0.2
10/22 12:38:25 AM | SEARCH_ITER=25
10/22 12:38:25 AM | SEARCH_ITER_EPOCHS=1
10/22 12:38:25 AM | SEED=0
10/22 12:38:25 AM | SHORT_CONNECT=False
10/22 12:38:25 AM | SYNC_PARAM=True
10/22 12:38:25 AM | TEACHER2STUDENT=True
10/22 12:38:25 AM | TEST_DIR=/data/imagenet/val
10/22 12:38:25 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:38:25 AM | TRAIN_PORTION=0.5
10/22 12:38:25 AM | UNROLLED=False
10/22 12:38:25 AM | USE_BETA=True
10/22 12:38:25 AM | VAL_DIR=/data/imagenet/train
10/22 12:38:25 AM | W_GRAD_CLIP=5.0
10/22 12:38:25 AM | W_LR=0.05
10/22 12:38:25 AM | W_LR_MIN=0.001
10/22 12:38:25 AM | W_MOMENTUM=0.9
10/22 12:38:25 AM | W_WEIGHT_DECAY=0.0003
10/22 12:38:25 AM | WORKERS=1
10/22 12:38:25 AM | WORLD_SIZE=1
10/22 12:38:25 AM |
10/22 12:38:25 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:41:07 AM |
10/22 12:41:07 AM | Parameters:
10/22 12:41:07 AM | ALPHA_LR=0.0003
10/22 12:41:07 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:41:07 AM | AUX_WEIGHT=0.4
10/22 12:41:07 AM | BATCH_SIZE=64
10/22 12:41:07 AM | CELLS_NUM=3
10/22 12:41:07 AM | CLEAN_ARCH=True
10/22 12:41:07 AM | CUTOUT_LENGTH=16
10/22 12:41:07 AM | DATA_DIR=./cifar
10/22 12:41:07 AM | DATA_PATH=./data/
10/22 12:41:07 AM | DATASET=cifar10
10/22 12:41:07 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:41:07 AM | DISTRIBUTED=True
10/22 12:41:07 AM | DROP_PATH_PROB=0.2
10/22 12:41:07 AM | ENSEMBLE=True
10/22 12:41:07 AM | GPUS=[0]
10/22 12:41:07 AM | INIT_CHANNELS=16
10/22 12:41:07 AM | INPUT_CHANNELS=3
10/22 12:41:07 AM | LAYER_NUM=3
10/22 12:41:07 AM | LOCAL_RANK=0
10/22 12:41:07 AM | LR_RATIO=0.5
10/22 12:41:07 AM | MODEL_TYPE=cifar
10/22 12:41:07 AM | N_CLASSES=10
10/22 12:41:07 AM | NAME=cifar10-search
10/22 12:41:07 AM | NO_REPEAT=False
10/22 12:41:07 AM | PATH=searchs/cifar10-search
10/22 12:41:07 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:41:07 AM | PRETRAIN_DECAY=0
10/22 12:41:07 AM | PRETRAIN_EPOCHS=0
10/22 12:41:07 AM | PRINT_FREQ=10
10/22 12:41:07 AM | RETRAIN_EPOCHS=1
10/22 12:41:07 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:41:07 AM | RETRAIN_SETTING=0
10/22 12:41:07 AM | RETRAIN_UPDATE_W=True
10/22 12:41:07 AM | SAME_STRUCTURE=True
10/22 12:41:07 AM | SAMPLE_RATIO=0.2
10/22 12:41:07 AM | SEARCH_ITER=25
10/22 12:41:07 AM | SEARCH_ITER_EPOCHS=1
10/22 12:41:07 AM | SEED=0
10/22 12:41:07 AM | SHORT_CONNECT=False
10/22 12:41:07 AM | SYNC_PARAM=True
10/22 12:41:07 AM | TEACHER2STUDENT=True
10/22 12:41:07 AM | TEST_DIR=/data/imagenet/val
10/22 12:41:07 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:41:07 AM | TRAIN_PORTION=0.5
10/22 12:41:07 AM | UNROLLED=False
10/22 12:41:07 AM | USE_BETA=True
10/22 12:41:07 AM | VAL_DIR=/data/imagenet/train
10/22 12:41:07 AM | W_GRAD_CLIP=5.0
10/22 12:41:07 AM | W_LR=0.05
10/22 12:41:07 AM | W_LR_MIN=0.001
10/22 12:41:07 AM | W_MOMENTUM=0.9
10/22 12:41:07 AM | W_WEIGHT_DECAY=0.0003
10/22 12:41:07 AM | WORKERS=1
10/22 12:41:07 AM | WORLD_SIZE=1
10/22 12:41:07 AM |
10/22 12:41:07 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/391 Loss 2.300 Prec@(1,5) (6.2%, 50.0%)
10/22 12:43:33 AM |
10/22 12:43:33 AM | Parameters:
10/22 12:43:33 AM | ALPHA_LR=0.0003
10/22 12:43:33 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:43:33 AM | AUX_WEIGHT=0.4
10/22 12:43:33 AM | BATCH_SIZE=64
10/22 12:43:33 AM | CELLS_NUM=3
10/22 12:43:33 AM | CLEAN_ARCH=True
10/22 12:43:33 AM | CUTOUT_LENGTH=16
10/22 12:43:33 AM | DATA_DIR=./cifar
10/22 12:43:33 AM | DATA_PATH=./data/
10/22 12:43:33 AM | DATASET=cifar10
10/22 12:43:33 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:43:33 AM | DISTRIBUTED=True
10/22 12:43:33 AM | DROP_PATH_PROB=0.2
10/22 12:43:33 AM | ENSEMBLE=True
10/22 12:43:33 AM | GPUS=[0]
10/22 12:43:33 AM | INIT_CHANNELS=16
10/22 12:43:33 AM | INPUT_CHANNELS=3
10/22 12:43:33 AM | LAYER_NUM=3
10/22 12:43:33 AM | LOCAL_RANK=0
10/22 12:43:33 AM | LR_RATIO=0.5
10/22 12:43:33 AM | MODEL_TYPE=cifar
10/22 12:43:33 AM | N_CLASSES=10
10/22 12:43:33 AM | NAME=cifar10-search
10/22 12:43:33 AM | NO_REPEAT=False
10/22 12:43:33 AM | PATH=searchs/cifar10-search
10/22 12:43:33 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:43:33 AM | PRETRAIN_DECAY=0
10/22 12:43:33 AM | PRETRAIN_EPOCHS=0
10/22 12:43:33 AM | PRINT_FREQ=10
10/22 12:43:33 AM | RETRAIN_EPOCHS=1
10/22 12:43:33 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:43:33 AM | RETRAIN_SETTING=0
10/22 12:43:33 AM | RETRAIN_UPDATE_W=True
10/22 12:43:33 AM | SAME_STRUCTURE=True
10/22 12:43:33 AM | SAMPLE_RATIO=0.2
10/22 12:43:33 AM | SEARCH_ITER=25
10/22 12:43:33 AM | SEARCH_ITER_EPOCHS=1
10/22 12:43:33 AM | SEED=0
10/22 12:43:33 AM | SHORT_CONNECT=False
10/22 12:43:33 AM | SYNC_PARAM=True
10/22 12:43:33 AM | TEACHER2STUDENT=True
10/22 12:43:33 AM | TEST_DIR=/data/imagenet/val
10/22 12:43:33 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:43:33 AM | TRAIN_PORTION=0.5
10/22 12:43:33 AM | UNROLLED=False
10/22 12:43:33 AM | USE_BETA=True
10/22 12:43:33 AM | VAL_DIR=/data/imagenet/train
10/22 12:43:33 AM | W_GRAD_CLIP=5.0
10/22 12:43:33 AM | W_LR=0.05
10/22 12:43:33 AM | W_LR_MIN=0.001
10/22 12:43:33 AM | W_MOMENTUM=0.9
10/22 12:43:33 AM | W_WEIGHT_DECAY=0.0003
10/22 12:43:33 AM | WORKERS=1
10/22 12:43:33 AM | WORLD_SIZE=1
10/22 12:43:33 AM |
10/22 12:43:33 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:44:42 AM |
10/22 12:44:42 AM | Parameters:
10/22 12:44:42 AM | ALPHA_LR=0.0003
10/22 12:44:42 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:44:42 AM | AUX_WEIGHT=0.4
10/22 12:44:42 AM | BATCH_SIZE=64
10/22 12:44:42 AM | CELLS_NUM=3
10/22 12:44:42 AM | CLEAN_ARCH=True
10/22 12:44:42 AM | CUTOUT_LENGTH=16
10/22 12:44:42 AM | DATA_DIR=./cifar
10/22 12:44:42 AM | DATA_PATH=./data/
10/22 12:44:42 AM | DATASET=cifar10
10/22 12:44:42 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:44:42 AM | DISTRIBUTED=True
10/22 12:44:42 AM | DROP_PATH_PROB=0.2
10/22 12:44:42 AM | ENSEMBLE=True
10/22 12:44:42 AM | GPUS=[0]
10/22 12:44:42 AM | INIT_CHANNELS=16
10/22 12:44:42 AM | INPUT_CHANNELS=3
10/22 12:44:42 AM | LAYER_NUM=3
10/22 12:44:42 AM | LOCAL_RANK=0
10/22 12:44:42 AM | LR_RATIO=0.5
10/22 12:44:42 AM | MODEL_TYPE=cifar
10/22 12:44:42 AM | N_CLASSES=10
10/22 12:44:42 AM | NAME=cifar10-search
10/22 12:44:42 AM | NO_REPEAT=False
10/22 12:44:42 AM | PATH=searchs/cifar10-search
10/22 12:44:42 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:44:42 AM | PRETRAIN_DECAY=0
10/22 12:44:42 AM | PRETRAIN_EPOCHS=0
10/22 12:44:42 AM | PRINT_FREQ=10
10/22 12:44:42 AM | RETRAIN_EPOCHS=1
10/22 12:44:42 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:44:42 AM | RETRAIN_SETTING=0
10/22 12:44:42 AM | RETRAIN_UPDATE_W=True
10/22 12:44:42 AM | SAME_STRUCTURE=True
10/22 12:44:42 AM | SAMPLE_RATIO=0.2
10/22 12:44:42 AM | SEARCH_ITER=25
10/22 12:44:42 AM | SEARCH_ITER_EPOCHS=1
10/22 12:44:42 AM | SEED=0
10/22 12:44:42 AM | SHORT_CONNECT=False
10/22 12:44:42 AM | SYNC_PARAM=True
10/22 12:44:42 AM | TEACHER2STUDENT=True
10/22 12:44:42 AM | TEST_DIR=/data/imagenet/val
10/22 12:44:42 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:44:42 AM | TRAIN_PORTION=0.5
10/22 12:44:42 AM | UNROLLED=False
10/22 12:44:42 AM | USE_BETA=True
10/22 12:44:42 AM | VAL_DIR=/data/imagenet/train
10/22 12:44:42 AM | W_GRAD_CLIP=5.0
10/22 12:44:42 AM | W_LR=0.05
10/22 12:44:42 AM | W_LR_MIN=0.001
10/22 12:44:42 AM | W_MOMENTUM=0.9
10/22 12:44:42 AM | W_WEIGHT_DECAY=0.0003
10/22 12:44:42 AM | WORKERS=1
10/22 12:44:42 AM | WORLD_SIZE=1
10/22 12:44:42 AM |
10/22 12:44:42 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
10/22 12:46:37 AM |
10/22 12:46:37 AM | Parameters:
10/22 12:46:37 AM | ALPHA_LR=0.0003
10/22 12:46:37 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:46:37 AM | AUX_WEIGHT=0.4
10/22 12:46:37 AM | BATCH_SIZE=64
10/22 12:46:37 AM | CELLS_NUM=3
10/22 12:46:37 AM | CLEAN_ARCH=True
10/22 12:46:37 AM | CUTOUT_LENGTH=16
10/22 12:46:37 AM | DATA_DIR=./cifar
10/22 12:46:37 AM | DATA_PATH=./data/
10/22 12:46:37 AM | DATASET=cifar10
10/22 12:46:37 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:46:37 AM | DISTRIBUTED=True
10/22 12:46:37 AM | DROP_PATH_PROB=0.2
10/22 12:46:37 AM | ENSEMBLE=True
10/22 12:46:37 AM | GPUS=[0]
10/22 12:46:37 AM | INIT_CHANNELS=16
10/22 12:46:37 AM | INPUT_CHANNELS=3
10/22 12:46:37 AM | LAYER_NUM=3
10/22 12:46:37 AM | LOCAL_RANK=0
10/22 12:46:37 AM | LR_RATIO=0.5
10/22 12:46:37 AM | MODEL_TYPE=cifar
10/22 12:46:37 AM | N_CLASSES=10
10/22 12:46:37 AM | NAME=cifar10-search
10/22 12:46:37 AM | NO_REPEAT=False
10/22 12:46:37 AM | PATH=searchs/cifar10-search
10/22 12:46:37 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:46:37 AM | PRETRAIN_DECAY=0
10/22 12:46:37 AM | PRETRAIN_EPOCHS=0
10/22 12:46:37 AM | PRINT_FREQ=10
10/22 12:46:37 AM | RETRAIN_EPOCHS=1
10/22 12:46:37 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:46:37 AM | RETRAIN_SETTING=0
10/22 12:46:37 AM | RETRAIN_UPDATE_W=True
10/22 12:46:37 AM | SAME_STRUCTURE=True
10/22 12:46:37 AM | SAMPLE_RATIO=0.2
10/22 12:46:37 AM | SEARCH_ITER=25
10/22 12:46:37 AM | SEARCH_ITER_EPOCHS=1
10/22 12:46:37 AM | SEED=0
10/22 12:46:37 AM | SHORT_CONNECT=False
10/22 12:46:37 AM | SYNC_PARAM=True
10/22 12:46:37 AM | TEACHER2STUDENT=True
10/22 12:46:37 AM | TEST_DIR=/data/imagenet/val
10/22 12:46:37 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:46:37 AM | TRAIN_PORTION=0.5
10/22 12:46:37 AM | UNROLLED=False
10/22 12:46:37 AM | USE_BETA=True
10/22 12:46:37 AM | VAL_DIR=/data/imagenet/train
10/22 12:46:37 AM | W_GRAD_CLIP=5.0
10/22 12:46:37 AM | W_LR=0.05
10/22 12:46:37 AM | W_LR_MIN=0.001
10/22 12:46:37 AM | W_MOMENTUM=0.9
10/22 12:46:37 AM | W_WEIGHT_DECAY=0.0003
10/22 12:46:37 AM | WORKERS=1
10/22 12:46:37 AM | WORLD_SIZE=1
10/22 12:46:37 AM |
10/22 12:46:37 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:47:58 AM |
10/22 12:47:58 AM | Parameters:
10/22 12:47:58 AM | ALPHA_LR=0.0003
10/22 12:47:58 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:47:58 AM | AUX_WEIGHT=0.4
10/22 12:47:58 AM | BATCH_SIZE=64
10/22 12:47:58 AM | CELLS_NUM=3
10/22 12:47:58 AM | CLEAN_ARCH=True
10/22 12:47:58 AM | CUTOUT_LENGTH=16
10/22 12:47:58 AM | DATA_DIR=./cifar
10/22 12:47:58 AM | DATA_PATH=./data/
10/22 12:47:58 AM | DATASET=cifar10
10/22 12:47:58 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:47:58 AM | DISTRIBUTED=True
10/22 12:47:58 AM | DROP_PATH_PROB=0.2
10/22 12:47:58 AM | ENSEMBLE=True
10/22 12:47:58 AM | GPUS=[0]
10/22 12:47:58 AM | INIT_CHANNELS=16
10/22 12:47:58 AM | INPUT_CHANNELS=3
10/22 12:47:58 AM | LAYER_NUM=3
10/22 12:47:58 AM | LOCAL_RANK=0
10/22 12:47:58 AM | LR_RATIO=0.5
10/22 12:47:58 AM | MODEL_TYPE=cifar
10/22 12:47:58 AM | N_CLASSES=10
10/22 12:47:58 AM | NAME=cifar10-search
10/22 12:47:58 AM | NO_REPEAT=False
10/22 12:47:58 AM | PATH=searchs/cifar10-search
10/22 12:47:58 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:47:58 AM | PRETRAIN_DECAY=0
10/22 12:47:58 AM | PRETRAIN_EPOCHS=0
10/22 12:47:58 AM | PRINT_FREQ=10
10/22 12:47:58 AM | RETRAIN_EPOCHS=1
10/22 12:47:58 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:47:58 AM | RETRAIN_SETTING=0
10/22 12:47:58 AM | RETRAIN_UPDATE_W=True
10/22 12:47:58 AM | SAME_STRUCTURE=True
10/22 12:47:58 AM | SAMPLE_RATIO=0.2
10/22 12:47:58 AM | SEARCH_ITER=25
10/22 12:47:58 AM | SEARCH_ITER_EPOCHS=1
10/22 12:47:58 AM | SEED=0
10/22 12:47:58 AM | SHORT_CONNECT=False
10/22 12:47:58 AM | SYNC_PARAM=True
10/22 12:47:58 AM | TEACHER2STUDENT=True
10/22 12:47:58 AM | TEST_DIR=/data/imagenet/val
10/22 12:47:58 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:47:58 AM | TRAIN_PORTION=0.5
10/22 12:47:58 AM | UNROLLED=False
10/22 12:47:58 AM | USE_BETA=True
10/22 12:47:58 AM | VAL_DIR=/data/imagenet/train
10/22 12:47:58 AM | W_GRAD_CLIP=5.0
10/22 12:47:58 AM | W_LR=0.05
10/22 12:47:58 AM | W_LR_MIN=0.001
10/22 12:47:58 AM | W_MOMENTUM=0.9
10/22 12:47:58 AM | W_WEIGHT_DECAY=0.0003
10/22 12:47:58 AM | WORKERS=1
10/22 12:47:58 AM | WORLD_SIZE=1
10/22 12:47:58 AM |
10/22 12:47:58 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
####### ALPHA #######
# Alpha - normal
tensor([[0.1248, 0.1249, 0.1254, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251, 0.1248, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250, 0.1248],
[0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1246, 0.1251],
[0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1251],
[0.1249, 0.1250, 0.1252, 0.1251, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1247, 0.1249, 0.1251, 0.1251, 0.1250, 0.1250, 0.1250, 0.1252],
[0.1249, 0.1250, 0.1249, 0.1251, 0.1249, 0.1251, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1252, 0.1252, 0.1249, 0.1249, 0.1248, 0.1252],
[0.1252, 0.1250, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1249, 0.1249, 0.1252, 0.1250, 0.1252, 0.1249, 0.1248],
[0.1248, 0.1253, 0.1249, 0.1249, 0.1251, 0.1252, 0.1248, 0.1250],
[0.1249, 0.1249, 0.1252, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1251, 0.1250, 0.1249, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1251, 0.1251],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1253, 0.1249, 0.1250],
[0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1251, 0.1249, 0.1251, 0.1250, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1249, 0.1250, 0.1252, 0.1250],
[0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1250, 0.1250, 0.1251],
[0.1249, 0.1250, 0.1250, 0.1250, 0.1252, 0.1252, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1249, 0.1251, 0.1250, 0.1251, 0.1248, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1249, 0.1249, 0.1250, 0.1252, 0.1249, 0.1252],
[0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1248],
[0.1251, 0.1251, 0.1248, 0.1249, 0.1248, 0.1251, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5004, 0.4996], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3329, 0.3339], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2503, 0.2499, 0.2498], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.1998, 0.2001, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3336, 0.3327, 0.3337], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2502, 0.2498], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2000, 0.1998, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1251, 0.1248, 0.1252, 0.1252, 0.1250, 0.1248, 0.1249],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1248, 0.1250, 0.1252, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1247, 0.1250, 0.1250, 0.1254, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1252, 0.1248, 0.1249, 0.1249],
[0.1251, 0.1251, 0.1250, 0.1248, 0.1251, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1253, 0.1251, 0.1250, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1250, 0.1247, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1250, 0.1252],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1252],
[0.1249, 0.1249, 0.1249, 0.1253, 0.1251, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1250, 0.1249, 0.1251, 0.1251, 0.1251, 0.1248, 0.1251],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1249, 0.1250, 0.1247, 0.1252, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1252, 0.1249, 0.1249, 0.1252, 0.1248, 0.1252, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1250, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1252, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251],
[0.1249, 0.1250, 0.1250, 0.1250, 0.1252, 0.1251, 0.1248, 0.1250],
[0.1248, 0.1249, 0.1250, 0.1249, 0.1249, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1248, 0.1251, 0.1251, 0.1249, 0.1248, 0.1253, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5004, 0.4996], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3338, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2496, 0.2501, 0.2499, 0.2504], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1998, 0.2001, 0.2000, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4998, 0.5002], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3335, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2499, 0.2498, 0.2498, 0.2504], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2002, 0.1999, 0.1999], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1249, 0.1250],
[0.1248, 0.1248, 0.1248, 0.1252, 0.1250, 0.1251, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1251, 0.1250],
[0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251],
[0.1251, 0.1249, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1248, 0.1252, 0.1252, 0.1250, 0.1248, 0.1252],
[0.1250, 0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1248, 0.1252],
[0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251],
[0.1247, 0.1249, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1252, 0.1250, 0.1249, 0.1252, 0.1249, 0.1250, 0.1248],
[0.1250, 0.1248, 0.1252, 0.1250, 0.1250, 0.1250, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1249, 0.1250, 0.1249, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5002, 0.4998], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3331, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2504, 0.2498, 0.2497, 0.2502], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2002, 0.1997, 0.1998, 0.2000, 0.2003], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 2/25 Step 000/002 Loss 2.273 Prec@(1,5) (18.8%, 60.9%)
Train: Layer 1/3 Epoch 2/25 Step 001/002 Loss 2.270 Prec@(1,5) (18.0%, 55.5%)
Train: Layer 1/3 Epoch 2/25 Final Prec@1 17.9688%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('max_pool_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 1)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('dil_conv_5x5', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_3x3', 2), ('sep_conv_5x5', 1)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('dil_conv_5x5', 4), ('avg_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('max_pool_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 1)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('dil_conv_5x5', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_3x3', 2), ('sep_conv_5x5', 1)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('dil_conv_5x5', 4), ('avg_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
####### ALPHA #######
# Alpha - normal
tensor([[0.1248, 0.1249, 0.1255, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1248, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1251, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248],
[0.1249, 0.1249, 0.1251, 0.1251, 0.1249, 0.1252, 0.1247, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1252, 0.1249, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1246, 0.1248, 0.1251, 0.1250, 0.1251, 0.1251, 0.1251, 0.1252],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1249, 0.1251, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1252, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1249, 0.1250, 0.1252, 0.1249, 0.1252, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1249, 0.1248, 0.1251, 0.1251, 0.1249, 0.1251],
[0.1249, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1253, 0.1251, 0.1249],
[0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1251, 0.1251],
[0.1250, 0.1249, 0.1248, 0.1249, 0.1251, 0.1254, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1248, 0.1252, 0.1251, 0.1251, 0.1251],
[0.1251, 0.1250, 0.1247, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1248, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1250, 0.1251],
[0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1252, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1252, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1248],
[0.1249, 0.1252, 0.1251, 0.1251, 0.1248, 0.1251, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251, 0.1249, 0.1252],
[0.1251, 0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1251, 0.1248],
[0.1251, 0.1252, 0.1248, 0.1248, 0.1247, 0.1252, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5003, 0.4997], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3329, 0.3337], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2503, 0.2498, 0.2498], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1999, 0.1998, 0.2002, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3328, 0.3337], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2502, 0.2499, 0.2502, 0.2498], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2001, 0.2001, 0.1997, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1251, 0.1249, 0.1253, 0.1252, 0.1250, 0.1248, 0.1249],
[0.1249, 0.1252, 0.1250, 0.1249, 0.1248, 0.1250, 0.1251, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1247, 0.1250, 0.1250, 0.1254, 0.1252, 0.1246, 0.1249, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1252, 0.1248, 0.1248, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1252, 0.1251, 0.1249, 0.1250, 0.1248, 0.1251, 0.1249],
[0.1251, 0.1253, 0.1252, 0.1249, 0.1249, 0.1248, 0.1249, 0.1248],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1247, 0.1251, 0.1250, 0.1251],
[0.1251, 0.1253, 0.1249, 0.1249, 0.1251, 0.1249, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1250, 0.1247, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1247, 0.1251, 0.1249, 0.1250, 0.1252],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1252],
[0.1249, 0.1249, 0.1249, 0.1253, 0.1251, 0.1251, 0.1249, 0.1248],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1250, 0.1249, 0.1251, 0.1251, 0.1250, 0.1249, 0.1251],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1249, 0.1250, 0.1247, 0.1252, 0.1252, 0.1249, 0.1250],
[0.1249, 0.1252, 0.1249, 0.1249, 0.1252, 0.1248, 0.1252, 0.1249],
[0.1249, 0.1251, 0.1249, 0.1248, 0.1252, 0.1249, 0.1250, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1251, 0.1249, 0.1249, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1250, 0.1250, 0.1251, 0.1248, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1249, 0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1251],
[0.1249, 0.1252, 0.1248, 0.1252, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1249, 0.1251, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1250, 0.1249, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1250, 0.1250, 0.1251, 0.1253, 0.1251, 0.1247, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1249, 0.1249, 0.1250, 0.1252, 0.1252],
[0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1254, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5002, 0.4998], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3338, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2495, 0.2501, 0.2499, 0.2506], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1998, 0.2002, 0.1999, 0.2002, 0.1999], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4998, 0.5002], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3335, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2499, 0.2499, 0.2498, 0.2504], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1998, 0.2001, 0.2003, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1251, 0.1250, 0.1253, 0.1248, 0.1250, 0.1249, 0.1251],
[0.1247, 0.1248, 0.1248, 0.1252, 0.1251, 0.1251, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1249, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251],
[0.1249, 0.1249, 0.1249, 0.1251, 0.1252, 0.1249, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1249, 0.1253, 0.1249, 0.1248, 0.1250, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1248, 0.1252, 0.1252, 0.1249, 0.1248, 0.1252],
[0.1250, 0.1249, 0.1250, 0.1251, 0.1251, 0.1250, 0.1248, 0.1252],
[0.1248, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1249, 0.1252],
[0.1246, 0.1249, 0.1250, 0.1250, 0.1251, 0.1252, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1252, 0.1251, 0.1249, 0.1252, 0.1248, 0.1250, 0.1248],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1249, 0.1250, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1248, 0.1252, 0.1251],
[0.1251, 0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2496, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2003, 0.1997, 0.1998, 0.1998, 0.2004], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:48:58 AM |
10/22 12:48:58 AM | Parameters:
10/22 12:48:58 AM | ALPHA_LR=0.0003
10/22 12:48:58 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:48:58 AM | AUX_WEIGHT=0.4
10/22 12:48:58 AM | BATCH_SIZE=64
10/22 12:48:58 AM | CELLS_NUM=3
10/22 12:48:58 AM | CLEAN_ARCH=True
10/22 12:48:58 AM | CUTOUT_LENGTH=16
10/22 12:48:58 AM | DATA_DIR=./cifar
10/22 12:48:58 AM | DATA_PATH=./data/
10/22 12:48:58 AM | DATASET=cifar10
10/22 12:48:58 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:48:58 AM | DISTRIBUTED=True
10/22 12:48:58 AM | DROP_PATH_PROB=0.2
10/22 12:48:58 AM | ENSEMBLE=True
10/22 12:48:58 AM | GPUS=[0]
10/22 12:48:58 AM | INIT_CHANNELS=16
10/22 12:48:58 AM | INPUT_CHANNELS=3
10/22 12:48:58 AM | LAYER_NUM=3
10/22 12:48:58 AM | LOCAL_RANK=0
10/22 12:48:58 AM | LR_RATIO=0.5
10/22 12:48:58 AM | MODEL_TYPE=cifar
10/22 12:48:58 AM | N_CLASSES=10
10/22 12:48:58 AM | NAME=cifar10-search
10/22 12:48:58 AM | NO_REPEAT=False
10/22 12:48:58 AM | PATH=searchs/cifar10-search
10/22 12:48:58 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:48:58 AM | PRETRAIN_DECAY=0
10/22 12:48:58 AM | PRETRAIN_EPOCHS=0
10/22 12:48:58 AM | PRINT_FREQ=10
10/22 12:48:58 AM | RETRAIN_EPOCHS=1
10/22 12:48:58 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:48:58 AM | RETRAIN_SETTING=0
10/22 12:48:58 AM | RETRAIN_UPDATE_W=True
10/22 12:48:58 AM | SAME_STRUCTURE=True
10/22 12:48:58 AM | SAMPLE_RATIO=0.2
10/22 12:48:58 AM | SEARCH_ITER=25
10/22 12:48:58 AM | SEARCH_ITER_EPOCHS=1
10/22 12:48:58 AM | SEED=0
10/22 12:48:58 AM | SHORT_CONNECT=False
10/22 12:48:58 AM | SYNC_PARAM=True
10/22 12:48:58 AM | TEACHER2STUDENT=True
10/22 12:48:58 AM | TEST_DIR=/data/imagenet/val
10/22 12:48:58 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:48:58 AM | TRAIN_PORTION=0.5
10/22 12:48:58 AM | UNROLLED=False
10/22 12:48:58 AM | USE_BETA=True
10/22 12:48:58 AM | VAL_DIR=/data/imagenet/train
10/22 12:48:58 AM | W_GRAD_CLIP=5.0
10/22 12:48:58 AM | W_LR=0.05
10/22 12:48:58 AM | W_LR_MIN=0.001
10/22 12:48:58 AM | W_MOMENTUM=0.9
10/22 12:48:58 AM | W_WEIGHT_DECAY=0.0003
10/22 12:48:58 AM | WORKERS=1
10/22 12:48:58 AM | WORLD_SIZE=1
10/22 12:48:58 AM |
10/22 12:48:58 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
10/22 12:49:16 AM |
10/22 12:49:16 AM | Parameters:
10/22 12:49:16 AM | ALPHA_LR=0.0003
10/22 12:49:16 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:49:16 AM | AUX_WEIGHT=0.4
10/22 12:49:16 AM | BATCH_SIZE=64
10/22 12:49:16 AM | CELLS_NUM=3
10/22 12:49:16 AM | CLEAN_ARCH=True
10/22 12:49:16 AM | CUTOUT_LENGTH=16
10/22 12:49:16 AM | DATA_DIR=./cifar
10/22 12:49:16 AM | DATA_PATH=./data/
10/22 12:49:16 AM | DATASET=cifar10
10/22 12:49:16 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:49:16 AM | DISTRIBUTED=True
10/22 12:49:16 AM | DROP_PATH_PROB=0.2
10/22 12:49:16 AM | ENSEMBLE=True
10/22 12:49:16 AM | GPUS=[0]
10/22 12:49:16 AM | INIT_CHANNELS=16
10/22 12:49:16 AM | INPUT_CHANNELS=3
10/22 12:49:16 AM | LAYER_NUM=3
10/22 12:49:16 AM | LOCAL_RANK=0
10/22 12:49:16 AM | LR_RATIO=0.5
10/22 12:49:16 AM | MODEL_TYPE=cifar
10/22 12:49:16 AM | N_CLASSES=10
10/22 12:49:16 AM | NAME=cifar10-search
10/22 12:49:16 AM | NO_REPEAT=False
10/22 12:49:16 AM | PATH=searchs/cifar10-search
10/22 12:49:16 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:49:16 AM | PRETRAIN_DECAY=0
10/22 12:49:16 AM | PRETRAIN_EPOCHS=0
10/22 12:49:16 AM | PRINT_FREQ=10
10/22 12:49:16 AM | RETRAIN_EPOCHS=1
10/22 12:49:16 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:49:16 AM | RETRAIN_SETTING=0
10/22 12:49:16 AM | RETRAIN_UPDATE_W=True
10/22 12:49:16 AM | SAME_STRUCTURE=True
10/22 12:49:16 AM | SAMPLE_RATIO=0.2
10/22 12:49:16 AM | SEARCH_ITER=25
10/22 12:49:16 AM | SEARCH_ITER_EPOCHS=1
10/22 12:49:16 AM | SEED=0
10/22 12:49:16 AM | SHORT_CONNECT=False
10/22 12:49:16 AM | SYNC_PARAM=True
10/22 12:49:16 AM | TEACHER2STUDENT=True
10/22 12:49:16 AM | TEST_DIR=/data/imagenet/val
10/22 12:49:16 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:49:16 AM | TRAIN_PORTION=0.5
10/22 12:49:16 AM | UNROLLED=False
10/22 12:49:16 AM | USE_BETA=True
10/22 12:49:16 AM | VAL_DIR=/data/imagenet/train
10/22 12:49:16 AM | W_GRAD_CLIP=5.0
10/22 12:49:16 AM | W_LR=0.05
10/22 12:49:16 AM | W_LR_MIN=0.001
10/22 12:49:16 AM | W_MOMENTUM=0.9
10/22 12:49:16 AM | W_WEIGHT_DECAY=0.0003
10/22 12:49:16 AM | WORKERS=1
10/22 12:49:16 AM | WORLD_SIZE=1
10/22 12:49:16 AM |
10/22 12:49:16 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:49:46 AM |
10/22 12:49:46 AM | Parameters:
10/22 12:49:46 AM | ALPHA_LR=0.0003
10/22 12:49:46 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:49:46 AM | AUX_WEIGHT=0.4
10/22 12:49:46 AM | BATCH_SIZE=64
10/22 12:49:46 AM | CELLS_NUM=3
10/22 12:49:46 AM | CLEAN_ARCH=True
10/22 12:49:46 AM | CUTOUT_LENGTH=16
10/22 12:49:46 AM | DATA_DIR=./cifar
10/22 12:49:46 AM | DATA_PATH=./data/
10/22 12:49:46 AM | DATASET=cifar10
10/22 12:49:46 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:49:46 AM | DISTRIBUTED=True
10/22 12:49:46 AM | DROP_PATH_PROB=0.2
10/22 12:49:46 AM | ENSEMBLE=True
10/22 12:49:46 AM | GPUS=[0]
10/22 12:49:46 AM | INIT_CHANNELS=16
10/22 12:49:46 AM | INPUT_CHANNELS=3
10/22 12:49:46 AM | LAYER_NUM=3
10/22 12:49:46 AM | LOCAL_RANK=0
10/22 12:49:46 AM | LR_RATIO=0.5
10/22 12:49:46 AM | MODEL_TYPE=cifar
10/22 12:49:46 AM | N_CLASSES=10
10/22 12:49:46 AM | NAME=cifar10-search
10/22 12:49:46 AM | NO_REPEAT=False
10/22 12:49:46 AM | PATH=searchs/cifar10-search
10/22 12:49:46 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:49:46 AM | PRETRAIN_DECAY=0
10/22 12:49:46 AM | PRETRAIN_EPOCHS=0
10/22 12:49:46 AM | PRINT_FREQ=10
10/22 12:49:46 AM | RETRAIN_EPOCHS=1
10/22 12:49:46 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:49:46 AM | RETRAIN_SETTING=0
10/22 12:49:46 AM | RETRAIN_UPDATE_W=True
10/22 12:49:46 AM | SAME_STRUCTURE=True
10/22 12:49:46 AM | SAMPLE_RATIO=0.2
10/22 12:49:46 AM | SEARCH_ITER=25
10/22 12:49:46 AM | SEARCH_ITER_EPOCHS=1
10/22 12:49:46 AM | SEED=0
10/22 12:49:46 AM | SHORT_CONNECT=False
10/22 12:49:46 AM | SYNC_PARAM=True
10/22 12:49:46 AM | TEACHER2STUDENT=True
10/22 12:49:46 AM | TEST_DIR=/data/imagenet/val
10/22 12:49:46 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:49:46 AM | TRAIN_PORTION=0.5
10/22 12:49:46 AM | UNROLLED=False
10/22 12:49:46 AM | USE_BETA=True
10/22 12:49:46 AM | VAL_DIR=/data/imagenet/train
10/22 12:49:46 AM | W_GRAD_CLIP=5.0
10/22 12:49:46 AM | W_LR=0.05
10/22 12:49:46 AM | W_LR_MIN=0.001
10/22 12:49:46 AM | W_MOMENTUM=0.9
10/22 12:49:46 AM | W_WEIGHT_DECAY=0.0003
10/22 12:49:46 AM | WORKERS=1
10/22 12:49:46 AM | WORLD_SIZE=1
10/22 12:49:46 AM |
10/22 12:49:46 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:51:31 AM |
10/22 12:51:31 AM | Parameters:
10/22 12:51:31 AM | ALPHA_LR=0.0003
10/22 12:51:31 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:51:31 AM | AUX_WEIGHT=0.4
10/22 12:51:31 AM | BATCH_SIZE=64
10/22 12:51:31 AM | CELLS_NUM=3
10/22 12:51:31 AM | CLEAN_ARCH=True
10/22 12:51:31 AM | CUTOUT_LENGTH=16
10/22 12:51:31 AM | DATA_DIR=./cifar
10/22 12:51:31 AM | DATA_PATH=./data/
10/22 12:51:31 AM | DATASET=cifar10
10/22 12:51:31 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:51:31 AM | DISTRIBUTED=True
10/22 12:51:31 AM | DROP_PATH_PROB=0.2
10/22 12:51:31 AM | ENSEMBLE=True
10/22 12:51:31 AM | GPUS=[0]
10/22 12:51:31 AM | INIT_CHANNELS=16
10/22 12:51:31 AM | INPUT_CHANNELS=3
10/22 12:51:31 AM | LAYER_NUM=3
10/22 12:51:31 AM | LOCAL_RANK=0
10/22 12:51:31 AM | LR_RATIO=0.5
10/22 12:51:31 AM | MODEL_TYPE=cifar
10/22 12:51:31 AM | N_CLASSES=10
10/22 12:51:31 AM | NAME=cifar10-search
10/22 12:51:31 AM | NO_REPEAT=False
10/22 12:51:31 AM | PATH=searchs/cifar10-search
10/22 12:51:31 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:51:31 AM | PRETRAIN_DECAY=0
10/22 12:51:31 AM | PRETRAIN_EPOCHS=0
10/22 12:51:31 AM | PRINT_FREQ=10
10/22 12:51:31 AM | RETRAIN_EPOCHS=1
10/22 12:51:31 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:51:31 AM | RETRAIN_SETTING=0
10/22 12:51:31 AM | RETRAIN_UPDATE_W=True
10/22 12:51:31 AM | SAME_STRUCTURE=True
10/22 12:51:31 AM | SAMPLE_RATIO=0.2
10/22 12:51:31 AM | SEARCH_ITER=25
10/22 12:51:31 AM | SEARCH_ITER_EPOCHS=1
10/22 12:51:31 AM | SEED=0
10/22 12:51:31 AM | SHORT_CONNECT=False
10/22 12:51:31 AM | SYNC_PARAM=True
10/22 12:51:31 AM | TEACHER2STUDENT=True
10/22 12:51:31 AM | TEST_DIR=/data/imagenet/val
10/22 12:51:31 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:51:31 AM | TRAIN_PORTION=0.5
10/22 12:51:31 AM | UNROLLED=False
10/22 12:51:31 AM | USE_BETA=True
10/22 12:51:31 AM | VAL_DIR=/data/imagenet/train
10/22 12:51:31 AM | W_GRAD_CLIP=5.0
10/22 12:51:31 AM | W_LR=0.05
10/22 12:51:31 AM | W_LR_MIN=0.001
10/22 12:51:31 AM | W_MOMENTUM=0.9
10/22 12:51:31 AM | W_WEIGHT_DECAY=0.0003
10/22 12:51:31 AM | WORKERS=1
10/22 12:51:31 AM | WORLD_SIZE=1
10/22 12:51:31 AM |
10/22 12:51:31 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:52:27 AM |
10/22 12:52:27 AM | Parameters:
10/22 12:52:27 AM | ALPHA_LR=0.0003
10/22 12:52:27 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:52:27 AM | AUX_WEIGHT=0.4
10/22 12:52:27 AM | BATCH_SIZE=64
10/22 12:52:27 AM | CELLS_NUM=3
10/22 12:52:27 AM | CLEAN_ARCH=True
10/22 12:52:27 AM | CUTOUT_LENGTH=16
10/22 12:52:27 AM | DATA_DIR=./cifar
10/22 12:52:27 AM | DATA_PATH=./data/
10/22 12:52:27 AM | DATASET=cifar10
10/22 12:52:27 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:52:27 AM | DISTRIBUTED=True
10/22 12:52:27 AM | DROP_PATH_PROB=0.2
10/22 12:52:27 AM | ENSEMBLE=True
10/22 12:52:27 AM | GPUS=[0]
10/22 12:52:27 AM | INIT_CHANNELS=16
10/22 12:52:27 AM | INPUT_CHANNELS=3
10/22 12:52:27 AM | LAYER_NUM=3
10/22 12:52:27 AM | LOCAL_RANK=0
10/22 12:52:27 AM | LR_RATIO=0.5
10/22 12:52:27 AM | MODEL_TYPE=cifar
10/22 12:52:27 AM | N_CLASSES=10
10/22 12:52:27 AM | NAME=cifar10-search
10/22 12:52:27 AM | NO_REPEAT=False
10/22 12:52:27 AM | PATH=searchs/cifar10-search
10/22 12:52:27 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:52:27 AM | PRETRAIN_DECAY=0
10/22 12:52:27 AM | PRETRAIN_EPOCHS=0
10/22 12:52:27 AM | PRINT_FREQ=10
10/22 12:52:27 AM | RETRAIN_EPOCHS=1
10/22 12:52:27 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:52:27 AM | RETRAIN_SETTING=0
10/22 12:52:27 AM | RETRAIN_UPDATE_W=True
10/22 12:52:27 AM | SAME_STRUCTURE=True
10/22 12:52:27 AM | SAMPLE_RATIO=0.2
10/22 12:52:27 AM | SEARCH_ITER=25
10/22 12:52:27 AM | SEARCH_ITER_EPOCHS=1
10/22 12:52:27 AM | SEED=0
10/22 12:52:27 AM | SHORT_CONNECT=False
10/22 12:52:27 AM | SYNC_PARAM=True
10/22 12:52:27 AM | TEACHER2STUDENT=True
10/22 12:52:27 AM | TEST_DIR=/data/imagenet/val
10/22 12:52:27 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:52:27 AM | TRAIN_PORTION=0.5
10/22 12:52:27 AM | UNROLLED=False
10/22 12:52:27 AM | USE_BETA=True
10/22 12:52:27 AM | VAL_DIR=/data/imagenet/train
10/22 12:52:27 AM | W_GRAD_CLIP=5.0
10/22 12:52:27 AM | W_LR=0.05
10/22 12:52:27 AM | W_LR_MIN=0.001
10/22 12:52:27 AM | W_MOMENTUM=0.9
10/22 12:52:27 AM | W_WEIGHT_DECAY=0.0003
10/22 12:52:27 AM | WORKERS=1
10/22 12:52:27 AM | WORLD_SIZE=1
10/22 12:52:27 AM |
10/22 12:52:27 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
10/22 12:54:00 AM |
10/22 12:54:00 AM | Parameters:
10/22 12:54:00 AM | ALPHA_LR=0.0003
10/22 12:54:00 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:54:00 AM | AUX_WEIGHT=0.4
10/22 12:54:00 AM | BATCH_SIZE=64
10/22 12:54:00 AM | CELLS_NUM=3
10/22 12:54:00 AM | CLEAN_ARCH=True
10/22 12:54:00 AM | CUTOUT_LENGTH=16
10/22 12:54:00 AM | DATA_DIR=./cifar
10/22 12:54:00 AM | DATA_PATH=./data/
10/22 12:54:00 AM | DATASET=cifar10
10/22 12:54:00 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:54:00 AM | DISTRIBUTED=True
10/22 12:54:00 AM | DROP_PATH_PROB=0.2
10/22 12:54:00 AM | ENSEMBLE=True
10/22 12:54:00 AM | GPUS=[0]
10/22 12:54:00 AM | INIT_CHANNELS=16
10/22 12:54:00 AM | INPUT_CHANNELS=3
10/22 12:54:00 AM | LAYER_NUM=3
10/22 12:54:00 AM | LOCAL_RANK=0
10/22 12:54:00 AM | LR_RATIO=0.5
10/22 12:54:00 AM | MODEL_TYPE=cifar
10/22 12:54:00 AM | N_CLASSES=10
10/22 12:54:00 AM | NAME=cifar10-search
10/22 12:54:00 AM | NO_REPEAT=False
10/22 12:54:00 AM | PATH=searchs/cifar10-search
10/22 12:54:00 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:54:00 AM | PRETRAIN_DECAY=0
10/22 12:54:00 AM | PRETRAIN_EPOCHS=0
10/22 12:54:00 AM | PRINT_FREQ=10
10/22 12:54:00 AM | RETRAIN_EPOCHS=1
10/22 12:54:00 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:54:00 AM | RETRAIN_SETTING=0
10/22 12:54:00 AM | RETRAIN_UPDATE_W=True
10/22 12:54:00 AM | SAME_STRUCTURE=True
10/22 12:54:00 AM | SAMPLE_RATIO=0.2
10/22 12:54:00 AM | SEARCH_ITER=25
10/22 12:54:00 AM | SEARCH_ITER_EPOCHS=1
10/22 12:54:00 AM | SEED=0
10/22 12:54:00 AM | SHORT_CONNECT=False
10/22 12:54:00 AM | SYNC_PARAM=True
10/22 12:54:00 AM | TEACHER2STUDENT=True
10/22 12:54:00 AM | TEST_DIR=/data/imagenet/val
10/22 12:54:00 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:54:00 AM | TRAIN_PORTION=0.5
10/22 12:54:00 AM | UNROLLED=False
10/22 12:54:00 AM | USE_BETA=True
10/22 12:54:00 AM | VAL_DIR=/data/imagenet/train
10/22 12:54:00 AM | W_GRAD_CLIP=5.0
10/22 12:54:00 AM | W_LR=0.05
10/22 12:54:00 AM | W_LR_MIN=0.001
10/22 12:54:00 AM | W_MOMENTUM=0.9
10/22 12:54:00 AM | W_WEIGHT_DECAY=0.0003
10/22 12:54:00 AM | WORKERS=1
10/22 12:54:00 AM | WORLD_SIZE=1
10/22 12:54:00 AM |
10/22 12:54:00 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
10/22 12:55:34 AM |
10/22 12:55:34 AM | Parameters:
10/22 12:55:34 AM | ALPHA_LR=0.0003
10/22 12:55:34 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:55:34 AM | AUX_WEIGHT=0.4
10/22 12:55:34 AM | BATCH_SIZE=64
10/22 12:55:34 AM | CELLS_NUM=3
10/22 12:55:34 AM | CLEAN_ARCH=True
10/22 12:55:34 AM | CUTOUT_LENGTH=16
10/22 12:55:34 AM | DATA_DIR=./cifar
10/22 12:55:34 AM | DATA_PATH=./data/
10/22 12:55:34 AM | DATASET=cifar10
10/22 12:55:34 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:55:34 AM | DISTRIBUTED=True
10/22 12:55:34 AM | DROP_PATH_PROB=0.2
10/22 12:55:34 AM | ENSEMBLE=True
10/22 12:55:34 AM | GPUS=[0]
10/22 12:55:34 AM | INIT_CHANNELS=16
10/22 12:55:34 AM | INPUT_CHANNELS=3
10/22 12:55:34 AM | LAYER_NUM=3
10/22 12:55:34 AM | LOCAL_RANK=0
10/22 12:55:34 AM | LR_RATIO=0.5
10/22 12:55:34 AM | MODEL_TYPE=cifar
10/22 12:55:34 AM | N_CLASSES=10
10/22 12:55:34 AM | NAME=cifar10-search
10/22 12:55:34 AM | NO_REPEAT=False
10/22 12:55:34 AM | PATH=searchs/cifar10-search
10/22 12:55:34 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:55:34 AM | PRETRAIN_DECAY=0
10/22 12:55:34 AM | PRETRAIN_EPOCHS=0
10/22 12:55:34 AM | PRINT_FREQ=10
10/22 12:55:34 AM | RETRAIN_EPOCHS=1
10/22 12:55:34 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:55:34 AM | RETRAIN_SETTING=0
10/22 12:55:34 AM | RETRAIN_UPDATE_W=True
10/22 12:55:34 AM | SAME_STRUCTURE=True
10/22 12:55:34 AM | SAMPLE_RATIO=0.2
10/22 12:55:34 AM | SEARCH_ITER=25
10/22 12:55:34 AM | SEARCH_ITER_EPOCHS=1
10/22 12:55:34 AM | SEED=0
10/22 12:55:34 AM | SHORT_CONNECT=False
10/22 12:55:34 AM | SYNC_PARAM=True
10/22 12:55:34 AM | TEACHER2STUDENT=True
10/22 12:55:34 AM | TEST_DIR=/data/imagenet/val
10/22 12:55:34 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:55:34 AM | TRAIN_MAIN_FIRST=False
10/22 12:55:34 AM | TRAIN_PORTION=0.5
10/22 12:55:34 AM | UNROLLED=False
10/22 12:55:34 AM | USE_BETA=True
10/22 12:55:34 AM | VAL_DIR=/data/imagenet/train
10/22 12:55:34 AM | W_GRAD_CLIP=5.0
10/22 12:55:34 AM | W_LR=0.05
10/22 12:55:34 AM | W_LR_MIN=0.001
10/22 12:55:34 AM | W_MOMENTUM=0.9
10/22 12:55:34 AM | W_WEIGHT_DECAY=0.0003
10/22 12:55:34 AM | WORKERS=1
10/22 12:55:34 AM | WORLD_SIZE=1
10/22 12:55:34 AM |
10/22 12:55:34 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
10/22 12:57:28 AM |
10/22 12:57:28 AM | Parameters:
10/22 12:57:28 AM | ALPHA_LR=0.0003
10/22 12:57:28 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:57:28 AM | AUX_WEIGHT=0.4
10/22 12:57:28 AM | BATCH_SIZE=64
10/22 12:57:28 AM | CELLS_NUM=3
10/22 12:57:28 AM | CLEAN_ARCH=True
10/22 12:57:28 AM | CUTOUT_LENGTH=16
10/22 12:57:28 AM | DATA_DIR=./cifar
10/22 12:57:28 AM | DATA_PATH=./data/
10/22 12:57:28 AM | DATASET=cifar10
10/22 12:57:28 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:57:28 AM | DISTRIBUTED=True
10/22 12:57:28 AM | DROP_PATH_PROB=0.2
10/22 12:57:28 AM | ENSEMBLE=True
10/22 12:57:28 AM | GPUS=[0]
10/22 12:57:28 AM | INIT_CHANNELS=16
10/22 12:57:28 AM | INPUT_CHANNELS=3
10/22 12:57:28 AM | LAYER_NUM=3
10/22 12:57:28 AM | LOCAL_RANK=0
10/22 12:57:28 AM | LR_RATIO=0.5
10/22 12:57:28 AM | MODEL_TYPE=cifar
10/22 12:57:28 AM | N_CLASSES=10
10/22 12:57:28 AM | NAME=cifar10-search
10/22 12:57:28 AM | NO_REPEAT=False
10/22 12:57:28 AM | PATH=searchs/cifar10-search
10/22 12:57:28 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:57:28 AM | PRETRAIN_DECAY=0
10/22 12:57:28 AM | PRETRAIN_EPOCHS=0
10/22 12:57:28 AM | PRINT_FREQ=10
10/22 12:57:28 AM | RETRAIN_EPOCHS=1
10/22 12:57:28 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:57:28 AM | RETRAIN_SETTING=0
10/22 12:57:28 AM | RETRAIN_UPDATE_W=True
10/22 12:57:28 AM | SAME_STRUCTURE=True
10/22 12:57:28 AM | SAMPLE_RATIO=0.2
10/22 12:57:28 AM | SEARCH_ITER=25
10/22 12:57:28 AM | SEARCH_ITER_EPOCHS=1
10/22 12:57:28 AM | SEED=0
10/22 12:57:28 AM | SHORT_CONNECT=False
10/22 12:57:28 AM | SYNC_PARAM=True
10/22 12:57:28 AM | TEACHER2STUDENT=True
10/22 12:57:28 AM | TEST_DIR=/data/imagenet/val
10/22 12:57:28 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:57:28 AM | TRAIN_MAIN_FIRST=False
10/22 12:57:28 AM | TRAIN_PORTION=0.5
10/22 12:57:28 AM | UNROLLED=False
10/22 12:57:28 AM | USE_BETA=True
10/22 12:57:28 AM | VAL_DIR=/data/imagenet/train
10/22 12:57:28 AM | W_GRAD_CLIP=5.0
10/22 12:57:28 AM | W_LR=0.05
10/22 12:57:28 AM | W_LR_MIN=0.001
10/22 12:57:28 AM | W_MOMENTUM=0.9
10/22 12:57:28 AM | W_WEIGHT_DECAY=0.0003
10/22 12:57:28 AM | WORKERS=1
10/22 12:57:28 AM | WORLD_SIZE=1
10/22 12:57:28 AM |
10/22 12:57:28 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
10/22 12:58:12 AM |
10/22 12:58:12 AM | Parameters:
10/22 12:58:12 AM | ALPHA_LR=0.0003
10/22 12:58:12 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:58:12 AM | AUX_WEIGHT=0.4
10/22 12:58:12 AM | BATCH_SIZE=64
10/22 12:58:12 AM | CELLS_NUM=3
10/22 12:58:12 AM | CLEAN_ARCH=True
10/22 12:58:12 AM | CUTOUT_LENGTH=16
10/22 12:58:12 AM | DATA_DIR=./cifar
10/22 12:58:12 AM | DATA_PATH=./data/
10/22 12:58:12 AM | DATASET=cifar10
10/22 12:58:12 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:58:12 AM | DISTRIBUTED=True
10/22 12:58:12 AM | DROP_PATH_PROB=0.2
10/22 12:58:12 AM | ENSEMBLE=True
10/22 12:58:12 AM | GPUS=[0]
10/22 12:58:12 AM | INIT_CHANNELS=16
10/22 12:58:12 AM | INPUT_CHANNELS=3
10/22 12:58:12 AM | LAYER_NUM=3
10/22 12:58:12 AM | LOCAL_RANK=0
10/22 12:58:12 AM | LR_RATIO=0.5
10/22 12:58:12 AM | MODEL_TYPE=cifar
10/22 12:58:12 AM | N_CLASSES=10
10/22 12:58:12 AM | NAME=cifar10-search
10/22 12:58:12 AM | NO_REPEAT=False
10/22 12:58:12 AM | PATH=searchs/cifar10-search
10/22 12:58:12 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:58:12 AM | PRETRAIN_DECAY=0
10/22 12:58:12 AM | PRETRAIN_EPOCHS=0
10/22 12:58:12 AM | PRINT_FREQ=10
10/22 12:58:12 AM | RETRAIN_EPOCHS=1
10/22 12:58:12 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:58:12 AM | RETRAIN_SETTING=0
10/22 12:58:12 AM | RETRAIN_UPDATE_W=True
10/22 12:58:12 AM | SAME_STRUCTURE=True
10/22 12:58:12 AM | SAMPLE_RATIO=0.2
10/22 12:58:12 AM | SEARCH_ITER=25
10/22 12:58:12 AM | SEARCH_ITER_EPOCHS=1
10/22 12:58:12 AM | SEED=0
10/22 12:58:12 AM | SHORT_CONNECT=False
10/22 12:58:12 AM | SYNC_PARAM=True
10/22 12:58:12 AM | TEACHER2STUDENT=True
10/22 12:58:12 AM | TEST_DIR=/data/imagenet/val
10/22 12:58:12 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:58:12 AM | TRAIN_MAIN_FIRST=False
10/22 12:58:12 AM | TRAIN_PORTION=0.5
10/22 12:58:12 AM | UNROLLED=False
10/22 12:58:12 AM | USE_BETA=True
10/22 12:58:12 AM | VAL_DIR=/data/imagenet/train
10/22 12:58:12 AM | W_GRAD_CLIP=5.0
10/22 12:58:12 AM | W_LR=0.05
10/22 12:58:12 AM | W_LR_MIN=0.001
10/22 12:58:12 AM | W_MOMENTUM=0.9
10/22 12:58:12 AM | W_WEIGHT_DECAY=0.0003
10/22 12:58:12 AM | WORKERS=1
10/22 12:58:12 AM | WORLD_SIZE=1
10/22 12:58:12 AM |
10/22 12:58:12 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
10/22 12:59:06 AM |
10/22 12:59:06 AM | Parameters:
10/22 12:59:06 AM | ALPHA_LR=0.0003
10/22 12:59:06 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 12:59:06 AM | AUX_WEIGHT=0.4
10/22 12:59:06 AM | BATCH_SIZE=64
10/22 12:59:06 AM | CELLS_NUM=3
10/22 12:59:06 AM | CLEAN_ARCH=True
10/22 12:59:06 AM | CUTOUT_LENGTH=16
10/22 12:59:06 AM | DATA_DIR=./cifar
10/22 12:59:06 AM | DATA_PATH=./data/
10/22 12:59:06 AM | DATASET=cifar10
10/22 12:59:06 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 12:59:06 AM | DISTRIBUTED=True
10/22 12:59:06 AM | DROP_PATH_PROB=0.2
10/22 12:59:06 AM | ENSEMBLE=True
10/22 12:59:06 AM | GPUS=[0]
10/22 12:59:06 AM | INIT_CHANNELS=16
10/22 12:59:06 AM | INPUT_CHANNELS=3
10/22 12:59:06 AM | LAYER_NUM=3
10/22 12:59:06 AM | LOCAL_RANK=0
10/22 12:59:06 AM | LR_RATIO=0.5
10/22 12:59:06 AM | MODEL_TYPE=cifar
10/22 12:59:06 AM | N_CLASSES=10
10/22 12:59:06 AM | NAME=cifar10-search
10/22 12:59:06 AM | NO_REPEAT=False
10/22 12:59:06 AM | PATH=searchs/cifar10-search
10/22 12:59:06 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 12:59:06 AM | PRETRAIN_DECAY=0
10/22 12:59:06 AM | PRETRAIN_EPOCHS=0
10/22 12:59:06 AM | PRINT_FREQ=10
10/22 12:59:06 AM | RETRAIN_EPOCHS=1
10/22 12:59:06 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 12:59:06 AM | RETRAIN_SETTING=0
10/22 12:59:06 AM | RETRAIN_UPDATE_W=True
10/22 12:59:06 AM | SAME_STRUCTURE=True
10/22 12:59:06 AM | SAMPLE_RATIO=0.2
10/22 12:59:06 AM | SEARCH_ITER=25
10/22 12:59:06 AM | SEARCH_ITER_EPOCHS=1
10/22 12:59:06 AM | SEED=0
10/22 12:59:06 AM | SHORT_CONNECT=False
10/22 12:59:06 AM | SYNC_PARAM=True
10/22 12:59:06 AM | TEACHER2STUDENT=True
10/22 12:59:06 AM | TEST_DIR=/data/imagenet/val
10/22 12:59:06 AM | TRAIN_DIR=/data/imagenet/train
10/22 12:59:06 AM | TRAIN_MAIN_FIRST=False
10/22 12:59:06 AM | TRAIN_PORTION=0.5
10/22 12:59:06 AM | UNROLLED=False
10/22 12:59:06 AM | USE_BETA=True
10/22 12:59:06 AM | VAL_DIR=/data/imagenet/train
10/22 12:59:06 AM | W_GRAD_CLIP=5.0
10/22 12:59:06 AM | W_LR=0.05
10/22 12:59:06 AM | W_LR_MIN=0.001
10/22 12:59:06 AM | W_MOMENTUM=0.9
10/22 12:59:06 AM | W_WEIGHT_DECAY=0.0003
10/22 12:59:06 AM | WORKERS=1
10/22 12:59:06 AM | WORLD_SIZE=1
10/22 12:59:06 AM |
10/22 12:59:06 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
10/22 01:00:02 AM |
10/22 01:00:02 AM | Parameters:
10/22 01:00:02 AM | ALPHA_LR=0.0003
10/22 01:00:02 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 01:00:02 AM | AUX_WEIGHT=0.4
10/22 01:00:02 AM | BATCH_SIZE=64
10/22 01:00:02 AM | CELLS_NUM=3
10/22 01:00:02 AM | CLEAN_ARCH=True
10/22 01:00:02 AM | CUTOUT_LENGTH=16
10/22 01:00:02 AM | DATA_DIR=./cifar
10/22 01:00:02 AM | DATA_PATH=./data/
10/22 01:00:02 AM | DATASET=cifar10
10/22 01:00:02 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 01:00:02 AM | DISTRIBUTED=True
10/22 01:00:02 AM | DROP_PATH_PROB=0.2
10/22 01:00:02 AM | ENSEMBLE=True
10/22 01:00:02 AM | GPUS=[0]
10/22 01:00:02 AM | INIT_CHANNELS=16
10/22 01:00:02 AM | INPUT_CHANNELS=3
10/22 01:00:02 AM | LAYER_NUM=3
10/22 01:00:02 AM | LOCAL_RANK=0
10/22 01:00:02 AM | LR_RATIO=0.5
10/22 01:00:02 AM | MODEL_TYPE=cifar
10/22 01:00:02 AM | N_CLASSES=10
10/22 01:00:02 AM | NAME=cifar10-search
10/22 01:00:02 AM | NO_REPEAT=False
10/22 01:00:02 AM | PATH=searchs/cifar10-search
10/22 01:00:02 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 01:00:02 AM | PRETRAIN_DECAY=0
10/22 01:00:02 AM | PRETRAIN_EPOCHS=0
10/22 01:00:02 AM | PRINT_FREQ=10
10/22 01:00:02 AM | RETRAIN_EPOCHS=1
10/22 01:00:02 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 01:00:02 AM | RETRAIN_SETTING=0
10/22 01:00:02 AM | RETRAIN_UPDATE_W=True
10/22 01:00:02 AM | SAME_STRUCTURE=True
10/22 01:00:02 AM | SAMPLE_RATIO=0.2
10/22 01:00:02 AM | SEARCH_ITER=25
10/22 01:00:02 AM | SEARCH_ITER_EPOCHS=1
10/22 01:00:02 AM | SEED=0
10/22 01:00:02 AM | SHORT_CONNECT=False
10/22 01:00:02 AM | SYNC_PARAM=True
10/22 01:00:02 AM | TEACHER2STUDENT=True
10/22 01:00:02 AM | TEST_DIR=/data/imagenet/val
10/22 01:00:02 AM | TRAIN_DIR=/data/imagenet/train
10/22 01:00:02 AM | TRAIN_MAIN_FIRST=False
10/22 01:00:02 AM | TRAIN_PORTION=0.5
10/22 01:00:02 AM | UNROLLED=False
10/22 01:00:02 AM | USE_BETA=True
10/22 01:00:02 AM | VAL_DIR=/data/imagenet/train
10/22 01:00:02 AM | W_GRAD_CLIP=5.0
10/22 01:00:02 AM | W_LR=0.05
10/22 01:00:02 AM | W_LR_MIN=0.001
10/22 01:00:02 AM | W_MOMENTUM=0.9
10/22 01:00:02 AM | W_WEIGHT_DECAY=0.0003
10/22 01:00:02 AM | WORKERS=1
10/22 01:00:02 AM | WORLD_SIZE=1
10/22 01:00:02 AM |
10/22 01:00:02 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
10/22 01:01:30 AM |
10/22 01:01:30 AM | Parameters:
10/22 01:01:30 AM | ALPHA_LR=0.0003
10/22 01:01:30 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 01:01:30 AM | AUX_WEIGHT=0.4
10/22 01:01:30 AM | BATCH_SIZE=64
10/22 01:01:30 AM | CELLS_NUM=3
10/22 01:01:30 AM | CLEAN_ARCH=True
10/22 01:01:30 AM | CUTOUT_LENGTH=16
10/22 01:01:30 AM | DATA_DIR=./cifar
10/22 01:01:30 AM | DATA_PATH=./data/
10/22 01:01:30 AM | DATASET=cifar10
10/22 01:01:30 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 01:01:30 AM | DISTRIBUTED=True
10/22 01:01:30 AM | DROP_PATH_PROB=0.2
10/22 01:01:30 AM | ENSEMBLE=True
10/22 01:01:30 AM | GPUS=[0]
10/22 01:01:30 AM | INIT_CHANNELS=16
10/22 01:01:30 AM | INPUT_CHANNELS=3
10/22 01:01:30 AM | LAYER_NUM=3
10/22 01:01:30 AM | LOCAL_RANK=0
10/22 01:01:30 AM | LR_RATIO=0.5
10/22 01:01:30 AM | MODEL_TYPE=cifar
10/22 01:01:30 AM | N_CLASSES=10
10/22 01:01:30 AM | NAME=cifar10-search
10/22 01:01:30 AM | NO_REPEAT=False
10/22 01:01:30 AM | PATH=searchs/cifar10-search
10/22 01:01:30 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 01:01:30 AM | PRETRAIN_DECAY=0
10/22 01:01:30 AM | PRETRAIN_EPOCHS=0
10/22 01:01:30 AM | PRINT_FREQ=10
10/22 01:01:30 AM | RETRAIN_EPOCHS=1
10/22 01:01:30 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 01:01:30 AM | RETRAIN_SETTING=0
10/22 01:01:30 AM | RETRAIN_UPDATE_W=True
10/22 01:01:30 AM | SAME_STRUCTURE=True
10/22 01:01:30 AM | SAMPLE_RATIO=0.2
10/22 01:01:30 AM | SEARCH_ITER=25
10/22 01:01:30 AM | SEARCH_ITER_EPOCHS=1
10/22 01:01:30 AM | SEED=0
10/22 01:01:30 AM | SHORT_CONNECT=False
10/22 01:01:30 AM | SYNC_PARAM=True
10/22 01:01:30 AM | TEACHER2STUDENT=True
10/22 01:01:30 AM | TEST_DIR=/data/imagenet/val
10/22 01:01:30 AM | TRAIN_DIR=/data/imagenet/train
10/22 01:01:30 AM | TRAIN_MAIN_FIRST=False
10/22 01:01:30 AM | TRAIN_PORTION=0.5
10/22 01:01:30 AM | UNROLLED=False
10/22 01:01:30 AM | USE_BETA=True
10/22 01:01:30 AM | VAL_DIR=/data/imagenet/train
10/22 01:01:30 AM | W_GRAD_CLIP=5.0
10/22 01:01:30 AM | W_LR=0.05
10/22 01:01:30 AM | W_LR_MIN=0.001
10/22 01:01:30 AM | W_MOMENTUM=0.9
10/22 01:01:30 AM | W_WEIGHT_DECAY=0.0003
10/22 01:01:30 AM | WORKERS=1
10/22 01:01:30 AM | WORLD_SIZE=1
10/22 01:01:30 AM |
10/22 01:01:30 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
10/22 01:02:56 AM |
10/22 01:02:56 AM | Parameters:
10/22 01:02:56 AM | ALPHA_LR=0.0003
10/22 01:02:56 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 01:02:56 AM | AUX_WEIGHT=0.4
10/22 01:02:56 AM | BATCH_SIZE=64
10/22 01:02:56 AM | CELLS_NUM=3
10/22 01:02:56 AM | CLEAN_ARCH=True
10/22 01:02:56 AM | CUTOUT_LENGTH=16
10/22 01:02:56 AM | DATA_DIR=./cifar
10/22 01:02:56 AM | DATA_PATH=./data/
10/22 01:02:56 AM | DATASET=cifar10
10/22 01:02:56 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 01:02:56 AM | DISTRIBUTED=True
10/22 01:02:56 AM | DROP_PATH_PROB=0.2
10/22 01:02:56 AM | ENSEMBLE=True
10/22 01:02:56 AM | GPUS=[0]
10/22 01:02:56 AM | INIT_CHANNELS=16
10/22 01:02:56 AM | INPUT_CHANNELS=3
10/22 01:02:56 AM | LAYER_NUM=3
10/22 01:02:56 AM | LOCAL_RANK=0
10/22 01:02:56 AM | LR_RATIO=0.5
10/22 01:02:56 AM | MODEL_TYPE=cifar
10/22 01:02:56 AM | N_CLASSES=10
10/22 01:02:56 AM | NAME=cifar10-search
10/22 01:02:56 AM | NO_REPEAT=False
10/22 01:02:56 AM | PATH=searchs/cifar10-search
10/22 01:02:56 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 01:02:56 AM | PRETRAIN_DECAY=0
10/22 01:02:56 AM | PRETRAIN_EPOCHS=0
10/22 01:02:56 AM | PRINT_FREQ=10
10/22 01:02:56 AM | RETRAIN_EPOCHS=1
10/22 01:02:56 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 01:02:56 AM | RETRAIN_SETTING=0
10/22 01:02:56 AM | RETRAIN_UPDATE_W=True
10/22 01:02:56 AM | SAME_STRUCTURE=True
10/22 01:02:56 AM | SAMPLE_RATIO=0.2
10/22 01:02:56 AM | SEARCH_ITER=25
10/22 01:02:56 AM | SEARCH_ITER_EPOCHS=1
10/22 01:02:56 AM | SEED=0
10/22 01:02:56 AM | SHORT_CONNECT=False
10/22 01:02:56 AM | SYNC_PARAM=True
10/22 01:02:56 AM | TEACHER2STUDENT=True
10/22 01:02:56 AM | TEST_DIR=/data/imagenet/val
10/22 01:02:56 AM | TRAIN_DIR=/data/imagenet/train
10/22 01:02:56 AM | TRAIN_MAIN_FIRST=False
10/22 01:02:56 AM | TRAIN_PORTION=0.5
10/22 01:02:56 AM | UNROLLED=False
10/22 01:02:56 AM | USE_BETA=True
10/22 01:02:56 AM | VAL_DIR=/data/imagenet/train
10/22 01:02:56 AM | W_GRAD_CLIP=5.0
10/22 01:02:56 AM | W_LR=0.05
10/22 01:02:56 AM | W_LR_MIN=0.001
10/22 01:02:56 AM | W_MOMENTUM=0.9
10/22 01:02:56 AM | W_WEIGHT_DECAY=0.0003
10/22 01:02:56 AM | WORKERS=1
10/22 01:02:56 AM | WORLD_SIZE=1
10/22 01:02:56 AM |
10/22 01:02:56 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
10/22 01:03:57 AM |
10/22 01:03:57 AM | Parameters:
10/22 01:03:57 AM | ALPHA_LR=0.0003
10/22 01:03:57 AM | ALPHA_WEIGHT_DECAY=0.001
10/22 01:03:57 AM | AUX_WEIGHT=0.4
10/22 01:03:57 AM | BATCH_SIZE=64
10/22 01:03:57 AM | CELLS_NUM=3
10/22 01:03:57 AM | CLEAN_ARCH=True
10/22 01:03:57 AM | CUTOUT_LENGTH=16
10/22 01:03:57 AM | DATA_DIR=./cifar
10/22 01:03:57 AM | DATA_PATH=./data/
10/22 01:03:57 AM | DATASET=cifar10
10/22 01:03:57 AM | DIST_URL=tcp://127.0.0.1:23343
10/22 01:03:57 AM | DISTRIBUTED=True
10/22 01:03:57 AM | DROP_PATH_PROB=0.2
10/22 01:03:57 AM | ENSEMBLE=True
10/22 01:03:57 AM | GPUS=[0]
10/22 01:03:57 AM | INIT_CHANNELS=16
10/22 01:03:57 AM | INPUT_CHANNELS=3
10/22 01:03:57 AM | LAYER_NUM=3
10/22 01:03:57 AM | LOCAL_RANK=0
10/22 01:03:57 AM | LR_RATIO=0.5
10/22 01:03:57 AM | MODEL_TYPE=cifar
10/22 01:03:57 AM | N_CLASSES=10
10/22 01:03:57 AM | NAME=cifar10-search
10/22 01:03:57 AM | NO_REPEAT=False
10/22 01:03:57 AM | PATH=searchs/cifar10-search
10/22 01:03:57 AM | PLOT_PATH=searchs/cifar10-search/plots
10/22 01:03:57 AM | PRETRAIN_DECAY=0
10/22 01:03:57 AM | PRETRAIN_EPOCHS=0
10/22 01:03:57 AM | PRINT_FREQ=10
10/22 01:03:57 AM | RETRAIN_EPOCHS=1
10/22 01:03:57 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
10/22 01:03:57 AM | RETRAIN_SETTING=0
10/22 01:03:57 AM | RETRAIN_UPDATE_W=True
10/22 01:03:57 AM | SAME_STRUCTURE=True
10/22 01:03:57 AM | SAMPLE_RATIO=0.2
10/22 01:03:57 AM | SEARCH_ITER=25
10/22 01:03:57 AM | SEARCH_ITER_EPOCHS=1
10/22 01:03:57 AM | SEED=0
10/22 01:03:57 AM | SHORT_CONNECT=False
10/22 01:03:57 AM | SYNC_PARAM=True
10/22 01:03:57 AM | TEACHER2STUDENT=True
10/22 01:03:57 AM | TEST_DIR=/data/imagenet/val
10/22 01:03:57 AM | TRAIN_DIR=/data/imagenet/train
10/22 01:03:57 AM | TRAIN_MAIN_FIRST=False
10/22 01:03:57 AM | TRAIN_PORTION=0.5
10/22 01:03:57 AM | UNROLLED=False
10/22 01:03:57 AM | USE_BETA=True
10/22 01:03:57 AM | VAL_DIR=/data/imagenet/train
10/22 01:03:57 AM | W_GRAD_CLIP=5.0
10/22 01:03:57 AM | W_LR=0.05
10/22 01:03:57 AM | W_LR_MIN=0.001
10/22 01:03:57 AM | W_MOMENTUM=0.9
10/22 01:03:57 AM | W_WEIGHT_DECAY=0.0003
10/22 01:03:57 AM | WORKERS=1
10/22 01:03:57 AM | WORLD_SIZE=1
10/22 01:03:57 AM |
10/22 01:03:57 AM | Logger is set - training start
####### ALPHA #######
# Alpha - normal
tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
[0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
[0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
[0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
[0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
[0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
[0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
[0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
[0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
[0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
[0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
[0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
[0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
[0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
[0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
[0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
[0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
[0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
[0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
[0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
[0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
[0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
[0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
[0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
[0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
[0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
[0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
[0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
[0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
[0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
[0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
[0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
device='cuda:0')
tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
[0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
[0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
device='cuda:0')
tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
[0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
[0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
[0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
[0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0')
tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 0.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 0 LR 0.05
Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
Valid: Layer 1/3 Epoch 1/25 Step 000/157 Loss 2.302 Prec@(1,5) (6.2%, 51.6%)
Valid: Layer 1/3 Epoch 1/25 Step 010/157 Loss 2.303 Prec@(1,5) (10.4%, 49.1%)
Valid: Layer 1/3 Epoch 1/25 Step 020/157 Loss 2.303 Prec@(1,5) (9.7%, 47.9%)
Valid: Layer 1/3 Epoch 1/25 Step 030/157 Loss 2.303 Prec@(1,5) (9.6%, 48.9%)
Valid: Layer 1/3 Epoch 1/25 Step 040/157 Loss 2.303 Prec@(1,5) (9.5%, 49.8%)
Valid: Layer 1/3 Epoch 1/25 Step 050/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
Valid: Layer 1/3 Epoch 1/25 Step 060/157 Loss 2.303 Prec@(1,5) (9.8%, 49.4%)
Valid: Layer 1/3 Epoch 1/25 Step 070/157 Loss 2.303 Prec@(1,5) (9.7%, 49.6%)
Valid: Layer 1/3 Epoch 1/25 Step 080/157 Loss 2.303 Prec@(1,5) (9.8%, 49.3%)
Valid: Layer 1/3 Epoch 1/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.9%, 49.3%)
Valid: Layer 1/3 Epoch 1/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.6%, 49.1%)
Valid: Layer 1/3 Epoch 1/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.8%, 49.6%)
Valid: Layer 1/3 Epoch 1/25 Step 120/157 Loss 2.303 Prec@(1,5) (9.8%, 49.8%)
Valid: Layer 1/3 Epoch 1/25 Step 130/157 Loss 2.303 Prec@(1,5) (9.8%, 50.0%)
Valid: Layer 1/3 Epoch 1/25 Step 140/157 Loss 2.303 Prec@(1,5) (9.8%, 50.0%)
Valid: Layer 1/3 Epoch 1/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 1/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 1/25 Final Prec@1 10.0000%
Final best Prec@1 = 10.0000%
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3333, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3333, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2499, 0.2501, 0.2500, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2001, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0')
tensor([0.3333, 0.3333, 0.3333], device='cuda:0')
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0')
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 2/25 Step 000/002 Loss 2.273 Prec@(1,5) (15.6%, 62.5%)
Train: Layer 1/3 Epoch 2/25 Step 001/002 Loss 2.264 Prec@(1,5) (18.0%, 57.8%)
Train: Layer 1/3 Epoch 2/25 Final Prec@1 17.9688%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_5x5', 0)], [('sep_conv_5x5', 2), ('skip_connect', 1)], [('dil_conv_5x5', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 4), ('dil_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('sep_conv_3x3', 1), ('max_pool_3x3', 2)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 1), ('sep_conv_5x5', 0)], [('sep_conv_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_3x3', 3), ('avg_pool_3x3', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('max_pool_3x3', 0)], [('avg_pool_3x3', 2), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 4), ('max_pool_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 0)], [('dil_conv_3x3', 3), ('sep_conv_5x5', 1)], [('dil_conv_5x5', 4), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 10.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 1 LR 0.05
Retrain: Layer 1/3 Epoch 2/25 Step 000/002 Loss 3.433 Loss_distill 1.145 Prec@(1,5) (17.2%, 53.1%)
Retrain: Layer 1/3 Epoch 2/25 Final Prec@1 10.9375%
Valid: Layer 1/3 Epoch 2/25 Step 000/157 Loss 2.301 Prec@(1,5) (15.6%, 59.4%)
Valid: Layer 1/3 Epoch 2/25 Step 010/157 Loss 2.303 Prec@(1,5) (14.9%, 49.7%)
Valid: Layer 1/3 Epoch 2/25 Step 020/157 Loss 2.304 Prec@(1,5) (12.1%, 49.9%)
Valid: Layer 1/3 Epoch 2/25 Step 030/157 Loss 2.304 Prec@(1,5) (10.8%, 49.7%)
Valid: Layer 1/3 Epoch 2/25 Step 040/157 Loss 2.303 Prec@(1,5) (11.1%, 50.0%)
Valid: Layer 1/3 Epoch 2/25 Step 050/157 Loss 2.303 Prec@(1,5) (10.4%, 49.9%)
Valid: Layer 1/3 Epoch 2/25 Step 060/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
Valid: Layer 1/3 Epoch 2/25 Step 070/157 Loss 2.303 Prec@(1,5) (10.0%, 49.8%)
Valid: Layer 1/3 Epoch 2/25 Step 080/157 Loss 2.303 Prec@(1,5) (10.2%, 50.0%)
Valid: Layer 1/3 Epoch 2/25 Step 090/157 Loss 2.303 Prec@(1,5) (10.0%, 49.6%)
Valid: Layer 1/3 Epoch 2/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
Valid: Layer 1/3 Epoch 2/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
Valid: Layer 1/3 Epoch 2/25 Step 120/157 Loss 2.303 Prec@(1,5) (9.8%, 49.9%)
Valid: Layer 1/3 Epoch 2/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.0%, 50.2%)
Valid: Layer 1/3 Epoch 2/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.1%)
Valid: Layer 1/3 Epoch 2/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 2/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 2/25 Final Prec@1 10.0000%
Final best Prec@1 = 10.0000%
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.1999, 0.2000, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2501, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2500, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3333, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.1999, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3335, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0')
tensor([0.3333, 0.3333, 0.3333], device='cuda:0')
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0')
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 3/25 Step 000/002 Loss 2.251 Prec@(1,5) (18.8%, 68.8%)
Train: Layer 1/3 Epoch 3/25 Step 001/002 Loss 2.230 Prec@(1,5) (25.8%, 72.7%)
Train: Layer 1/3 Epoch 3/25 Final Prec@1 25.7812%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 2), ('avg_pool_3x3', 0)], [('dil_conv_5x5', 0), ('sep_conv_3x3', 2)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_3x3', 1), ('avg_pool_3x3', 2)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 3)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 0), ('sep_conv_3x3', 2)], [('sep_conv_3x3', 1), ('dil_conv_3x3', 2)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_5x5', 1)], [('dil_conv_3x3', 0), ('dil_conv_5x5', 1)], [('avg_pool_3x3', 3), ('dil_conv_5x5', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('avg_pool_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 0)], [('sep_conv_5x5', 3), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 4), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 10.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 2 LR 0.05
Retrain: Layer 1/3 Epoch 3/25 Step 000/002 Loss 3.435 Loss_distill 1.145 Prec@(1,5) (10.9%, 67.2%)
Retrain: Layer 1/3 Epoch 3/25 Final Prec@1 7.8125%
Valid: Layer 1/3 Epoch 3/25 Step 000/157 Loss 2.303 Prec@(1,5) (10.9%, 45.3%)
Valid: Layer 1/3 Epoch 3/25 Step 010/157 Loss 2.301 Prec@(1,5) (10.4%, 52.4%)
Valid: Layer 1/3 Epoch 3/25 Step 020/157 Loss 2.303 Prec@(1,5) (9.2%, 51.9%)
Valid: Layer 1/3 Epoch 3/25 Step 030/157 Loss 2.303 Prec@(1,5) (9.3%, 51.0%)
Valid: Layer 1/3 Epoch 3/25 Step 040/157 Loss 2.304 Prec@(1,5) (9.1%, 50.3%)
Valid: Layer 1/3 Epoch 3/25 Step 050/157 Loss 2.303 Prec@(1,5) (9.1%, 50.3%)
Valid: Layer 1/3 Epoch 3/25 Step 060/157 Loss 2.304 Prec@(1,5) (9.5%, 50.3%)
Valid: Layer 1/3 Epoch 3/25 Step 070/157 Loss 2.303 Prec@(1,5) (9.7%, 50.3%)
Valid: Layer 1/3 Epoch 3/25 Step 080/157 Loss 2.303 Prec@(1,5) (9.7%, 50.3%)
Valid: Layer 1/3 Epoch 3/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.4%, 50.1%)
Valid: Layer 1/3 Epoch 3/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.6%, 50.4%)
Valid: Layer 1/3 Epoch 3/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 50.6%)
Valid: Layer 1/3 Epoch 3/25 Step 120/157 Loss 2.303 Prec@(1,5) (10.0%, 50.4%)
Valid: Layer 1/3 Epoch 3/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.1%, 50.2%)
Valid: Layer 1/3 Epoch 3/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 3/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.1%)
Valid: Layer 1/3 Epoch 3/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 3/25 Final Prec@1 10.0000%
Final best Prec@1 = 10.0000%
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3332, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2499, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2001, 0.2000, 0.2000, 0.1999], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2500, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.2000, 0.1999, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0')
tensor([0.3333, 0.3333, 0.3333], device='cuda:0')
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0')
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0')
#####################
Train: Layer 1/3 Epoch 4/25 Step 000/002 Loss 2.214 Prec@(1,5) (26.6%, 68.8%)
Train: Layer 1/3 Epoch 4/25 Step 001/002 Loss 2.188 Prec@(1,5) (29.7%, 75.8%)
Train: Layer 1/3 Epoch 4/25 Final Prec@1 29.6875%
Stage: 0 Layer: 1 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('skip_connect', 0)], [('sep_conv_5x5', 2), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 2), ('skip_connect', 1)], [('dil_conv_5x5', 2), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('max_pool_3x3', 0)], [('avg_pool_3x3', 1), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 1), ('sep_conv_5x5', 2)], [('avg_pool_3x3', 1), ('sep_conv_5x5', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 3)], [('avg_pool_3x3', 1), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 0), ('dil_conv_3x3', 1)], [('sep_conv_3x3', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 genotype = Genotype(normal=[[('avg_pool_3x3', 0), ('avg_pool_3x3', 1)], [('avg_pool_3x3', 1), ('avg_pool_3x3', 2)], [('sep_conv_5x5', 3), ('avg_pool_3x3', 0)], [('avg_pool_3x3', 4), ('max_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Final best Prec@1 = 10.0000%
Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
Retrain Epoch 3 LR 0.05
Retrain: Layer 1/3 Epoch 4/25 Step 000/002 Loss 3.459 Loss_distill 1.153 Prec@(1,5) (9.4%, 48.4%)
Retrain: Layer 1/3 Epoch 4/25 Final Prec@1 10.9375%
Valid: Layer 1/3 Epoch 4/25 Step 000/157 Loss 2.298 Prec@(1,5) (10.9%, 54.7%)
Valid: Layer 1/3 Epoch 4/25 Step 010/157 Loss 2.304 Prec@(1,5) (9.8%, 50.7%)
Valid: Layer 1/3 Epoch 4/25 Step 020/157 Loss 2.303 Prec@(1,5) (10.3%, 50.7%)
Valid: Layer 1/3 Epoch 4/25 Step 030/157 Loss 2.303 Prec@(1,5) (10.0%, 49.8%)
Valid: Layer 1/3 Epoch 4/25 Step 040/157 Loss 2.304 Prec@(1,5) (9.9%, 49.5%)
Valid: Layer 1/3 Epoch 4/25 Step 050/157 Loss 2.304 Prec@(1,5) (9.7%, 49.8%)
Valid: Layer 1/3 Epoch 4/25 Step 060/157 Loss 2.304 Prec@(1,5) (10.2%, 50.1%)
Valid: Layer 1/3 Epoch 4/25 Step 070/157 Loss 2.304 Prec@(1,5) (10.2%, 50.3%)
Valid: Layer 1/3 Epoch 4/25 Step 080/157 Loss 2.304 Prec@(1,5) (9.9%, 50.3%)
Valid: Layer 1/3 Epoch 4/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.8%, 50.7%)
Valid: Layer 1/3 Epoch 4/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.9%, 50.4%)
Valid: Layer 1/3 Epoch 4/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 50.3%)
Valid: Layer 1/3 Epoch 4/25 Step 120/157 Loss 2.303 Prec@(1,5) (10.0%, 50.3%)
Valid: Layer 1/3 Epoch 4/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.0%, 50.2%)
Valid: Layer 1/3 Epoch 4/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.3%)
Valid: Layer 1/3 Epoch 4/25 Step 150/157 Loss 2.304 Prec@(1,5) (10.0%, 50.1%)
Valid: Layer 1/3 Epoch 4/25 Step 156/157 Loss 2.304 Prec@(1,5) (10.0%, 50.0%)
Valid: Layer 1/3 Epoch 4/25 Final Prec@1 10.0000%
Final best Prec@1 = 10.0000%
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3332, 0.3333, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2501, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3334, 0.3332], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2499, 0.2501, 0.2500, 0.2499], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1251, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
#####################
####### BETA #######
# Beta - normal
tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2501, 0.2500, 0.2500, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.1999, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5002, 0.4998], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3334, 0.3334, 0.3332], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2501, 0.2499, 0.2500], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2000, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
#####################
####### ALPHA #######
# Alpha - normal
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0', grad_fn=<SoftmaxBackward>)
# Alpha - reduce
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
device='cuda:0')
#####################
####### BETA #######
# Beta - normal
tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0',
grad_fn=<SoftmaxBackward>)
tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
grad_fn=<SoftmaxBackward>)
# Beta - reduce
tensor([0.5000, 0.5000], device='cuda:0')
tensor([0.3333, 0.3333, 0.3333], device='cuda:0')
tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0')
tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0')
#####################
|
Cream/CDARTS/benchmark201/search/cifar10-search/cifar10-search.log/0
|
{
"file_path": "Cream/CDARTS/benchmark201/search/cifar10-search/cifar10-search.log",
"repo_id": "Cream",
"token_count": 235681
}
| 295 |
Hiring research interns for neural architecture search projects: [email protected]
# Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search
This is an official implementation for our Cream NAS work presented in NeurIPS'20.
**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (password: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) [[Slides]]() [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)** <br/>
In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop.
<div >
<img src="./demo/intro.jpg" width="800"/>
</div>
## Environment Setup
To set up the enviroment you can easily run the following command:
```buildoutcfg
git clone https://github.com/mapleam/Cream.git
cd Cream
conda create -n Cream python=3.6
conda activate Cream
pip install -r requirements
# (required) install apex to accelerate the training, a little bit faster than pytorch DistributedDataParallel
cd lib
git clone https://github.com/NVIDIA/apex.git
python ./apex/setup.py install --cpp_ext --cuda_ext
```
## Data Preparation
You need to first download the [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you cloud use the following script: <https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh> *[todo]*
Put the imagenet data in ./data. It should look like:
```buildoutcfg
./data/imagenet/train
./data/imagenet/val
...
```
## Checkpoints For Test
For quick test, we have stored the checkpoints of our models in [Google Drive](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing) (or [Baidu Disk (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)).
Just download the checkpoints from [Google Drive](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing) (or [Baidu Disk](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)) and put the checkpoints in `./experiments/workspace/ckps/`.
Then you can do the test right now (as described in the following *Quick Start, III. Test*).
Model download links:
Model | FLOPs | Top-1 Acc. % | Top-5 Acc. % | Link
--- |:---:|:---:|:---:|:---:
Cream_14 | 14M | 53.8 | 77.2 | [Google](https://drive.google.com/file/d/19knbGCUlU9DYJp9En8mzQ3o390Q0gJeB/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
Cream_43 | 43M | 66.3 | 86.7 | [Google](https://drive.google.com/file/d/1ILTXIuIqkN_WMfBd3lc6r-dNeADULYna/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
Cream_114 | 114M | 72.8 | 90.8 | [Google](https://drive.google.com/file/d/1DPoCEF0CufRsGyY5_iyCmeb0gT21knQG/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
Cream_287 | 287M | 77.6 | 93.3 | [Google](https://drive.google.com/file/d/1F3cbpmr91vwzlcoGZauqieRm5Ca0glZ_/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
Cream_481 | 481M | 79.2 | 94.2 | [Google](https://drive.google.com/file/d/1RzJBr7wc1XolNtw8TvMDRPeFzyRypuS9/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
Cream_604 | 604M | 80.0 | 94.7 | [Google](https://drive.google.com/file/d/18ZUkgxIGqQ0DaW1oTAVsL0JT1l0YAX1K/view?usp=sharing) / [Baidu (password: wqw6)](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g)
## Quick Start
We provide *search*, *retrain* and *test* code of our Cream NAS algorithm as follows.
### I. Search
To search for an architecture, you need to configure the parameters `FLOPS_MINIMUM` and `FLOPS_MAXIMUM` to specify the desired model Flops, for example, pick up a value from 0 to 600 MB Flops. You can specify the Flops interval [`FLOPS_MINIMUM`, `FLOPS_MAXIMUM`] in the YAML configure file: `./experiments/configs/train/train.yaml`.
```buildoutcfg
Line 51:FLOPS_MINIMUM: 0 # Minimum Flops of Architecture
Line 52:FLOPS_MAXIMUM: 600 # Maximum Flops of Architecture
```
After you specify the Flops of the architectures, you can search an architecture now by running:
```buildoutcfg
python ./tools/main.py train ./experiments/configs/train/train.yaml
```
### II. Retrain
We also give the architecture we searched. To train those architectures, you need to configure the parameter `MODEL_SELECTION` to specify the model Flops. To specify which model to train, you should add `MODEL_SELECTION` in `./experiments/configs/retrain/retrain.yaml`. You can select one from [14,43,114,287,481,604], which stands for different Flops(MB).
```buildoutcfg
MODEL_SELECTION: 43 # Retrain 43m model
MODEL_SELECTION: 481 # Retrain 481m model
......
```
After specifying the Flops, you need to choose the config settings in `./experiments/configs/retrain/retrain.yaml`. The config files are in `./expperiments/configs/retrain`
```buildoutcfg
./experiments/configs/retrain/43.yaml
./experiments/configs/retrain/481.yaml
......
```
After adding `MODEL_SELECTION` in `retrain.yaml`, you need to use the following command to train the model.
```buildoutcfg
python ./tools/main.py retrain ./experiments/configs/retrain/retrain.yaml
```
The trained model and log file will be saved in `./experiments/workspace/retrain`. You can configure the `SAVE_PATH` in `./experiments/configs/retrain/retrain.yaml` to specify a path for saving the model and log file.
### III. Test
To test our trained of models, you need to use `MODEL_SELECTION` in `./experiments/configs/test/test.yaml` to specify which model to test.
```buildoutcfg
MODEL_SELECTION: 43 # test 43m model
MODEL_SELECTION: 481 # test 481m model
......
```
After specifying the Flops of the model, you need to write the path to the resume model in `./experiments/configs/test/test.yaml`.
```buildoutcfg
RESUME_PATH: './experiments/workspace/ckps/43.pth.tar'
RESUME_PATH: './experiments/workspace/ckps/481.pth.tar'
......
```
After adding `MODEL_SELECTION` and `RESUME_PATH` in `./experiments/configs/test/test.yaml`, you need to use the following command to test the model.
```buildoutcfg
python ./tools/main.py test ./experiments/configs/test/test.yaml
```
The test result will be saved in `./experiments/workspace/test`. You can configure the `SAVE_PATH` in `./experiments/configs/test/test.yaml` to specify a path for it.
##### Test Rank Correlation
To perform a correlation analysis, we randomly sample 30 subnetworks from the hypernetwork and calculate the rank correlation between the weight sharing performance and the true performance of training from scratch. Unfortunately, training these subnetworks on ImageNet is very computational expensive, we thus construct a subImageNet dataset, which only consists of 100 classes randomly sampled from ImageNet. Each class has 250 training images and 50 validation images. We can generate the imagelist by running the following script:
```buildoutcfg
python ./tools/generate_subImageNet.py
```
Thus we get the subImageNet in `./data/subImagnet`. The class list is provided in `./data/subclass_list.txt`. The images list is provided in `./data/subimages_list.txt`
## Performance
The top-1 accuracy on ImageNet. Our method achieves very competitive performance, being superior to the recent MobileNetV3 and EfficientNet-B0/B1.
<div align="half">
<img src="./demo/results_600.jpg" width="400"/>
<img src="./demo/results_100.jpg" width="400"/>
</div>
## BibTex
```bibtex
@article{Cream,
title={Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search},
author={Peng, Houwen and Du, Hao and Yu, Hongyuan and Li, Qi and Liao, Jing and Fu, Jianlong},
journal={Advances in Neural Information Processing Systems},
volume={33},
year={2020}
}
```
## License
License under an MIT license.
|
Cream/Cream/README.md/0
|
{
"file_path": "Cream/Cream/README.md",
"repo_id": "Cream",
"token_count": 3082
}
| 296 |
AUTO_RESUME: False
DATA_DIR: './data/imagenet'
MODEL: 'Supernet_Training'
RESUME_PATH: './experiments/workspace/train/resume.pth.tar'
SAVE_PATH: './experiments/workspace/train'
SEED: 42
LOG_INTERVAL: 50
RECOVERY_INTERVAL: 0
WORKERS: 8
NUM_GPU: 8
SAVE_IMAGES: False
AMP: False
OUTPUT: 'None'
EVAL_METRICS: 'prec1'
TTA: 0
LOCAL_RANK: 0
DATASET:
NUM_CLASSES: 1000
IMAGE_SIZE: 224 # image patch size
INTERPOLATION: 'bilinear' # Image resize interpolation type
BATCH_SIZE: 128 # batch size
NET:
GP: 'avg'
DROPOUT_RATE: 0.0
EMA:
USE: True
FORCE_CPU: False # force model ema to be tracked on CPU
DECAY: 0.9998
OPT: 'sgd'
LR: 1.0
EPOCHS: 120
META_LR: 1e-4
BATCHNORM:
SYNC_BN: False
SUPERNET:
UPDATE_ITER: 200
SLICE: 4
POOL_SIZE: 10
RESUNIT: False
DIL_CONV: False
UPDATE_2ND: True
FLOPS_MINIMUM: 0
FLOPS_MAXIMUM: 600
PICK_METHOD: 'meta'
META_STA_EPOCH: 20
HOW_TO_PROB: 'pre_prob'
PRE_PROB: (0.05,0.2,0.05,0.5,0.05,0.15)
|
Cream/Cream/experiments/configs/train/train.yaml/0
|
{
"file_path": "Cream/Cream/experiments/configs/train/train.yaml",
"repo_id": "Cream",
"token_count": 461
}
| 297 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
from lib.utils.builder_util import *
from lib.utils.search_structure_supernet import *
from lib.models.builders.build_supernet import *
from lib.utils.op_by_layer_dict import flops_op_dict
from timm.models.layers import SelectAdaptivePool2d
from timm.models.layers.activations import hard_sigmoid
# Supernet Structures
class SuperNet(nn.Module):
def __init__(
self,
block_args,
choices,
num_classes=1000,
in_chans=3,
stem_size=16,
num_features=1280,
head_bias=True,
channel_multiplier=1.0,
pad_type='',
act_layer=nn.ReLU,
drop_rate=0.,
drop_path_rate=0.,
slice=4,
se_kwargs=None,
norm_layer=nn.BatchNorm2d,
logger=None,
norm_kwargs=None,
global_pool='avg',
resunit=False,
dil_conv=False,
verbose=False):
super(SuperNet, self).__init__()
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self._in_chs = in_chans
self.logger = logger
# Stem
stem_size = round_channels(stem_size, channel_multiplier)
self.conv_stem = create_conv2d(
self._in_chs, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size, **norm_kwargs)
self.act1 = act_layer(inplace=True)
self._in_chs = stem_size
# Middle stages (IR/ER/DS Blocks)
builder = SuperNetBuilder(
choices,
channel_multiplier,
8,
None,
32,
pad_type,
act_layer,
se_kwargs,
norm_layer,
norm_kwargs,
drop_path_rate,
verbose=verbose,
resunit=resunit,
dil_conv=dil_conv,
logger=self.logger)
self.blocks = builder(self._in_chs, block_args)
self._in_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = create_conv2d(
self._in_chs,
self.num_features,
1,
padding=pad_type,
bias=head_bias)
self.act2 = act_layer(inplace=True)
# Classifier
self.classifier = nn.Linear(
self.num_features *
self.global_pool.feat_mult(),
self.num_classes)
self.meta_layer = nn.Linear(self.num_classes * slice, 1)
efficientnet_init_weights(self)
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.num_classes = num_classes
self.classifier = nn.Linear(
self.num_features * self.global_pool.feat_mult(),
num_classes) if self.num_classes else None
def forward_features(self, x, architecture):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
for layer, layer_arch in zip(self.blocks, architecture):
for blocks, arch in zip(layer, layer_arch):
if arch == -1:
continue
x = blocks[arch](x)
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
return x
def forward(self, x, architecture):
x = self.forward_features(x, architecture)
x = x.flatten(1)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return self.classifier(x)
def forward_meta(self, features):
return self.meta_layer(features.view(1, -1))
def rand_parameters(self, architecture, meta=False):
for name, param in self.named_parameters(recurse=True):
if 'meta' in name and meta:
yield param
elif 'blocks' not in name and 'meta' not in name and (not meta):
yield param
if not meta:
for layer, layer_arch in zip(self.blocks, architecture):
for blocks, arch in zip(layer, layer_arch):
if arch == -1:
continue
for name, param in blocks[arch].named_parameters(
recurse=True):
yield param
class Classifier(nn.Module):
def __init__(self, num_classes=1000):
super(Classifier, self).__init__()
self.classifier = nn.Linear(num_classes, num_classes)
def forward(self, x):
return self.classifier(x)
def gen_supernet(flops_minimum=0, flops_maximum=600, **kwargs):
choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]}
num_features = 1280
# act_layer = HardSwish
act_layer = Swish
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_se0.25'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25',
'ir_r1_k3_s1_e4_c24_se0.25'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25',
'ir_r1_k5_s2_e4_c40_se0.25'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25',
'ir_r2_k3_s1_e4_c80_se0.25'],
# stage 4, 14x14in
['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25',
'ir_r1_k3_s1_e6_c96_se0.25'],
# stage 5, 14x14in
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25',
'ir_r1_k5_s2_e6_c192_se0.25'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c320_se0.25'],
]
sta_num, arch_def, resolution = search_for_layer(flops_op_dict, arch_def, flops_minimum, flops_maximum)
if sta_num is None or arch_def is None or resolution is None:
raise ValueError('Invalid FLOPs Settings')
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
choices=choices,
num_features=num_features,
stem_size=16,
norm_kwargs=resolve_bn_args(kwargs),
act_layer=act_layer,
se_kwargs=dict(
act_layer=nn.ReLU,
gate_fn=hard_sigmoid,
reduce_mid=True,
divisor=8),
**kwargs,
)
model = SuperNet(**model_kwargs)
return model, sta_num, resolution
|
Cream/Cream/lib/models/structures/supernet.py/0
|
{
"file_path": "Cream/Cream/lib/models/structures/supernet.py",
"repo_id": "Cream",
"token_count": 3647
}
| 298 |
"""
Misc functions, including distributed helpers and model loaders
Also include a model loader specified for finetuning EfficientViT
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total],
dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
def replace_layernorm(net):
import apex
for child_name, child in net.named_children():
if isinstance(child, torch.nn.LayerNorm):
setattr(net, child_name, apex.normalization.FusedLayerNorm(
child.weight.size(0)))
else:
replace_layernorm(child)
def load_model(modelpath, model):
'''
A function to load model from a checkpoint, which is used
for fine-tuning on a different resolution.
'''
checkpoint = torch.load(modelpath, map_location='cpu')
state_dict = checkpoint['model']
model_state_dict = model.state_dict()
# bicubic interpolate attention_biases if not match
rpe_idx_keys = [
k for k in state_dict.keys() if "attention_bias_idxs" in k]
for k in rpe_idx_keys:
print("deleting key: ", k)
del state_dict[k]
relative_position_bias_table_keys = [
k for k in state_dict.keys() if "attention_biases" in k]
for k in relative_position_bias_table_keys:
relative_position_bias_table_pretrained = state_dict[k]
relative_position_bias_table_current = model_state_dict[k]
nH1, L1 = relative_position_bias_table_pretrained.size()
nH2, L2 = relative_position_bias_table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {k} due to different number of heads")
else:
if L1 != L2:
# bicubic interpolate relative_position_bias_table if not match
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.view(1, nH1, S1, S1), size=(S2, S2),
mode='bicubic')
state_dict[k] = relative_position_bias_table_pretrained_resized.view(
nH2, L2)
checkpoint['model'] = state_dict
return checkpoint
|
Cream/EfficientViT/classification/utils.py/0
|
{
"file_path": "Cream/EfficientViT/classification/utils.py",
"repo_id": "Cream",
"token_count": 4480
}
| 299 |
# model settings
model = dict(
type='FastRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
Cream/EfficientViT/downstream/configs/_base_/models/fast_rcnn_r50_fpn.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/models/fast_rcnn_r50_fpn.py",
"repo_id": "Cream",
"token_count": 1210
}
| 300 |
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed # , train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import (collect_env, get_device, get_root_logger,
setup_multi_processes, update_data_root)
from mmdet_custom.apis.train import train_detector
import mmcv_custom.runner.epoch_based_runner
import mmcv_custom.runner.optimizer
import sys
import efficientvit
import efficientvit_fpn
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='resume from the latest checkpoint automatically')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='(Deprecated, please use --gpu-id) number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--diff-seed',
action='store_true',
help='Whether or not set different seeds for different ranks')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
warnings.warn('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file. Please update all the '
'configuration files to mmdet >= 2.24.1.')
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.auto_resume = args.auto_resume
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
cfg.device = get_device()
# set random seeds
seed = init_random_seed(args.seed, device=cfg.device)
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
Cream/EfficientViT/downstream/train.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/train.py",
"repo_id": "Cream",
"token_count": 3905
}
| 301 |
MODEL:
TYPE: swin_minivit_distill
NAME: swin_small_patch4_window7_224_minivit
DROP_PATH_RATE: 0.1
SWIN:
EMBED_DIM: 96
DEPTHS: [ 2, 2, 18, 2 ]
NUM_HEADS: [ 3, 6, 12, 24 ]
WINDOW_SIZE: 7
MINIVIT:
SEPARATE_LAYERNUM_LIST: [1, 1, 9, 1]
|
Cream/MiniViT/Mini-Swin/configs/swin_small_patch4_window7_224_minivit_sharenum2.yaml/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/configs/swin_small_patch4_window7_224_minivit_sharenum2.yaml",
"repo_id": "Cream",
"token_count": 140
}
| 302 |
# TinyCLIP Training
In this document, we introduce ***auto weight inheritance*** and ***manual weight inheritance method*** to train a TinyCLIP model with the proposed ***cross-modalities distillation***.
:star: **[Notice]** Please replace the training data loader with the one loading LAION-400M or YFCC-15M.
Reference: [OpenCLIP Data](https://github.com/mlfoundations/open_clip?tab=readme-ov-file#data)
### Auto weight inheritance training
In this part, we compress OpenCLIP ViT-B/32 to 25% of origin size using three stages, where the model is compressed from 100% to 75%, from 75% to 50% and from 50% to 25% in the three stages, respectively.
One bash script corresponds to one stage training, training for the next stage begins after the completion of the previous stage. We use 4 nodes (8 GPUs per node) to train the model with auto weight inheritance:
```bash
sh script/auto_weight_inherit_100to75.sh # first stage
sh script/auto_weight_inherit_75to50.sh # second stage
sh script/auto_weight_inherit_50to25.sh # third stage
```
### Manual weight inheritance training
In this part, we compress OpenCLIP ViT-B/32 to 50% of origin size using two stages, where the model is compressed from 100% to 75% and from 75% to 50% in the two stages, respectively.
The training with manual weight inheritance is conducted using four nodes, just as in the case of automatic weight inheritance.
```bash
sh script/manual_weight_inherit_100to75.sh # first stage
sh script/manual_weight_inherit_75to50.sh # second stage
```
|
Cream/TinyCLIP/docs/PRETRAINING.md/0
|
{
"file_path": "Cream/TinyCLIP/docs/PRETRAINING.md",
"repo_id": "Cream",
"token_count": 426
}
| 303 |
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
from open_clip.loss import gather_features, gather_feature
from contextlib import nullcontext
import numpy as np
class ClipSoftLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=None,
world_size=None,
use_horovod=False,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
if rank is None:
assert world_size is None
rank, world_size = dist.get_rank(), dist.get_world_size()
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
assert self.local_loss
# cache state
self.feat_buffer = dict()
def compute_sim(self, image_features, text_features):
all_image_features = self.gather_feature(image_features)
all_text_features = self.gather_feature(text_features)
# calculate logits
# with torch.cuda.amp.autocast(enabled=False):
with nullcontext():
logits_per_image = image_features @ all_text_features.T
logits_per_text = text_features @ all_image_features.T
return logits_per_image, logits_per_text
def gather_feature(self, feat):
feat_id = id(feat)
if feat_id not in self.feat_buffer:
args = (self.local_loss, self.gather_with_grad,
self.rank, self.world_size, self.use_horovod)
all_feat = gather_feature(feat, *args)
self.feat_buffer[feat_id] = all_feat
return self.feat_buffer[feat_id]
def forward(self,
image_features, text_features, logit_scale,
teacher_image_features, teacher_text_features, teacher_logit_scale,
average_two_losses=True,
labels=None,
):
# calculated ground-truth and cache if enabled
logits_per_image, logits_per_text = self.compute_sim(
image_features, text_features)
teacher_logits_per_image, teacher_logits_per_text = self.compute_sim(
teacher_image_features, teacher_text_features)
self.feat_buffer.clear()
# with torch.cuda.amp.autocast(enabled=False):
with nullcontext():
logits_per_image = logit_scale * logits_per_image
logits_per_text = logit_scale * logits_per_text
teacher_logits_per_image = teacher_logit_scale * teacher_logits_per_image
teacher_logits_per_text = teacher_logit_scale * teacher_logits_per_text
def single_loss_fn(logits, teacher_logits):
teacher_probs = F.softmax(teacher_logits, -1)
return F.cross_entropy(logits, teacher_probs)
if average_two_losses:
total_loss = (single_loss_fn(logits_per_image, teacher_logits_per_image) +
single_loss_fn(logits_per_text, teacher_logits_per_text)) / 2
return total_loss
else:
img2text_loss = single_loss_fn(
logits_per_image, teacher_logits_per_image)
text2img_loss = single_loss_fn(
logits_per_text, teacher_logits_per_text)
return img2text_loss, text2img_loss
|
Cream/TinyCLIP/src/open_clip/clip_soft_loss.py/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/clip_soft_loss.py",
"repo_id": "Cream",
"token_count": 1652
}
| 304 |
""" OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import os
import warnings
from typing import Union, List
import torch
from .model import build_model_from_openai_state_dict
from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained_from_url
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_tag_models('openai')
def load_openai_model(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available(
) else "cpu",
jit=True,
cache_dir=None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
cache_dir : Optional[str]
The directory to cache the downloaded model weights
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained_from_url(
get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(
f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(
model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(
f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
try:
model = build_model_from_openai_state_dict(
state_dict or model.state_dict()).to(device)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd).to(device)
if str(device) == "cpu":
model.float()
return model
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones(
[]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes(
"prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(
lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
# ensure image_size attr available at consistent location for both jit and non-jit
model.visual.image_size = model.input_resolution.item()
return model
|
Cream/TinyCLIP/src/open_clip/openai.py/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/openai.py",
"repo_id": "Cream",
"token_count": 2042
}
| 305 |
from __future__ import division
import torch
import math
import sys
from .aug_random import random
from PIL import Image
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import warnings
from torchvision.transforms import functional as F
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "Resize", "Scale", "CenterCrop", "Pad",
"Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop", "RandomHorizontalFlip",
"RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop", "LinearTransformation",
"ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale"]
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class ToPILImage(object):
"""Convert a tensor or an ndarray to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
def __repr__(self):
format_string = self.__class__.__name__ + '('
if self.mode is not None:
format_string += 'mode={0}'.format(self.mode)
format_string += ')'
return format_string
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std, self.inplace)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class Resize(object):
"""Resize the input PIL Image to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, interpolation=Image.BILINEAR):
assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be scaled.
Returns:
PIL Image: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
class Scale(Resize):
"""
Note: This transform is deprecated in favor of Resize.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.Scale transform is deprecated, " +
"please use transforms.Resize instead.")
super(Scale, self).__init__(*args, **kwargs)
class CenterCrop(object):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class Pad(object):
"""Pad the given PIL Image on all sides with the given "pad" value.
Args:
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image
- reflect: pads with reflection of image without repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, padding, fill=0, padding_mode='constant'):
assert isinstance(padding, (numbers.Number, tuple))
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
def __repr__(self):
return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
format(self.padding, self.fill, self.padding_mode)
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class RandomTransforms(object):
"""Base class for a list of transformations with randomness
Args:
transforms (list or tuple): list of transformations
"""
def __init__(self, transforms):
assert isinstance(transforms, (list, tuple))
self.transforms = transforms
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomApply(RandomTransforms):
"""Apply randomly a list of transformations with a given probability
Args:
transforms (list or tuple): list of transformations
p (float): probability
"""
def __init__(self, transforms, p=0.5):
super(RandomApply, self).__init__(transforms)
self.p = p
def __call__(self, img):
if self.p < random.random():
return img
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += '\n p={}'.format(self.p)
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
class RandomChoice(RandomTransforms):
"""Apply single transformation randomly picked from a list
"""
def __call__(self, img):
t = random.choice(self.transforms)
return t(img)
class RandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively. If a sequence of length 2 is provided, it is used to
pad left/right, top/bottom borders, respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.vflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5 and min(ratio) <= (h / w) <= max(ratio):
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
class RandomSizedCrop(RandomResizedCrop):
"""
Note: This transform is deprecated in favor of RandomResizedCrop.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
"please use transforms.RandomResizedCrop instead.")
super(RandomSizedCrop, self).__init__(*args, **kwargs)
class FiveCrop(object):
"""Crop the given PIL Image into four corners and the central crop
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
Example:
>>> transform = Compose([
>>> FiveCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
def __call__(self, img):
return F.five_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class TenCrop(object):
"""Crop the given PIL Image into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default)
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip(bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
self.vertical_flip = vertical_flip
def __call__(self, img):
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
class LinearTransformation(object):
"""Transform a tensor image with a square transformation matrix computed
offline.
Given transformation_matrix, will flatten the torch.*Tensor, compute the dot
product with the transformation matrix and reshape the tensor to its
original shape.
Applications:
- whitening: zero-center the data, compute the data covariance matrix
[D x D] with np.dot(X.T, X), perform SVD on this matrix and
pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
"""
def __init__(self, transformation_matrix):
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError("transformation_matrix should be square. Got " +
"[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
self.transformation_matrix = transformation_matrix
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be whitened.
Returns:
Tensor: Transformed image.
"""
if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
raise ValueError("tensor and transformation matrix have incompatible shape." +
"[{} x {} x {}] != ".format(*tensor.size()) +
"{}".format(self.transformation_matrix.size(0)))
flat_tensor = tensor.view(1, -1)
transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
tensor = transformed_tensor.view(tensor.size())
return tensor
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += (str(self.transformation_matrix.numpy().tolist()) + ')')
return format_string
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center)
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ')'
return format_string
class RandomAffine(object):
"""Random affine transformation of the image keeping center invariant
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or float or int, optional): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Will not apply shear by default
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
"degrees should be a list or tuple and it must be of length 2."
self.degrees = degrees
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
"scale should be a list or tuple and it must be of length 2."
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError("If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
"shear should be a list or tuple and it must be of length 2."
self.shear = shear
else:
self.shear = shear
self.resample = resample
self.fillcolor = fillcolor
@staticmethod
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
def __call__(self, img):
"""
img (PIL Image): Image to be transformed.
Returns:
PIL Image: Affine transformed image.
"""
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.resample > 0:
s += ', resample={resample}'
if self.fillcolor != 0:
s += ', fillcolor={fillcolor}'
s += ')'
d = dict(self.__dict__)
d['resample'] = _pil_interpolation_to_str[d['resample']]
return s.format(name=self.__class__.__name__, **d)
class Grayscale(object):
"""Convert image to grayscale.
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
Returns:
PIL Image: Grayscale version of the input.
- If num_output_channels == 1 : returned image is single channel
- If num_output_channels == 3 : returned image is 3 channel with r == g == b
"""
def __init__(self, num_output_channels=1):
self.num_output_channels = num_output_channels
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Randomly grayscaled image.
"""
return F.to_grayscale(img, num_output_channels=self.num_output_channels)
def __repr__(self):
return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
class RandomGrayscale(object):
"""Randomly convert image to grayscale with a probability of p (default 0.1).
Args:
p (float): probability that image should be converted to grayscale.
Returns:
PIL Image: Grayscale version of the input image with probability p and unchanged
with probability (1-p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
"""
def __init__(self, p=0.1):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Randomly grayscaled image.
"""
num_output_channels = 1 if img.mode == 'L' else 3
if random.random() < self.p:
return F.to_grayscale(img, num_output_channels=num_output_channels)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={0})'.format(self.p)
|
Cream/TinyViT/data/augmentation/aug_tv_transforms.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/aug_tv_transforms.py",
"repo_id": "Cream",
"token_count": 17077
}
| 306 |
""" A dataset parser that reads images from folders
Folders are scannerd recursively to find image files. Labels are based
on the folder hierarchy, just leaf folders by default.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
from timm.utils.misc import natural_key
from .parser import Parser
from .class_map import load_class_map
from .constants import IMG_EXTENSIONS
def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
labels = []
filenames = []
for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True):
rel_path = os.path.relpath(root, folder) if (root != folder) else ''
label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
for f in files:
base, ext = os.path.splitext(f)
if ext.lower() in types:
filenames.append(os.path.join(root, f))
labels.append(label)
if class_to_idx is None:
# building class index
unique_labels = set(labels)
sorted_labels = list(sorted(unique_labels, key=natural_key))
class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx]
if sort:
images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
return images_and_targets, class_to_idx
class ParserImageFolder(Parser):
def __init__(
self,
root,
class_map=''):
super().__init__()
self.root = root
class_to_idx = None
if class_map:
class_to_idx = load_class_map(class_map, root)
self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx)
if len(self.samples) == 0:
raise RuntimeError(
f'Found 0 images in subfolders of {root}. Supported image extensions are {", ".join(IMG_EXTENSIONS)}')
def __getitem__(self, index):
path, target = self.samples[index]
return open(path, 'rb'), target
def __len__(self):
return len(self.samples)
def _filename(self, index, basename=False, absolute=False):
filename = self.samples[index][0]
if basename:
filename = os.path.basename(filename)
elif not absolute:
filename = os.path.relpath(filename, self.root)
return filename
|
Cream/TinyViT/data/augmentation/parsers/parser_image_folder.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/parsers/parser_image_folder.py",
"repo_id": "Cream",
"token_count": 1087
}
| 307 |
# Training TinyViT
In this document, we introduce how to pretrain TinyViT with the proposed fast pretraining distillation.
Note: If the GPU memory is not enough to fit the batch size, you can use `Gradient accumulation steps` by adding the argument `--accumulation-steps <acc_steps>`. For example, the accumulated batch size per GPU is 128 (= 32 x 4) when passing the arguments `--batch-size 32 --accumulation-steps 4`.
## Pretrain the model on ImageNet-22k with the proposed fast pretraining distillation.
Before training with the proposed fast pretraining distillation, we need to store the teacher sparse soft labels by [the tutorial](./SAVE_TEACHER_LOGITS.md).
Assume that the teacher sparse soft labels are stored in the path `./teacher_logits/`, and the IN-22k dataset is stored in the folder `./ImageNet-22k`.
We use 4 nodes (8 GPUs per node) to pretrain the model on IN-22k with the distillation of stored soft labels.
```bash
python -m torch.distributed.launch --master_addr=$MASTER_ADDR --nproc_per_node 8 --nnodes=4 --node_rank=$NODE_RANK main.py --cfg configs/22k_distill/tiny_vit_21m_22k_distill.yaml --data-path ./ImageNet-22k --batch-size 128 --output ./output --opts DISTILL.TEACHER_LOGITS_PATH ./teacher_logits/
```
where `$NODE_RANK` and `$MASTER_ADDR` are the rank of a node and the IP address of the master node.
## Finetune on ImageNet-1k
- Finetune the pretrained model from IN-22k to IN-1k
After pretrained on IN-22k, the model can be finetuned on IN-1k by the following command.
```
python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22kto1k/tiny_vit_21m_22kto1k.yaml --data-path ./ImageNet --batch-size 128 --pretrained ./checkpoints/tiny_vit_21m_22k_distill.pth --output ./output
```
where `tiny_vit_21m_22k.pth` is the checkpoint of pretrained TinyViT-21M on IN-22k dataset.
- Finetune with higher resolution
To obtain better accuracy, we finetune the model to higher resolution progressively (224 -> 384 -> 512).
<details>
<summary>Finetune with higher resolution from 224 to 384</summary>
<pre><code> python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/higher_resolution/tiny_vit_21m_224to384.yaml --data-path ./ImageNet --batch-size 32 --pretrained ./checkpoints/tiny_vit_21m_22kto1k_distill.pth --output ./output --accumulation-steps 4
</code></pre>
</details>
<details>
<summary>Finetune with higher resolution from 384 to 512</summary>
<pre><code> python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/higher_resolution/tiny_vit_21m_384to512.yaml --data-path ./ImageNet --batch-size 32 --pretrained ./checkpoints/tiny_vit_21m_22kto1k_384_distill.pth --output ./output --accumulation-steps 4
</code></pre>
</details>
## Train the model from scratch on ImageNet-1k
Here is the command to train TinyViT from scratch on ImageNet-1k.
```bash
python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/1k/tiny_vit_21m.yaml --data-path ./ImageNet --batch-size 128 --output ./output
```
|
Cream/TinyViT/docs/TRAINING.md/0
|
{
"file_path": "Cream/TinyViT/docs/TRAINING.md",
"repo_id": "Cream",
"token_count": 986
}
| 308 |
# ---------------------------------------------------------------
# TinyViT Utils
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Add `LRSchedulerWrapper` and `divide_param_groups_by_lr_scale`
# ---------------------------------------------------------------
import copy
import torch.distributed as dist
def is_main_process():
return dist.get_rank() == 0
class LRSchedulerWrapper:
"""
LR Scheduler Wrapper
This class attaches the pre-hook on the `step` functions (including `step`, `step_update`, `step_frac`) of a lr scheduler.
When `step` functions are called, the learning rates of all layers are updated.
Usage:
```
lr_scheduler = LRSchedulerWrapper(lr_scheduler, optimizer)
```
"""
def __init__(self, lr_scheduler, optimizer):
self.lr_scheduler = lr_scheduler
self.optimizer = optimizer
def step(self, epoch):
self.lr_scheduler.step(epoch)
self.update_lr()
def step_update(self, it):
self.lr_scheduler.step_update(it)
self.update_lr()
def step_frac(self, frac):
if hasattr(self.lr_scheduler, 'step_frac'):
self.lr_scheduler.step_frac(frac)
self.update_lr()
def update_lr(self):
param_groups = self.optimizer.param_groups
for group in param_groups:
if 'lr_scale' not in group:
continue
params = group['params']
# update lr scale
lr_scale = None
for p in params:
if hasattr(p, 'lr_scale'):
if lr_scale is None:
lr_scale = p.lr_scale
else:
assert lr_scale == p.lr_scale, (lr_scale, p.lr_scale)
if lr_scale != group['lr_scale']:
if is_main_process():
print('=' * 30)
print("params:", [e.param_name for e in params])
print(
f"change lr scale: {group['lr_scale']} to {lr_scale}")
group['lr_scale'] = lr_scale
if lr_scale is not None:
group['lr'] *= lr_scale
def state_dict(self):
return self.lr_scheduler.state_dict()
def load_state_dict(self, *args, **kwargs):
self.lr_scheduler.load_state_dict(*args, **kwargs)
def divide_param_groups_by_lr_scale(param_groups):
"""
Divide parameters with different lr scale into different groups.
Inputs
------
param_groups: a list of dict of torch.nn.Parameter
```
# example:
param1.lr_scale = param2.lr_scale = param3.lr_scale = 0.6
param4.lr_scale = param5.lr_scale = param6.lr_scale = 0.3
param_groups = [{'params': [param1, param2, param4]},
{'params': [param3, param5, param6], 'weight_decay': 0.}]
param_groups = divide_param_groups_by_lr_scale(param_groups)
```
Outputs
-------
new_param_groups: a list of dict containing the key `lr_scale`
```
param_groups = [
{'params': [param1, param2], 'lr_scale': 0.6},
{'params': [param3], 'weight_decay': 0., 'lr_scale': 0.6}
{'params': [param4], 'lr_scale': 0.3},
{'params': [param5, param6], 'weight_decay': 0., 'lr_scale': 0.3}
]
```
"""
new_groups = []
for group in param_groups:
params = group.pop('params')
'''
divide parameters to different groups by lr_scale
'''
lr_scale_groups = dict()
for p in params:
lr_scale = getattr(p, 'lr_scale', 1.0)
# create a list if not existed
if lr_scale not in lr_scale_groups:
lr_scale_groups[lr_scale] = list()
# add the parameter with `lr_scale` into the specific group.
lr_scale_groups[lr_scale].append(p)
for lr_scale, params in lr_scale_groups.items():
# copy other parameter information like `weight_decay`
new_group = copy.copy(group)
new_group['params'] = params
new_group['lr_scale'] = lr_scale
new_groups.append(new_group)
return new_groups
def set_weight_decay(model):
skip_list = {}
skip_keywords = {}
if hasattr(model, 'no_weight_decay'):
skip_list = model.no_weight_decay()
if hasattr(model, 'no_weight_decay_keywords'):
skip_keywords = model.no_weight_decay_keywords()
has_decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or \
check_keywords_in_name(name, skip_keywords):
no_decay.append(param)
else:
has_decay.append(param)
return [{'params': has_decay},
{'params': no_decay, 'weight_decay': 0.}]
def check_keywords_in_name(name, keywords=()):
isin = False
for keyword in keywords:
if keyword in name:
isin = True
return isin
|
Cream/TinyViT/tinyvit_utils.py/0
|
{
"file_path": "Cream/TinyViT/tinyvit_utils.py",
"repo_id": "Cream",
"token_count": 2402
}
| 309 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, it can be ommitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
# Compute the giou cost betwen boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
|
Cream/iRPE/DETR-with-iRPE/models/matcher.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/models/matcher.py",
"repo_id": "Cream",
"token_count": 1675
}
| 310 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
|
Cream/iRPE/DETR-with-iRPE/util/box_ops.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/util/box_ops.py",
"repo_id": "Cream",
"token_count": 1193
}
| 311 |
_model_entrypoints = {}
def register_model(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints
|
CvT/lib/models/registry.py/0
|
{
"file_path": "CvT/lib/models/registry.py",
"repo_id": "CvT",
"token_count": 128
}
| 312 |
VALUE_LOWER_BOUND = -1.0e100
VALUE_UPPER_BOUND = 1.0e100
MIN_POINTS = 12
|
anomalydetector/aml_component/constants.py/0
|
{
"file_path": "anomalydetector/aml_component/constants.py",
"repo_id": "anomalydetector",
"token_count": 37
}
| 313 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.api.dataset_provider import DatasetProvider
__all__ = ['DatasetProvider']
|
archai/archai/api/__init__.py/0
|
{
"file_path": "archai/archai/api/__init__.py",
"repo_id": "archai",
"token_count": 48
}
| 314 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import argparse
import json
import os
from typing import List, Dict
from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
from azure.identity import DefaultAzureCredential
from archai.common.store import ArchaiStore
from azure.ai.ml import MLClient
class JobCompletionMonitor:
""" This helper class uses the ArchaiStore to monitor the status of some long running
training operations and the status of the Azure ML pipeline those jobs are running in
and waits for them to finish (either successfully or with a failure)"""
def __init__(self, store : ArchaiStore, ml_client : MLClient, metric_keys: List[str], pipeline_id=None, timeout=3600, throw_on_failure_rate=0.1):
"""
Initialize a JobCompletionMonitor instance.
:param store: an instance of ArchaiStore to monitor the status of some long running training operations
:param ml_client: an instance of MLClient to check the status of the Azure ML pipeline those jobs are running in
:param metric_keys: a list of column names to monitor and return from the Azure table.
:param pipeline_id: (optional) the ID of the Azure ML pipeline to monitor, if not provided we can get this from the ArchaiStore.
:param timeout: (optional) the timeout in seconds
:param throw_on_failure_rate: (optional) what ratio of jobs failed (between 0 and 1) should result in raising an an exception.
Zero means throw exception on any failure.
This is handy if you want to allow the search to continue even when a small percentage of jobs fails.
Default is 0.1, or 10% or more of jobs failed will raise an exception.
"""
self.store = store
self.ml_client = ml_client
self.timeout = timeout
self.pipeline_id = pipeline_id
self.metric_keys = metric_keys
self.throw_on_failure_rate = throw_on_failure_rate
def _check_entity_status(self, waiting, completed):
failed = 0
for i in range(len(waiting) - 1, -1, -1):
id = waiting[i]
e = self.store.get_status(id)
if self.pipeline_id is None and 'pipeline_id' in e:
self.pipeline_id = e['pipeline_id']
if e is not None and 'status' in e and (e['status'] == 'complete' or e['status'] == 'failed'):
del waiting[i]
completed[id] = e
if e['status'] == 'failed':
error = e['error']
print(f'Training job {id} failed with error: {error}')
failed += 1
else:
if len(self.metric_keys) > 0 and self.metric_keys[0] in e:
key = self.metric_keys[0]
metric = e[key]
print(f'Training job {id} completed with {key} = {metric}')
else:
print(f'Training job {id} completed')
return failed
def _get_model_results(self, model_ids, completed):
# stitch together the models.json file from our status table.
print('Top model results: ')
models = []
interesting_columns = self.metric_keys + ['status', 'error', 'epochs']
for id in model_ids:
row = {'id': id}
e = completed[id] if id in completed else {}
for key in interesting_columns:
if key in e:
row[key] = e[key]
models += [row]
return {
'models': models
}
def _cancel_waiting_list(self, waiting, pipeline_status):
# cancel any remaining jobs in the waiting list by marking an error status on the entity
for i in range(len(waiting) - 1, -1, -1):
id = waiting[i]
del waiting[i]
e = self.store.get_status(id)
if 'error' not in e:
e['error'] = f'Pipeline {pipeline_status}'
if 'status' not in e or e['status'] != 'complete':
e['status'] = pipeline_status.lower()
self.store.merge_status_entity(e)
def _get_pipeline_status(self):
# try and get the status of the Azure ML pipeline, it returns strings like
# 'Completed', 'Failed', 'Running', 'Preparing', 'Canceled' and so on.
try:
if self.pipeline_id is not None:
train_job = self.ml_client.jobs.get(self.pipeline_id)
if train_job is not None:
return train_job.status
except Exception as e:
print(f'Error getting pipeline status for pipeline {self.pipeline_id}: {e}')
def wait(self, model_ids: List[str]) -> List[Dict[str, str]]:
"""
Wait for all the training jobs to finish and return a list of dictionaries
containing details about each model, including their training validation accuracies.
:param model_ids: a list of training job IDs
:return: a list of dictionaries containing details about each model
"""
completed = {}
waiting = list(model_ids)
start = time.time()
failed = 0
while len(waiting) > 0:
failed += self._check_entity_status(waiting, completed)
if len(waiting) == 0:
break
# check the overall pipeline status just in case training jobs failed to even start.
pipeline_status = self._get_pipeline_status()
if pipeline_status is not None:
if pipeline_status == 'Completed':
# ok, all jobs are done, which means if we still have waiting tasks then they failed to
# even start.
self._cancel_waiting_list(waiting, 'failed to start')
elif pipeline_status == 'Failed' or pipeline_status == 'Canceled':
self._cancel_waiting_list(waiting, pipeline_status)
if len(waiting) > 0:
if time.time() > self.timeout + start:
break
print("AmlTrainingValAccuracy: Waiting 20 seconds for partial training to complete...")
time.sleep(20)
# awesome - they all completed!
if len(completed) == 0:
if time.time() > self.timeout + start:
raise Exception(f'Partial Training Pipeline timed out after {self.timeout} seconds')
else:
raise Exception('Partial Training Pipeline failed to start')
failure_rate = float(failed) / float(len(model_ids))
if failure_rate > self.throw_on_failure_rate:
raise Exception(f'Partial Training Pipeline failure rate {failure_rate} exceeds allowed threshold of {self.throw_on_failure_rate}')
results = self._get_model_results(model_ids, completed)
timespan = time.strftime('%H:%M:%S', time.gmtime(time.time() - start))
print(f'Training: Distributed training completed in {timespan} ')
print(f'Training: returning {len(results)} results:')
print(json.dumps(results, indent=2))
return results
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='bin hexed config json info for MLClient')
parser.add_argument('--timeout', type=int, help='pipeline timeout in seconds (default 1 hour)', default=3600)
parser.add_argument('--model_path', required=True, help='mounted path containing the pending.json file')
parser.add_argument('--output', required=True, help='folder to write the results to')
parser.add_argument('--metrics', type=str, help='metrics to return from the azure table')
args = parser.parse_args()
output = args.output
timeout = args.timeout
model_path = args.model_path
metrics = []
if args.metrics:
metrics = [x.strip() for x in args.metrics.split(',')]
print(f"Monitor running with model_path={model_path}")
if not os.path.isdir(model_path):
raise Exception("### directory not found")
models_file = os.path.join(model_path, 'pending.json')
if not os.path.isfile(models_file):
raise Exception("### 'pending.json' not found in --model_path")
models = json.load(open(models_file))
model_ids = [m['id'] for m in models['models']]
identity = AzureMLOnBehalfOfCredential()
if args.config:
print("Using AzureMLOnBehalfOfCredential...")
workspace_config = str(bytes.fromhex(args.config), encoding='utf-8')
print(f"Config: {workspace_config}")
config = json.loads(workspace_config)
else:
print("Using DefaultAzureCredential...")
config_file = "../.azureml/config.json"
print(f"Config: {config_file}")
config = json.load(open(config_file, 'r'))
identity = DefaultAzureCredential()
subscription = config['subscription_id']
resource_group = config['resource_group']
workspace_name = config['workspace_name']
storage_account_key = config['storage_account_key']
storage_account_name = config['storage_account_name']
ml_client = MLClient(
identity,
subscription,
resource_group,
workspace_name
)
store = ArchaiStore(storage_account_name, storage_account_key)
monitor = JobCompletionMonitor(store, ml_client, metrics, timeout=timeout)
results = monitor.wait(model_ids)
if output is not None:
# save the results with updated validation accuracies to models.json
with open(os.path.join(output, 'models.json'), 'w') as f:
f.write(json.dumps(results, indent=2))
if __name__ == "__main__":
main()
|
archai/archai/common/monitor.py/0
|
{
"file_path": "archai/archai/common/monitor.py",
"repo_id": "archai",
"token_count": 3979
}
| 315 |
from pathlib import Path
from typing import Callable, Optional, Tuple
from overrides import overrides
import torch
import torchvision.transforms.functional as F
from torchvision.io import read_image
from archai.api.dataset_provider import DatasetProvider
from archai.common.utils import download_and_extract_zip
class FaceSyntheticsDataset(torch.utils.data.Dataset):
CLASSES = ['background', 'skin', 'nose', 'right_eye', 'left_eye', 'right_brow', 'left_brow',
'right_ear', 'left_ear', 'mouth_interior', 'top_lip', 'bottom_lip', 'neck', 'hair',
'beard', 'clothing', 'glasses', 'headwear', 'facewear']
def __init__(self, dataset_dir: str, img_size: Tuple[int, int] = (256, 256),
subset: str = 'train', val_size: int = 2000, ignore_index: int = 255,
mask_size: Optional[Tuple[int, int]] = None,
augmentation: Optional[Callable] = None):
"""Face Synthetics Dataset
Args:
dataset_dir (str): Dataset directory.
img_size (Tuple[int, int]): Image size (width, height). Defaults to (256, 256).
subset (str, optional): Subset ['train', 'test', 'validation']. Defaults to 'train'.
val_size (int, optional): Validation set size. Defaults to 2000.
mask_size (Optional[Tuple[int, int]], optional): Segmentation mask size (width, height). If `None`,
`img_size` is used. Defaults to None.
augmentation (Optional[Callable], optional): Augmentation function. Expects a callable object
with named arguments 'image' and 'mask' that returns a dictionary with 'image' and 'mask' as
keys. Defaults to None.
"""
dataset_dir = Path(dataset_dir)
assert dataset_dir.is_dir()
assert isinstance(img_size, tuple)
zip_url = "https://facesyntheticspubwedata.blob.core.windows.net/iccv-2021/dataset_100000.zip"
self.img_size = img_size
self.dataset_dir = dataset_dir
self.subset = subset
self.mask_size = mask_size
self.augmentation = augmentation
all_seg_files = [str(f) for f in sorted(self.dataset_dir.glob('*_seg.png'))]
if len(all_seg_files) < 100000:
download_and_extract_zip(zip_url, self.dataset_dir)
all_seg_files = [str(f) for f in sorted(self.dataset_dir.glob('*_seg.png'))]
train_subset, test_subset = all_seg_files[:90_000], all_seg_files[90_000:]
if subset == 'train':
self.seg_files = train_subset[:-val_size] if val_size > 0 else train_subset
elif subset == 'validation':
self.seg_files = train_subset[-val_size:] if val_size > 0 else None
elif subset == 'test':
self.seg_files = test_subset
self.img_files = [s.replace("_seg.png",".png") for s in self.seg_files]
self.ignore_index = ignore_index
def __len__(self):
return len(self.img_files)
def __getitem__(self, idx):
sample = {
'image': read_image(self.img_files[idx]),
'mask': read_image(self.seg_files[idx]).long()
}
if self.augmentation and self.subset == 'train':
sample = self.augmentation(**sample)
sample['image'] = sample['image']/255
mask_size = self.mask_size if self.mask_size else self.img_size
sample['mask'] = F.resize(
sample['mask'], mask_size[::-1],
interpolation=F.InterpolationMode.NEAREST
)
sample['image'] = F.resize(sample['image'], self.img_size[::-1])
return sample
class FaceSyntheticsDatasetProvider(DatasetProvider):
def __init__(self, dataset_dir: str):
self.dataset_dir = Path(dataset_dir)
assert self.dataset_dir.is_dir()
@overrides
def get_train_dataset(self, **kwargs) -> torch.utils.data.Dataset:
return FaceSyntheticsDataset(
self.dataset_dir, subset='train', **kwargs
)
@overrides
def get_test_dataset(self, **kwargs) -> torch.utils.data.Dataset:
return FaceSyntheticsDataset(
self.dataset_dir, subset='test', **kwargs
)
@overrides
def get_val_dataset(self, **kwargs) -> torch.utils.data.Dataset:
return FaceSyntheticsDataset(
self.dataset_dir, subset='validation', **kwargs
)
|
archai/archai/datasets/cv/face_synthetics.py/0
|
{
"file_path": "archai/archai/datasets/cv/face_synthetics.py",
"repo_id": "archai",
"token_count": 1953
}
| 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import json
import os
import pickle
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import numpy as np
import torch
from datasets import load_dataset, load_from_disk
from datasets.dataset_dict import DatasetDict
from overrides import overrides
from transformers import AutoTokenizer, DataCollatorForLanguageModeling
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.datasets.nlp.fast_hf_dataset_provider_utils import (
FastHfDataset,
SHMArray,
process_with_memory_map_files,
process_with_shared_memory,
xor,
)
from archai.datasets.nlp.hf_dataset_provider_utils import tokenize_concatenated_dataset
logger = OrderedDictLogger(source=__name__)
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
ALLOW_SHARED_MEMORY = True
else:
logger.warn("Shared memory is not available in Python < 3.8.")
ALLOW_SHARED_MEMORY = False
class FastHfDatasetProvider(DatasetProvider):
"""Fast Hugging Face-based dataset provider."""
def __init__(
self,
train_file: str,
validation_file: str,
test_file: str,
tokenizer: Optional[AutoTokenizer] = None,
) -> None:
"""Initialize Fast Hugging Face-based dataset provider.
Args:
train_file: Path to the training array file (.npy).
validation_file: Path to the validation array file (.npy).
test_file: Path to the test array file (.npy).
tokenizer: Instance of tokenizer to use.
"""
super().__init__()
self.train_file = train_file
self.validation_file = validation_file
self.test_file = test_file
self.tokenizer = tokenizer
# Windows does not allow tests to memory map the same file
# when tests are running in parallel
self.mmap_mode = None if os.name == "nt" and os.getenv("PYTEST_CURRENT_TEST") else "r"
@staticmethod
def _create_splits(dataset_dict: DatasetDict, validation_split: float, shuffle: bool, seed: int) -> DatasetDict:
if "validation" not in dataset_dict:
logger.info("Creating validation split ...")
validation_split = validation_split or 0.1
tmp_dataset_dict = dataset_dict["train"].train_test_split(
test_size=validation_split, shuffle=shuffle, seed=seed
)
dataset_dict["train"] = tmp_dataset_dict["train"]
dataset_dict["validation"] = tmp_dataset_dict["test"]
if "test" not in dataset_dict:
logger.info("Creating test split ...")
tmp_dataset_dict = dataset_dict["validation"].train_test_split(test_size=0.25, shuffle=shuffle, seed=seed)
dataset_dict["validation"] = tmp_dataset_dict["train"]
dataset_dict["test"] = tmp_dataset_dict["test"]
return dataset_dict
@staticmethod
def _encode_dataset(
dataset_dict: DatasetDict,
tokenizer: AutoTokenizer,
mapping_fn: Callable[[Any], Dict[str, Any]],
mapping_fn_kwargs: Dict[str, Any],
mapping_column_name: List[str],
use_eos_token: bool,
dtype: np.dtype,
num_workers: int,
) -> DatasetDict:
logger.info("Encoding dataset ...")
logger.info(f"Number of workers: {num_workers} | EOS token: {use_eos_token}")
mapping_fn = mapping_fn or tokenize_concatenated_dataset
mapping_fn_kwargs = mapping_fn_kwargs or {
"tokenizer": tokenizer,
"mapping_column_name": mapping_column_name,
"use_eos_token": use_eos_token,
"dtype": dtype,
}
column_names = dataset_dict["train"].column_names
encoded_dataset_dict = dataset_dict.map(
mapping_fn,
fn_kwargs=mapping_fn_kwargs,
batched=True,
num_proc=num_workers,
remove_columns=column_names,
)
return encoded_dataset_dict
@staticmethod
def _close_mem_maps(processed_dataset_dict: DatasetDict) -> None:
for key in processed_dataset_dict:
dataset = processed_dataset_dict[key]
if isinstance(dataset, np.memmap) and dataset._mmap is not None:
dataset._mmap.close()
@staticmethod
def _process_dataset_to_memory(
dataset_dict: DatasetDict, cache_dir: str, dtype: np.dtype, num_workers: int, use_shared_memory: int
) -> Dict[str, Union[SHMArray, np.ndarray]]:
logger.info("Processing dataset to memory ...")
logger.info(f"Number of workers: {num_workers} | Shared memory: {use_shared_memory}")
if use_shared_memory:
return process_with_shared_memory(dataset_dict, dtype, num_proc=num_workers)
return process_with_memory_map_files(dataset_dict, cache_dir, dtype, num_proc=num_workers)
@staticmethod
def _save_dataset(
dataset_dict: Dict[str, Union[SHMArray, np.ndarray]],
tokenizer: AutoTokenizer,
cache_dir: str,
use_shared_memory: bool,
) -> Tuple[Path, Path, Path]:
logger.info(f"Saving dataset to: {cache_dir}")
cache_files = {}
for split, dataset in dataset_dict.items():
np.save(cache_dir / f"{split}.npy", dataset)
# If using shared memory, dataset needs to have its shared memory
# unlinked to prevent memory leak
if use_shared_memory:
dataset.shm.unlink()
# If not using shared memory, dataset needs to have its memory map
# closed to prevent an additional .bin file
if not use_shared_memory:
dataset._mmap.close()
Path(cache_dir / f"{split}.bin").unlink()
cache_files[f"{split}_file"] = cache_dir / f"{split}.npy"
with open(cache_dir / "tokenizer.pkl", "wb") as f:
pickle.dump(tokenizer, f)
return cache_files
@classmethod
def from_disk(
cls: FastHfDatasetProvider,
dataset_file_path: str,
tokenizer: Optional[AutoTokenizer] = None,
tokenizer_name: Optional[str] = None,
mapping_fn: Optional[Callable[[Any], Dict[str, Any]]] = None,
mapping_fn_kwargs: Optional[Dict[str, Any]] = None,
mapping_column_name: Optional[List[str]] = None,
validation_split: Optional[float] = 0.0,
shuffle: Optional[bool] = True,
seed: Optional[int] = 42,
num_workers: Optional[int] = 1,
use_eos_token: Optional[bool] = True,
use_shared_memory: Optional[bool] = True,
cache_dir: Optional[str] = "cache",
) -> FastHfDatasetProvider:
"""Load a dataset provider by loading and encoding data from disk.
Args:
dataset_file_path: Path to the dataset file stored in disk.
tokenizer: Instance of tokenizer to use.
tokenizer_name: Name of the tokenizer, if `tokenizer` has not been passed.
mapping_fn: A function that maps the dataset. If not provided,
the default `tokenize_concatenated_dataset` function will be used.
mapping_fn_kwargs: Keyword arguments to pass to `mapping_fn`.
mapping_column_name: The columns in the dataset to be tokenized.
If `str`, only one column will be tokenized.
If `List[str]`, multiple columns will be tokenized.
validation_split: Fraction of the dataset to use for validation.
shuffle: Whether to shuffle the dataset.
seed: Random seed.
num_workers: Number of workers to use for encoding.
use_eos_token: Whether to use EOS token to separate sequences.
use_shared_memory: Whether to use shared memory for caching.
cache_dir: Root path to the cache directory.
Returns:
Dataset provider.
"""
assert xor(tokenizer, tokenizer_name), "`tokenizer` and `tokenizer_name` are mutually exclusive."
tokenizer = tokenizer or AutoTokenizer.from_pretrained(tokenizer_name)
dtype = np.uint16 if tokenizer.vocab_size < 64 * 1024 else np.int32
use_shared_memory = use_shared_memory and ALLOW_SHARED_MEMORY
cache_dir = Path(cache_dir)
if cache_dir.is_dir():
logger.warn(f"Cache: {cache_dir} already exists and will be overritten.")
cache_dir.mkdir(parents=True, exist_ok=True)
# Ensure that loaded dataset is always a dictionary
logger.info(f"Loading dataset from: {dataset_file_path}")
disk_dataset_dict = load_from_disk(dataset_file_path)
if not isinstance(disk_dataset_dict, DatasetDict):
disk_dataset_dict = DatasetDict({"train": disk_dataset_dict})
# Ensure that `validation` and `test` splits are available
disk_dataset_dict = FastHfDatasetProvider._create_splits(disk_dataset_dict, validation_split, shuffle, seed)
encoded_dataset_dict = FastHfDatasetProvider._encode_dataset(
disk_dataset_dict,
tokenizer,
mapping_fn,
mapping_fn_kwargs,
mapping_column_name,
use_eos_token,
dtype,
num_workers,
)
processed_dataset_dict = FastHfDatasetProvider._process_dataset_to_memory(
encoded_dataset_dict, cache_dir, dtype, num_workers, use_shared_memory
)
cache_files = FastHfDatasetProvider._save_dataset(
processed_dataset_dict, tokenizer, cache_dir, use_shared_memory
)
FastHfDatasetProvider._close_mem_maps(processed_dataset_dict)
with open(cache_dir / "config.json", "w") as f:
json.dump(
{
"dataset_file_path": dataset_file_path,
"tokenizer": {
"name_or_path": tokenizer.name_or_path,
"model_max_length": None,
},
"mapping_column_name": mapping_column_name or ["text"],
"validation_split": validation_split,
"shuffle": shuffle,
"seed": seed,
"use_eos_token": use_eos_token,
},
f,
)
return FastHfDatasetProvider(**cache_files, tokenizer=tokenizer)
@classmethod
def from_hub(
cls: FastHfDatasetProvider,
dataset_name: str,
dataset_config_name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[List[str], Dict[str, Union[str, List[str]]]]] = None,
tokenizer: Optional[AutoTokenizer] = None,
tokenizer_name: Optional[str] = None,
mapping_fn: Optional[Callable[[Any], Dict[str, Any]]] = None,
mapping_fn_kwargs: Optional[Dict[str, Any]] = None,
mapping_column_name: Optional[List[str]] = None,
validation_split: Optional[float] = 0.0,
shuffle: Optional[bool] = True,
seed: Optional[int] = 42,
num_workers: Optional[int] = 1,
use_eos_token: Optional[bool] = True,
use_shared_memory: Optional[bool] = True,
cache_dir: Optional[str] = "cache",
) -> FastHfDatasetProvider:
"""Load a dataset provider by downloading and encoding data from Hugging Face Hub.
Args:
dataset_name: Name of the dataset.
dataset_config_name: Name of the dataset configuration.
data_dir: Path to the data directory.
data_files: Path to the source data file(s).
tokenizer: Instance of tokenizer to use.
tokenizer_name: Name of the tokenizer, if `tokenizer` has not been passed.
mapping_fn: A function that maps the dataset. If not provided,
the default `tokenize_concatenated_dataset` function will be used.
mapping_fn_kwargs: Keyword arguments to pass to `mapping_fn`.
mapping_column_name: The columns in the dataset to be tokenized.
If `str`, only one column will be tokenized.
If `List[str]`, multiple columns will be tokenized.
validation_split: Fraction of the dataset to use for validation.
shuffle: Whether to shuffle the dataset.
seed: Random seed.
num_workers: Number of workers to use for encoding.
use_eos_token: Whether to use EOS token to separate sequences.
use_shared_memory: Whether to use shared memory for caching.
cache_dir: Root path to the cache directory.
Returns:
Dataset provider.
"""
assert xor(tokenizer, tokenizer_name), "`tokenizer` and `tokenizer_name` are mutually exclusive."
tokenizer = tokenizer or AutoTokenizer.from_pretrained(tokenizer_name)
dtype = np.uint16 if tokenizer.vocab_size < 64 * 1024 else np.int32
use_shared_memory = use_shared_memory and ALLOW_SHARED_MEMORY
cache_dir = Path(cache_dir)
if cache_dir.is_dir():
logger.warn(f"Cache: {cache_dir} already exists and will be overritten.")
cache_dir.mkdir(parents=True, exist_ok=True)
# Ensure that downloaded dataset is always a dictionary
logger.info("Downloading dataset ...")
hub_dataset_dict = load_dataset(
dataset_name, name=dataset_config_name, data_dir=data_dir, data_files=data_files
)
if not isinstance(hub_dataset_dict, DatasetDict):
hub_dataset_dict = DatasetDict({"train": hub_dataset_dict})
# Ensure that `validation` and `test` splits are available
hub_dataset_dict = FastHfDatasetProvider._create_splits(hub_dataset_dict, validation_split, shuffle, seed)
encoded_dataset_dict = FastHfDatasetProvider._encode_dataset(
hub_dataset_dict,
tokenizer,
mapping_fn,
mapping_fn_kwargs,
mapping_column_name,
use_eos_token,
dtype,
num_workers,
)
processed_dataset_dict = FastHfDatasetProvider._process_dataset_to_memory(
encoded_dataset_dict, cache_dir, dtype, num_workers, use_shared_memory
)
cache_files = FastHfDatasetProvider._save_dataset(
processed_dataset_dict, tokenizer, cache_dir, use_shared_memory
)
FastHfDatasetProvider._close_mem_maps(processed_dataset_dict)
with open(cache_dir / "config.json", "w") as f:
json.dump(
{
"dataset_name": dataset_name,
"dataset_config_name": dataset_config_name,
"data_dir": data_dir,
"data_files": data_files,
"tokenizer": {
"name_or_path": tokenizer.name_or_path,
"model_max_length": None,
},
"mapping_column_name": mapping_column_name or ["text"],
"validation_split": validation_split,
"shuffle": shuffle,
"seed": seed,
"use_eos_token": use_eos_token,
},
f,
)
return FastHfDatasetProvider(**cache_files, tokenizer=tokenizer)
@classmethod
def from_cache(cls: FastHfDatasetProvider, cache_dir: str) -> FastHfDatasetProvider:
"""Load a dataset provider from a cache directory.
Args:
cache_dir: Path to the cache directory.
Returns:
Dataset provider.
"""
logger.info(f"Loading dataset from: {cache_dir}")
cache_dir = Path(cache_dir)
cache_train_file = cache_dir / "train.npy"
cache_validation_file = cache_dir / "validation.npy"
cache_test_file = cache_dir / "test.npy"
tokenizer_file = cache_dir / "tokenizer.pkl"
try:
with open(tokenizer_file, "rb") as f:
tokenizer = pickle.load(f)
except:
logger.warn(f"Could not load tokenizer.pkl from {cache_dir}.")
tokenizer = None
return FastHfDatasetProvider(cache_train_file, cache_validation_file, cache_test_file, tokenizer=tokenizer)
@overrides
def get_train_dataset(self, seq_len: Optional[int] = 1) -> FastHfDataset:
input_ids = np.load(self.train_file, mmap_mode=self.mmap_mode)
return FastHfDataset(input_ids, seq_len=seq_len)
@overrides
def get_val_dataset(self, seq_len: Optional[int] = 1) -> FastHfDataset:
input_ids = np.load(self.validation_file, mmap_mode=self.mmap_mode)
return FastHfDataset(input_ids, seq_len=seq_len)
@overrides
def get_test_dataset(self, seq_len: Optional[int] = 1) -> FastHfDataset:
input_ids = np.load(self.test_file, mmap_mode=self.mmap_mode)
return FastHfDataset(input_ids, seq_len=seq_len)
@dataclass
class FastDataCollatorForLanguageModeling(DataCollatorForLanguageModeling):
"""Language modeling data collator compatible with FastHfDataset.
Args:
use_shifted_labels: Whether to use the original labels (shifted) or the non-shifted labels.
"""
use_shifted_labels: bool = False
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
return super().torch_call(examples)
batch = super().torch_call([example[0] for example in examples])
if self.use_shifted_labels:
batch["labels"] = torch.stack([example[1] for example in examples], dim=0)
return batch
|
archai/archai/datasets/nlp/fast_hf_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/nlp/fast_hf_dataset_provider.py",
"repo_id": "archai",
"token_count": 8021
}
| 317 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.