text | id | metadata | __index_level_0__
---|---|---|---|
stringlengths 5–22M | stringlengths 12–177 | dict | int64 0–1.37k
MODEL_PATH=${1}
SAVE_PATH=${2}
GPU_NUM=16
python -m torch.distributed.launch --nproc_per_node ${GPU_NUM} verifier_multi_es.py --model_path ${MODEL_PATH} --output_dir ${SAVE_PATH}
#python verifier_multi_es.py --model_path ${MODEL_PATH} --output_dir ${SAVE_PATH}
|
ContextualSP/logigan/pre-training/run_ver_es.sh/0
|
{
"file_path": "ContextualSP/logigan/pre-training/run_ver_es.sh",
"repo_id": "ContextualSP",
"token_count": 105
}
| 238 |
# coding=utf-8
import numpy as np
from collections import defaultdict, OrderedDict
import re
from nltk.corpus import stopwords
from enum import Enum
from itertools import permutations
import json
import random
import pickle
# words = stopwords.words('english')
from functools import reduce
class Sample:
def __init__(self, query, sparql, tag):
self.query = query
self.sparql = sparql
self.tag = tag
    def string(self):
        return '\t'.join([self.query, self.sparql, self.tag])
class Helper:
def __init__(self):
self.stop_words = ["Did", "and", ",", "'s", "M0", "M1", "M2", "M3", "M4", "M5","M6", "whose", "Whose", \
"What", "did", "Was", "was", "Which", "Were", "were", "that", "M", "a"]
self.stop_words += stopwords.words('english')
self.dict = defaultdict(list)
data = open("./data/phrase_table")
for i in data:
i = eval(i.strip())[0]
self.dict[i[0]].append(i[1])
def count_var(self, lf):
value = {'?x0':2.5, '?x1':2, '?x2':1.5, '?x3':1, '?x4':0.5, '?x5':0}
a1, r, a2 = lf.split()
cn1, cn2, cn3, cn4 = 0, 0, 0, 0
if a1.startswith("?x"):
cn1 = 5
cn1 += value[a1]
if a2.startswith("?x"):
cn2 = 3
cn2 += value[a2]
if r == 'a':
cn3 = -1
return cn1 + cn2 + cn3
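        # Illustrative scoring (not part of the original code): for the triple
        # "?x0 influence.influence_node.influenced M1", a1 = '?x0' contributes
        # 5 + 2.5 while a2 and r contribute nothing, so count_var returns 7.5;
        # triples whose arguments are (early) variables receive higher scores.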
def term_extract(self, query, type):
        ## 0520
        ## Updated for compatibility with the new version: recognize queries starting with "Did M" (and similar patterns).
terms = []
entities = []
if query.startswith("Did M") or query.startswith("Was M") or query.startswith("Were M") or query.startswith("Was a"):
if type in ['mcd2', 'mcd3']:
nl_pattern = query.split()[0] +" " + query.split()[1]
terms.append((nl_pattern, [f'?x0#is#{query.split()[1]}'], (0, 1)))
else:
nl_pattern = query.split()[0] +" M"
terms.append((nl_pattern, ['?x0#is#M'], (0, 1)))
query = query.split()
idx = 0
        #### single-token spans: entities M[0-9] and uni-gram phrase-table matches
while idx < len(query):
if re.match(r'M[0-9]', query[idx]):
entities.append(( query[idx:idx+1],query[idx:idx+1] ,(idx, idx)))
idx += 1
elif idx +1 <= len(query) and ' '.join(query[idx:idx+1]) in self.dict:
terms.append((' '.join(query[idx:idx+1]), self.dict.get(' '.join(query[idx:idx+1])), (idx, idx)))
idx += 1
else:
idx +=1
        ## multi-token spans: tri-gram and bi-gram phrase-table matches
idx = 0
while idx < len(query) - 3:
if idx +3 <= len(query) and ' '.join(query[idx:idx+3]) in self.dict:
terms.append((' '.join(query[idx:idx+3]), self.dict.get(' '.join(query[idx:idx+3])),(idx, idx+2)))
idx += 1
idx = 0
while idx < len(query) - 2:
if idx +2 <= len(query) and' '.join(query[idx:idx+2]) in self.dict:
terms.append(( ' '.join(query[idx:idx+2]), self.dict.get(' '.join(query[idx:idx+2])), (idx, idx+1)))
idx += 1
terms = sorted(terms, key = lambda s:s[2][0])
# print(query, entities, terms)
return entities, terms
pass
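    # Illustrative behaviour (not part of the original code): for the query
    # "Did M1 influence M2" on split 'mcd1', term_extract returns
    #   entities = [(['M1'], ['M1'], (1, 1)), (['M2'], ['M2'], (3, 3))]
    #   terms    = [('Did M', ['?x0#is#M'], (0, 1)), ...phrase-table matches...]
    # with terms sorted by their start position in the query.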
def fill_skeleton(self, query, skeleton, split):
        ## Combine the query with the aligned phrase table to obtain candidate triples.
        ## The candidate triples are then filtered with the given skeleton.
        ## That is, "?x a M", nationality and gender triples are all treated separately.
        ## The v3 version replaces the original "M P ?x" form with the "?x"-headed version, i.e. SPARQL without M-initial triples.
def preprocess_sparql(query):
tokens = []
for token in query:
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens)
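        # Illustrative behaviour (not part of the original code):
        #   preprocess_sparql(['?x0', 'ns:people.person.nationality', 'ns:m.059j2'])
        # returns '?x0 people.person.nationality m_059j2'.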
def check_valid(skeleton_list, skeleton_pattern):
skeleton_pattern = re.sub(r'\?x[0-9]', "?x", skeleton_pattern)
# skeleton = re.sub(r'\?x[0-9]', "?x", skeleton)
for skeleton in skeleton_list:
if re.sub(r'\?x[0-9]', "?x", skeleton) not in skeleton_pattern:
return False
return True
def transform_term_to_pattern(term):
term_split = []
for i in term.split():
term_split += i.split("|||")
skeleton_list = []
term_list = []
for i in term_split:
if i.startswith("FILTER"):
continue
i = preprocess_sparql(i.split("#"))
a1, r, a2 = i.split()
if a1.startswith("?x") and a2.startswith("?x"):
## ?x P ?x
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
## ?x P M
skeleton_list.append(f"{a1} P M")
elif a1.startswith("?x") and r == "a":
## ?x a M => ?x P M
skeleton_list.append(f"{a1} a M")
else:
skeleton_list.append(f"{a1} V S")
term_list.append(i)
skeleton_str = []
return skeleton_list, ' . '.join(term_list)
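        # Illustrative behaviour (not part of the original code):
        #   transform_term_to_pattern('?x0#people.person.nationality#M')
        # returns (['?x0 P M'], '?x0 people.person.nationality M').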
entities, terms = self.term_extract(query, split)
candidate_terms = defaultdict(set)
for term in terms:
for sub_term in term[1]:
sub_pattern , sub_term = transform_term_to_pattern(sub_term)
if check_valid(sub_pattern, skeleton):
candidate_terms[" ".join(sub_pattern)].add(sub_term)
candidate_triplets = defaultdict(list)
# print("candidate_term:", candidate_terms)
for candidate_skeleton, candidate_terms in candidate_terms.items():
# a1, r, a2 = candidate_term.split("#")
for candidate_term in candidate_terms:
candidate_term = candidate_term.replace("#", " ")
if candidate_term.count("M") == 1:
if candidate_term.startswith("?x0 is M") and split in ['mcd2', 'mcd3']:
candidate_triplets[candidate_skeleton] += [candidate_term]
else:
candidate_triplets[candidate_skeleton] += [''.join(candidate_term.replace("M", entity[0][0])) for entity in entities]
elif candidate_term.count("M") == 2:
candidate_term = list(candidate_term)
index_m = candidate_term.index('M')
candidate_term[index_m] = 'W'
index_m = candidate_term.index('M')
candidate_term[index_m] = 'Y'
candidate_term = ''.join(candidate_term)
for i in permutations(entities, 2):
a1, a2 = i[0][0][0], i[1][0][0]
# print(a1, a2, candidate_term)
candidate_term_ = candidate_term.replace("W", a1)
candidate_term_ = candidate_term_.replace("Y", a2)
candidate_triplets[candidate_skeleton].append(candidate_term_)
else:
candidate_triplets[candidate_skeleton].append(candidate_term)
return candidate_triplets
def abstract_sparql_to_sketch(self,sparql):
## 20200519
        ## Map "M P ?x" to "?x0 is M . ?x0 P M".
        ## First, sort the triples so that those starting with M come first.
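        # Illustrative behaviour (not part of the original code): the brace-stripped SPARQL
        #   "?x0 a people.person . ?x0 influence.influence_node.influenced M1"
        # is returned normalized, together with the sketch "?x0 P M . ?x0 a M".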
sparql = sparql.replace("SELECT count(*) WHERE { ", " ")
sparql = sparql.replace("SELECT DISTINCT ?x0 WHERE { ", " ")
sparql = sparql.replace("M", "?M")
sparql_list = [i.replace("?M", "M") for i in sorted(sparql.strip().split(" . "))]
Mflag = True if sparql_list[0].startswith("M") else False
FILTER_list, OTHER_list = [], []
skeleton_list = []
for item in sparql_list:
if item.startswith("FILTER"):
FILTER_list.append(item)
continue
OTHER_list.append(item)
a1, r, a2 = item.strip().split()
# print(a1, r, a2)
if a1.startswith("?x") and a2.startswith("?x"):
skeleton_list.append(f"{a1} P {a2}")
elif a1.startswith("?x") and a2.startswith("M"):
skeleton_list.append(f"{a1} P M")
elif a2.startswith("?x") and a1.startswith("M"):
skeleton_list.append("?xx P M")
skeleton_list.append(f"?xx P {a2}")
elif a1.startswith("M") and a2.startswith("M"):
                ## This is actually "?x0 is M . ?x0 P M";
                ## merged, it becomes "?x0 P M".
skeleton_list.append(f"?xx P M")
elif a1.startswith("?x") and r == "a":
skeleton_list.append(f"{a1} a M")
elif a1.startswith("M") and r == "a":
skeleton_list.append("?xx P M")
skeleton_list.append(f"?xx a M")
elif re.match(r'M[0-9]', a1):
skeleton_list.append("?xx P M")
skeleton_list.append(f"?xx V S")
else:
skeleton_list.append(f"{a1} V S")
skeleton_set = list(set(skeleton_list))
skeleton_set.sort()
skeleton_str = []
OTHER_list.sort()
sparql = ' . '.join(OTHER_list+FILTER_list)
if Mflag:
for token in " . ".join(skeleton_set).split():
if token.startswith("?x") and token !='?xx':
token = token[:2]+str(int(token[-1])+1)
skeleton_str.append(token)
skeleton_str = " ".join(skeleton_str).replace("?xx", "?x0").split(" . ")
skeleton_str.sort()
return sparql, " . ".join(skeleton_str)
else:
return sparql, " . ".join(skeleton_set)
def generate_traversal_path(self, sparql):
def trans_tuple_str(tuple_list):
t_all = tuple_list[0]
for i in range(1, len(tuple_list)):
if isinstance(tuple_list[i], tuple):
t_all += tuple_list[i]
else:
return False
return ' '.join(t_all)
results, triples, FILTER_triples = [], [], []
for clf in sparql.split(" . "):
if clf.startswith("FILTER"):
continue
elif len(clf.split()) != 3:
continue
a1, r, a2 = clf.split()
var_cnt = self.count_var(clf)
triples.append((a1, r, a2, var_cnt))
split_dict = defaultdict(list)
sorted_triples = sorted(triples, key=lambda k: k[-1])
        ## splitting strategy
for triple in sorted_triples:
if isinstance(triple, tuple) and len(triple) == 4:
arg1, rel, arg2, _ = triple
triple = (arg1, rel, arg2)
            ## For triples with two variables, insert them into the previously
            ## collected triple chains whenever possible, e.g.
            ## [?x0 ?x1] [?x1 ?x2] [?x2 ?x3]
if arg1.startswith('?x') and arg2.startswith('?x'):
                ## Special handling for chain structures:
                ## the matched groups must be updated at every step.
arg_max = arg1 if arg1 > arg2 else arg2
arg_min = arg2 if arg1 > arg2 else arg1
if len(split_dict[arg_max]) > 0:
for cur_list in split_dict[arg_max]:
cur_list_ = cur_list[:]
cur_list_.insert(0, triple)
split_dict[arg_min].append(cur_list_)
else:
split_dict[arg_max].append([triple])
            ## If the triple has only one variable,
            ## check whether it can extend a previously added group,
            ## e.g. (?x, r, M) extends an earlier (M, r, ?x).
elif arg1.startswith('?x') and not arg1.startswith("?x0"):
flag = True
for t in split_dict[arg1]:
if t[0][0] != arg1:
t.append(triple)
flag = False
if flag:
split_dict[arg1].append([triple])
# print("h:",split_dict)
            ## Otherwise, this is the first triple recorded for the variable.
elif (arg1.startswith("M") and arg2.startswith("?x") and not arg2.startswith("?x0")):
flag =True
for t in split_dict[arg2]:
t.append(triple)
flag = False
if flag:
split_dict[arg2].append([triple])
else:
variable = arg2 if arg2.startswith("?x") else arg1
split_dict[variable].append([triple])
else:
split_dict[triple] = [triple]
final_split = []
for v in split_dict.values():
for vv in v:
vv_len = len(vv)
xidx, xflag = 0, False
for idx in range(vv_len):
vv[idx] = ' '.join(vv[idx])
if not xflag and vv[idx].startswith("?x"):
xidx, xflag = idx, True
vv = ' . '.join(vv[:xidx] + sorted(list(set(vv[xidx:vv_len]))) + vv[vv_len:])
if not (vv.startswith('?x') and int(vv[2])> 0):
                    ## drop invalid groups, i.e. those starting with a non-zero variable (?x1, ?x2, ...)
final_split.append(vv)
return final_split
def distribute_triples_to_skeleton(self, skeleton_groups, candidate_triplets):
fn = lambda x, code=',': reduce(lambda x, y: [str(i)+code+str(j) for i in x for j in y], x)
ans = []
def replace_variable(pattern, candidates):
a1, _, a2 = pattern.split()
modify_candidates = []
for idx, candidate in enumerate(candidates):
a1_c, r_c, a2_c = candidate.split()
a1_c = a1 if a1_c == "?x" else a1_c
a2_c = a2 if a2_c == "?x" else a2_c
modify_candidates.append(' '.join([a1_c, r_c, a2_c]))
# print("modify:", modify_candidates)
return modify_candidates
for skeleton_group in skeleton_groups:
skeleton_group = skeleton_group.split(" . ")
if len(skeleton_group) == 1:
if skeleton_group[0] in candidate_triplets:
ans += candidate_triplets.get(skeleton_group[0])
temp_candidates = candidate_triplets.get(re.sub(r'\?x[0-9]', '?x', skeleton_group[0]), [])
ans += replace_variable(skeleton_group[0], temp_candidates)
else:
triples_groups = [replace_variable(skeleton_item, candidate_triplets.get(re.sub(r'\?x[0-9]', '?x', skeleton_item), [])) for skeleton_item in skeleton_group]
ans += fn(triples_groups, ' . ')
return ans
def generate_samples(self, query, sparql, triples, type):
pos_ans, neg_ans = [], []
valid_cnt = len([i for i in sparql.split(" . ") if not i.startswith("FILTER")])
coverage_sparql = set()
for triple_group in triples:
flag = True
for triple in triple_group.split(" . "):
# print(triple)
if triple not in sparql:
flag = False
continue
else:
coverage_sparql.add(triple)
# print(flag, triple_group)
if flag:
pos_ans.append(Sample(query, triple_group, flag).__dict__)
# print(triple_group)
else:
neg_ans.append(Sample(query, triple_group, flag).__dict__)
coverage = True if len(coverage_sparql) == valid_cnt else False
if type == "train":
return coverage, pos_ans + random.sample(neg_ans, min(len(neg_ans), len(pos_ans)))
else:
return coverage, pos_ans+neg_ans
def mask(self, query, sparqls):
        ## returns (original query, sparqls) and (masked query, masked sparqls, token_mapping)
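        # Illustrative behaviour (not part of the original code): the query
        # "Did M1 , M2 and M3 influence M4" is masked to "Did M1 influence M4",
        # token_mapping becomes {'M1': ['M2', 'M3']}, and sparqls that still
        # mention the dropped entities M2/M3 are filtered out of mask_sparqls.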
entities = re.findall(r"M[0-9]",query)
mask_query, mask_sparqls = query, []
if len(entities) <=1:
return (query, sparqls), (query, sparqls, dict())
else:
stack_tokens = []
entity_tokens = []
stack_state = False
mask_mapping = dict()
token_mapping = dict()
query_tokens = query.split()
for idx in range(len(query_tokens)):
token = query_tokens[idx]
if token.startswith("M") and (idx + 1 == len(query_tokens) or (idx+1 < len(query_tokens) and query_tokens[idx+1]!="'")):
stack_tokens.append(token)
entity_tokens.append(token)
stack_state = True
elif stack_state and (token == "," or token == "and"):
stack_tokens.append(token)
else:
if len(entity_tokens) > 1:
if stack_tokens[-1] == 'and' or stack_tokens[-1] == ',':
stack_tokens = stack_tokens[:-1]
mask_mapping[' '.join(stack_tokens)] = entity_tokens[0]
token_mapping[entity_tokens[0]] = entity_tokens[1:]
stack_tokens, stack_state, entity_tokens = [], False, []
if len(entity_tokens) > 1:
if stack_tokens[-1] == 'and' or stack_tokens[-1] == ',':
stack_tokens = stack_tokens[:-1]
mask_mapping[' '.join(stack_tokens)] = entity_tokens[0]
token_mapping[entity_tokens[0]] = entity_tokens[1:]
for key, v in mask_mapping.items():
mask_query = mask_query.replace(key, v)
if len(mask_mapping) == 0:
return (query, sparqls), (query, sparqls, dict())
for sparql_info in sparqls:
flag = True
for key, v in token_mapping.items():
for vv in v:
if vv in re.findall(r"M[0-9]",sparql_info[0]) :
flag = False
if flag:
mask_sparqls.append(sparql_info)
assert len(mask_sparqls) <= len(sparqls), print("mask mapping", mask_mapping,token_mapping, "\n",(query, sparqls),"\n", (mask_query, mask_sparqls, token_mapping))
return (query, sparqls), (mask_query, mask_sparqls, token_mapping)
if __name__ == '__main__':
helper = Helper()
for split in ["mcd1","mcd2","mcd3"]:
word_dict =[word.strip() for word in open(f"./data/{split}/vocab.cfq.tokens").readlines()]
src_vocab, sketch_vocab = set(), set()
for type in ['test']:
src_data = open(f'./data/{split}/{type}/{type}_encode.txt')
tgt_data = open(f'./data/{split}/{type}/{type}_decode.txt')
sketch_data = open(f'./output/{split}-sketch-output')
tgt_list, poset_sketch_list, data_samples = [], [], []
mapping_classification_data = defaultdict(list)
for src, trg, sketch in zip(src_data, tgt_data, sketch_data):
src, trg, sketch = src.strip(), trg.strip(), sketch.strip()
trg = re.findall(r'[{](.*?)[}]', trg)[0].strip()
## abstract sparql to sketch
poset_abstract_sketch = helper.generate_traversal_path(sketch)
#### primitive prediction
candidate_triplets = helper.fill_skeleton(src, sketch, split)
final_triplets = helper.distribute_triples_to_skeleton(poset_abstract_sketch, candidate_triplets)
_, samples = helper.generate_samples(src, trg, final_triplets, type)
data_samples += samples
json.dump(data_samples, open(f"./data/{split}/{type}/{type}_predict_classification.json", "w"))
mask_sample_info = [f"sentence1\tsentence2\tgold_label"]
mask_full_info = [f"ori_sentence1\tsentence1\tsentence2\tgold_label\tmapping_entities"]
for idx, item in enumerate(data_samples):
query, sparql, tag = item.get('query'), item.get('sparql'), item.get('tag')
mapping_classification_data[query].append((sparql, tag))
for key, v in mapping_classification_data.items():
query_info, mask_info = helper.mask(key, v)
for vv in mask_info[1]:
mask_sample_info.append(f"{mask_info[0]}\t{vv[0]}\t{vv[1]}")
mask_full_info.append(f"{key}\t{mask_info[0]}\t{vv[0]}\t{vv[1]}\t{mask_info[-1]}")
open(f"./data/{split}/{type}/{type}_mask_predict_classification.csv", "w").write("\n".join(mask_sample_info))
open(f"./data/{split}/{type}/{type}_mask_predict_mapping.csv", "w").write("\n".join(mask_full_info))
|
ContextualSP/poset_decoding/preprocess_hierarchical_inference.py/0
|
{
"file_path": "ContextualSP/poset_decoding/preprocess_hierarchical_inference.py",
"repo_id": "ContextualSP",
"token_count": 8130
}
| 239 |
Contributing to MatchZoo-py
----------
> Note: MatchZoo-py is developed under Python 3.6.
Welcome! MatchZoo-py is a community project that aims to work for a wide range of NLP and IR tasks such as Question Answering, Information Retrieval, Paraphrase Identification, etc. Your experience and what you can contribute are important to the project's success.
Discussion
----------
If you've run into behavior in MatchZoo-py you don't understand, or you're having trouble working out a good way to apply it to your code, or you've found a bug or would like a feature it doesn't have, we want to hear from you!
Our main forum for discussion is the project's [GitHub issue tracker](https://github.com/NTMC-Community/MatchZoo-py/issues). This is the right place to start a discussion of any of the above or most any other topic concerning the project.
For less formal discussion we have a chat room on WeChat (mostly Chinese speakers). MatchZoo-py core developers are almost always present; feel free to find us there and we're happy to chat. Please add *YQ-Cai1198593462* as your WeChat friend, she will invite you to join the chat room.
First Time Contributors
-----------------------
MatchZoo-py appreciates your contribution! If you are interested in helping improve MatchZoo-py, there are several ways to get started:
* Work on [new models](https://github.com/NTMC-Community/awaresome-neural-models-for-semantic-match).
* Work on [tutorials](https://github.com/NTMC-Community/MatchZoo-py/tree/master/tutorials).
* Work on [documentation](https://github.com/NTMC-Community/MatchZoo-py/tree/master/docs).
* Try to answer questions on [the issue tracker](https://github.com/NTMC-Community/MatchZoo-py/issues).
Submitting Changes
------------------
Even more excellent than a good bug report is a fix for a bug, or the implementation of a much-needed new model.
(*) We'd love to have your contributions.
(*) If your new feature will be a lot of work, we recommend talking to us early -- see below.
We use the usual GitHub pull-request flow, which may be familiar to you if you've contributed to other projects on GitHub -- see below.
Anyone interested in MatchZoo-py may review your code. One of the MatchZoo-py core developers will merge your pull request when they think it's ready.
For every pull request, we aim to promptly either merge it or say why it's not yet ready; if you go a few days without a reply, please feel
free to ping the thread by adding a new comment.
For a list of MatchZoo-py core developers, see [README](https://github.com/NTMC-Community/MatchZoo-py/blob/master/README.md).
Contributing Flow
------------------
1. Fork the latest version of [MatchZoo-py](https://github.com/NTMC-Community/MatchZoo-py) into your repo.
2. Create an issue under [NTMC-Community/MatchZoo-py](https://github.com/NTMC-Community/MatchZoo-py/issues) describing the bug or enhancement.
3. Clone your forked MatchZoo-py to your machine and add your changes together with associated tests.
4. Run `make push` in a terminal to ensure all unit tests and integration tests pass on your computer.
5. Push to your forked repo, then send a pull request to the official repo. In the pull request, link to the issue you created using `#[issue_id]` and describe what has been changed.
6. Wait for [continuous integration](https://travis-ci.org/NTMC-Community/MatchZoo-py) to pass.
7. Wait for [Codecov](https://codecov.io/gh/NTMC-Community/MatchZoo-py) to generate the coverage report.
8. We'll assign reviewers to review your code.
Your PR will be merged if:
- It functionally benefits the project.
- Continuous Integration passed (all unit tests, integration tests and the [PEP8](https://www.python.org/dev/peps/pep-0008/) check).
- Test coverage didn't decrease (we use [pytest](https://docs.pytest.org/en/latest/)).
- It has proper docstrings; see the codebase for examples.
- It has type hints; see [typing](https://docs.python.org/3/library/typing.html).
- All reviewers approved your changes.
**Thanks and let's improve MatchZoo-py together!**
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/CONTRIBUTING.md/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/CONTRIBUTING.md",
"repo_id": "ContextualSP",
"token_count": 1136
}
| 240 |
from .preparer import Preparer
from .prepare import prepare
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/preparer/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/preparer/__init__.py",
"repo_id": "ContextualSP",
"token_count": 15
}
| 241 |
import matchzoo as mz
from matchzoo.dataloader import DataLoader
class DataLoaderBuilder(object):
"""
    DataLoader Builder. In essence a wrapped partial function.
Example:
>>> import matchzoo as mz
>>> padding_callback = mz.dataloader.callbacks.BasicPadding()
>>> builder = mz.dataloader.DataLoaderBuilder(
... stage='train', callback=padding_callback
... )
>>> data_pack = mz.datasets.toy.load_data()
>>> preprocessor = mz.preprocessors.BasicPreprocessor()
>>> data_processed = preprocessor.fit_transform(data_pack)
>>> dataset = mz.dataloader.Dataset(data_processed, mode='point')
    >>> dataloader = builder.build(dataset)
    >>> type(dataloader)
<class 'matchzoo.dataloader.dataloader.DataLoader'>
"""
def __init__(self, **kwargs):
"""Init."""
self._kwargs = kwargs
def build(self, dataset, **kwargs) -> DataLoader:
"""
Build a DataLoader.
:param dataset: Dataset to build upon.
:param kwargs: Additional keyword arguments to override the keyword
arguments passed in `__init__`.
"""
return mz.dataloader.DataLoader(
dataset, **{**self._kwargs, **kwargs}
)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataloader_builder.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataloader_builder.py",
"repo_id": "ContextualSP",
"token_count": 541
}
| 242 |
from .load_data import load_data
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/snli/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/snli/__init__.py",
"repo_id": "ContextualSP",
"token_count": 10
}
| 243 |
"""Base task."""
import typing
import abc
import torch
from torch import nn
from matchzoo.engine import base_metric
from matchzoo.utils import parse_metric, parse_loss
class BaseTask(abc.ABC):
"""Base Task, shouldn't be used directly."""
TYPE = 'base'
def __init__(self, losses=None, metrics=None):
"""
Base task constructor.
:param losses: Losses of task.
:param metrics: Metrics for evaluating.
"""
self._losses = self._convert(losses, parse_loss)
self._metrics = self._convert(metrics, parse_metric)
self._assure_losses()
self._assure_metrics()
def _convert(self, identifiers, parse):
if not identifiers:
identifiers = []
elif not isinstance(identifiers, list):
identifiers = [identifiers]
return [
parse(identifier, self.__class__.TYPE)
for identifier in identifiers
]
def _assure_losses(self):
if not self._losses:
first_available = self.list_available_losses()[0]
self._losses = self._convert(first_available, parse_loss)
def _assure_metrics(self):
if not self._metrics:
first_available = self.list_available_metrics()[0]
self._metrics = self._convert(first_available, parse_metric)
@property
def losses(self):
""":return: Losses used in the task."""
return self._losses
@property
def metrics(self):
""":return: Metrics used in the task."""
return self._metrics
@losses.setter
def losses(
self,
new_losses: typing.Union[
typing.List[str],
typing.List[nn.Module],
str,
nn.Module
]
):
self._losses = self._convert(new_losses, parse_loss)
@metrics.setter
def metrics(
self,
new_metrics: typing.Union[
typing.List[str],
typing.List[base_metric.BaseMetric],
str,
base_metric.BaseMetric
]
):
self._metrics = self._convert(new_metrics, parse_metric)
@classmethod
@abc.abstractmethod
def list_available_losses(cls) -> list:
""":return: a list of available losses."""
@classmethod
@abc.abstractmethod
def list_available_metrics(cls) -> list:
""":return: a list of available metrics."""
@property
@abc.abstractmethod
def output_shape(self) -> tuple:
""":return: output shape of a single sample of the task."""
@property
@abc.abstractmethod
def output_dtype(self):
""":return: output data type for specific task."""
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_task.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_task.py",
"repo_id": "ContextualSP",
"token_count": 1188
}
| 244 |
"""Matching Tensor module."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
class MatchingTensor(nn.Module):
"""
Module that captures the basic interactions between two tensors.
    :param matching_dim: Word dimension of the two interaction texts.
:param channels: Number of word interaction tensor channels.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param init_diag: Whether to initialize the diagonal elements
of the matrix.
Examples:
>>> import matchzoo as mz
>>> matching_dim = 5
>>> matching_tensor = mz.modules.MatchingTensor(
... matching_dim,
... channels=4,
... normalize=True,
... init_diag=True
... )
"""
def __init__(
self,
matching_dim: int,
channels: int = 4,
normalize: bool = True,
init_diag: bool = True
):
""":class:`MatchingTensor` constructor."""
super().__init__()
self._matching_dim = matching_dim
self._channels = channels
self._normalize = normalize
self._init_diag = init_diag
self.interaction_matrix = torch.empty(
self._channels, self._matching_dim, self._matching_dim
)
if self._init_diag:
self.interaction_matrix = self.interaction_matrix.uniform_(-0.05, 0.05)
for channel_index in range(self._channels):
self.interaction_matrix[channel_index].fill_diagonal_(0.1)
self.interaction_matrix = nn.Parameter(self.interaction_matrix)
else:
self.interaction_matrix = nn.Parameter(self.interaction_matrix.uniform_())
def forward(self, x, y):
"""
The computation logic of MatchingTensor.
:param inputs: two input tensors.
"""
if self._normalize:
x = F.normalize(x, p=2, dim=-1)
y = F.normalize(y, p=2, dim=-1)
# output = [b, c, l, r]
output = torch.einsum(
'bld,cde,bre->bclr',
x, self.interaction_matrix, y
)
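        # Illustrative shapes (not part of the original code): with x of shape
        # [batch, left_len, dim], y of shape [batch, right_len, dim] and
        # interaction_matrix of shape [channels, dim, dim], the einsum above
        # produces an interaction tensor of shape [batch, channels, left_len, right_len].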
return output
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/matching_tensor.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/matching_tensor.py",
"repo_id": "ContextualSP",
"token_count": 1024
}
| 245 |
import nltk
from .unit import Unit
class Lemmatization(Unit):
"""Process unit for token lemmatization."""
def transform(self, input_: list) -> list:
"""
        Lemmatize a sequence of tokens.
        :param input_: list of tokens to be lemmatized.
        :return tokens: list of lemmatized tokens.
"""
lemmatizer = nltk.WordNetLemmatizer()
return [lemmatizer.lemmatize(token, pos='v') for token in input_]
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/lemmatization.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/lemmatization.py",
"repo_id": "ContextualSP",
"token_count": 187
}
| 246 |
"""Ranking task."""
from matchzoo.engine import base_task
class Ranking(base_task.BaseTask):
"""Ranking Task.
Examples:
>>> ranking_task = Ranking()
>>> ranking_task.metrics = ['map', 'ndcg']
>>> ranking_task.output_shape
(1,)
>>> ranking_task.output_dtype
<class 'float'>
>>> print(ranking_task)
Ranking Task
"""
TYPE = 'ranking'
@classmethod
def list_available_losses(cls) -> list:
""":return: a list of available losses."""
return ['mse']
@classmethod
def list_available_metrics(cls) -> list:
""":return: a list of available metrics."""
return ['map']
@property
def output_shape(self) -> tuple:
""":return: output shape of a single sample of the task."""
return 1,
@property
def output_dtype(self):
""":return: target data type, expect `float` as output."""
return float
def __str__(self):
""":return: Task name as string."""
return 'Ranking Task'
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/ranking.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/ranking.py",
"repo_id": "ContextualSP",
"token_count": 443
}
| 247 |
import os
import shutil
from pathlib import Path
import matchzoo
from matchzoo import utils
from matchzoo.engine.base_model import BaseModel
def test_timer():
timer = utils.Timer()
start = timer.time
timer.stop()
assert timer.time
timer.resume()
assert timer.time > start
def test_list_recursive_subclasses():
assert utils.list_recursive_concrete_subclasses(
BaseModel
)
def test_average_meter():
am = utils.AverageMeter()
am.update(1)
assert am.avg == 1.0
am.update(val=2.5, n=2)
assert am.avg == 2.0
def test_early_stopping():
es = utils.EarlyStopping(
patience=1,
key='key',
)
result = {'key': 1.0}
es.update(result)
assert es.should_stop_early is False
es.update(result)
assert es.should_stop_early is True
state = es.state_dict()
new_es = utils.EarlyStopping()
assert new_es.should_stop_early is False
new_es.load_state_dict(state)
assert new_es.best_so_far == 1.0
assert new_es.is_best_so_far is False
assert new_es.should_stop_early is True
def test_get_file():
_url = "https://raw.githubusercontent.com/NTMC-Community/" \
"MatchZoo-py/master/LICENSE"
file_path = utils.get_file(
'LICENSE', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='LICENSE',
verbose=1
)
num_lines = 203
assert len(open(file_path, 'rb').readlines()) == num_lines
file_hash = utils._hash_file(file_path, algorithm='md5')
file_path2 = utils.get_file(
'LICENSE', _url, extract=False,
md5_hash=file_hash,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='LICENSE',
verbose=1
)
file_hash2 = utils._hash_file(file_path2, algorithm='md5')
assert file_hash == file_hash2
file_dir = matchzoo.USER_DATA_DIR.joinpath('LICENSE')
if os.path.exists(file_dir):
shutil.rmtree(file_dir)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_utils.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_utils.py",
"repo_id": "ContextualSP",
"token_count": 862
}
| 248 |
#!/usr/bin/env bash
export model_file=checkpoints_sparc/sparc_concat_none_model
export validation_file=dataset_sparc/dev.json
export validation_out_file=dataset_sparc/dev.jsonl
export prediction_out_file=predict.jsonl
python postprocess.py --valid_file ${validation_file} --valid_out_file ${validation_out_file}
allennlp predict \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
--include-package predictor.sparc_predictor \
--predictor sparc \
--dataset-reader-choice validation \
--batch-size 1 \
--cuda-device 0 \
--output-file ${model_file}/${prediction_out_file} \
${model_file}/model.tar.gz ${validation_out_file}
|
ContextualSP/semantic_parsing_in_context/bash_files/linux/predict.bash/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/linux/predict.bash",
"repo_id": "ContextualSP",
"token_count": 234
}
| 249 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import List, Union, Optional
from context.db_context import SparcDBContext
from context.copy_production_rule_field import CopyProductionRule
from typing import Dict
import copy
from context.grammar import A, C, T, Keywords, Statement
from constant import SpecialSymbol
logger = logging.getLogger(__name__)
class ConditionStatelet:
"""
    This class is designed to bring more SQL-related common sense into the current decoding phase. The main principles are:
    1. The accompanying Column and Table should be consistent.
    2. The same column cannot be repeated under the same `Select -> A A ...` expansion. (FIXME: we do not support this now)
"""
def __init__(self,
possible_actions: List[CopyProductionRule],
db_context: SparcDBContext,
enable_prune: bool = True):
self.possible_actions = [action[0] for action in possible_actions]
self.action_history = []
self.valid_tables = self._get_valid_tables(db_context)
self.current_stack: List[Union[str, List[str]]] = []
self.used_terminals = []
self.parent_path = []
self.enable_prune = enable_prune
@staticmethod
def _get_valid_tables(db_context: Optional[SparcDBContext]) -> Dict[str, List[str]]:
col_valid_tables = {}
if db_context is not None:
for entity_name in db_context.knowledge_graph.neighbors_with_table:
# record column to table
entity_parts = entity_name.split(':')
if entity_parts[0] == 'column':
col_name = entity_parts[-1]
tab_name = entity_parts[-2]
                    # register the column the first time it is seen
if col_name not in col_valid_tables:
col_valid_tables[col_name] = []
col_valid_tables[col_name].append(tab_name)
return col_valid_tables
else:
return {}
def take_action(self, production_rule: str) -> 'ConditionStatelet':
if not self.enable_prune:
return self
# the action to copy is actually correct
special_str = SpecialSymbol.copy_delimiter
        # more than one copy delimiter indicates a segment copy
if special_str in production_rule and production_rule.count(special_str) >= 2:
return self
elif special_str in production_rule:
production_rule = production_rule.replace(special_str, '')
# clean stack
new_sql_state = copy.deepcopy(self)
lhs, rhs = production_rule.split(' -> ')
# append current production rule
new_sql_state.action_history.append(production_rule)
new_sql_state.current_stack.append([lhs, []])
if lhs not in [C.__name__, T.__name__]:
rhs_tokens = rhs.split(' ')
else:
# default terminal not append into current stack
# record when lhs equal to C.__name__
parent_path = [new_sql_state.current_stack[i][0] for i in range(len(new_sql_state.current_stack))]
# record parent path
new_sql_state.parent_path = copy.deepcopy(parent_path)
parent_path.append(rhs)
new_sql_state.used_terminals.append(':'.join(parent_path))
rhs_tokens = []
for token in rhs_tokens:
is_terminal = token in Keywords
if not is_terminal:
new_sql_state.current_stack[-1][1].append(token)
while len(new_sql_state.current_stack) > 0 and \
len(new_sql_state.current_stack[-1][1]) == 0:
finished_item = new_sql_state.current_stack[-1][0]
del new_sql_state.current_stack[-1]
if finished_item == Statement.__name__:
break
# pop the non-terminals
if new_sql_state.current_stack[-1][1][0] == finished_item:
new_sql_state.current_stack[-1][1] = new_sql_state.current_stack[-1][1][1:]
# append current stack
return new_sql_state
def get_valid_actions(self, valid_actions: dict):
if not self.enable_prune:
return valid_actions
current_clause = self._get_current_clause()
        # track used terminals to avoid repeated roles, especially for Select -> A A A ...
valid_actions_ids = []
for key, items in valid_actions.items():
valid_actions_ids += [(key, rule_id) for rule_id in valid_actions[key][2]]
valid_actions_rules = [self.possible_actions[rule_id] for rule_type, rule_id in valid_actions_ids]
# k is the group index
actions_to_remove = {k: set() for k in valid_actions.keys()}
# if not None
if current_clause:
# repeat constraints
for rule_id, rule in zip(valid_actions_ids, valid_actions_rules):
rule_type, rule_id = rule_id
# rhs is the key for querying
lhs, rhs = rule.split(' -> ')
# C, T should be in the same table
if lhs == T.__name__:
# take the rhs
column_name = self.action_history[-1].split(' -> ')[1]
# column name is *, no limited tables
if column_name == '*':
continue
assert column_name in self.valid_tables
valid_table_name = self.valid_tables[column_name]
if rhs not in valid_table_name:
actions_to_remove[rule_type].add(rule_id)
unique_key = ':'.join(self.parent_path) + lhs + ':' + rhs
# repeated column/table
if unique_key in self.used_terminals:
actions_to_remove[rule_type].add(rule_id)
# now we only prevent linked rules
new_valid_actions = {}
new_global_actions = self._remove_actions(valid_actions, 'global',
actions_to_remove['global']) if 'global' in valid_actions else None
new_linked_actions = self._remove_actions(valid_actions, 'linked',
actions_to_remove['linked']) if 'linked' in valid_actions else None
if new_linked_actions is not None:
new_valid_actions['linked'] = new_linked_actions
if new_global_actions is not None:
new_valid_actions['global'] = new_global_actions
for key in valid_actions.keys():
if key == 'copy_seg' or key == 'copy_token':
new_valid_actions[key] = valid_actions[key]
return new_valid_actions
def _get_current_clause(self):
relevant_clauses = [
A.__name__
]
for rule in self.current_stack[::-1]:
# the first nonterminal which should be parsed
if rule[0] in relevant_clauses:
return rule[0]
return None
@staticmethod
def _remove_actions(valid_actions, key, ids_to_remove):
if len(ids_to_remove) == 0:
return valid_actions[key]
if len(ids_to_remove) == len(valid_actions[key][2]):
return None
current_ids = valid_actions[key][2]
keep_ids = []
keep_ids_loc = []
for loc, rule_id in enumerate(current_ids):
if rule_id not in ids_to_remove:
keep_ids.append(rule_id)
keep_ids_loc.append(loc)
items = list(valid_actions[key])
items[0] = items[0][keep_ids_loc]
items[1] = items[1][keep_ids_loc]
items[2] = keep_ids
if len(items) >= 4:
items[3] = items[3][keep_ids_loc]
return tuple(items)
|
ContextualSP/semantic_parsing_in_context/models/states_machine/condition_state_let.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/states_machine/condition_state_let.py",
"repo_id": "ContextualSP",
"token_count": 3650
}
| 250 |
# Copyright (c) Facebook, Inc. and Microsoft Corporation.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List
import torch
from genre.utils import chunk_it
from transformers import BartForConditionalGeneration, BartTokenizer
logger = logging.getLogger(__name__)
class GENREHubInterface(BartForConditionalGeneration):
def sample(
self, sentences: List[str], num_beams: int = 5, num_return_sequences=5, **kwargs
) -> List[str]:
input_args = {
k: v.to(self.device)
for k, v in self.tokenizer.batch_encode_plus(
sentences, padding=True, return_tensors="pt"
).items()
}
outputs = self.generate(
**input_args,
min_length=0,
max_length=1024,
num_beams=num_beams,
num_return_sequences=num_return_sequences,
output_scores=True,
return_dict_in_generate=True,
**kwargs
)
return chunk_it(
[
{
"text": text,
"logprob": score,
}
for text, score in zip(
self.tokenizer.batch_decode(
outputs.sequences, skip_special_tokens=True
),
outputs.sequences_scores,
)
],
num_return_sequences,
)
def encode(self, sentence):
return self.tokenizer.encode(sentence, return_tensors="pt")[0]
class GENRE(BartForConditionalGeneration):
@classmethod
def from_pretrained(cls, model_name_or_path):
model = GENREHubInterface.from_pretrained(model_name_or_path)
model.tokenizer = BartTokenizer.from_pretrained(model_name_or_path)
return model
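# Illustrative usage (the checkpoint path is hypothetical, not part of the original code):
#   model = GENRE.from_pretrained("/path/to/bart_checkpoint")
#   hypotheses = model.sample(["Show the names of all singers."], num_beams=5, num_return_sequences=5)
# Each hypothesis is a {"text": ..., "logprob": ...} dict built from the beam-search output.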
|
ContextualSP/unified_parser_text_to_sql/genre/hf_model.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/genre/hf_model.py",
"repo_id": "ContextualSP",
"token_count": 936
}
| 251 |
"""
Based on https://github.com/ryanzhumich/editsql/blob/master/preprocess.py
"""
import argparse
import json
import os
import re
import stanza
import sqlparse
from tqdm import tqdm
from semparse.contexts.spider_db_context import SpiderDBContext
from semparse.sql.spider_utils import disambiguate_items, fix_number_value
from semparse.sql.spider_utils import read_dataset_schema
keyword = ['select', 'distinct', 'from', 'join', 'on', 'where', 'group', 'by', 'order', 'asc', 'desc', 'limit',
'having',
'and', 'not', 'or', 'like', 'between', 'in',
'sum', 'count', 'max', 'min', 'avg',
'(', ')', ',', '>', '<', '=', '==', '>=', '!=', '<=',
'union', 'except', 'intersect',
'\'value\'']
stanza.download('en')
stanza_model = stanza.Pipeline(lang='en', processors='tokenize,pos,lemma')
# stanza_model=None
def write_interaction(interaction_list, split, output_dir):
interaction = []
for db_id in interaction_list:
interaction += interaction_list[db_id]
json_split = os.path.join(output_dir, split + '.json')
with open(json_split, 'w', encoding="utf-8") as outfile:
json.dump(interaction, outfile, indent=2, ensure_ascii=False)
return
def read_database_schema(table_path):
schema_tokens = {}
column_names = {}
database_schemas_dict = {}
with open(table_path, 'r', encoding='UTF-8') as f:
database_schemas = json.load(f)
def get_schema_tokens(table_schema):
column_names_surface_form = []
column_names = []
column_names_original = table_schema['column_names_original']
table_names = table_schema['table_names']
table_names_original = table_schema['table_names_original']
for i, (table_id, column_name) in enumerate(column_names_original):
if table_id >= 0:
table_name = table_names_original[table_id]
column_name_surface_form = '{}.{}'.format(table_name, column_name)
else:
# this is just *
column_name_surface_form = column_name
column_names_surface_form.append(column_name_surface_form.lower())
column_names.append(column_name.lower())
# also add table_name.*
for table_name in table_names_original:
column_names_surface_form.append('{}.*'.format(table_name.lower()))
return column_names_surface_form, column_names
for table_schema in database_schemas:
database_id = table_schema['db_id']
if 'column_names_original' not in table_schema:
table_schema["column_names_original"] = table_schema["column_names"]
table_schema["table_names_original"] = table_schema["table_names"]
table_schema['table_names_original'] = [t.lower() for t in table_schema['table_names_original']]
table_schema['foreign_keys_col'] = [i[0] for i in table_schema['foreign_keys']]
structure_schema = []
for t in table_schema['foreign_keys']:
primary_col, foreign_col = t
primary_col = table_schema['column_names_original'][primary_col]
primary_col_tab = table_schema['table_names_original'][primary_col[0]].lower()
foreign_col = table_schema['column_names_original'][foreign_col]
foreign_col_tab = table_schema['table_names_original'][foreign_col[0]].lower()
structure_schema.append(f"( {primary_col_tab} , {foreign_col_tab} )")
structure_schema = list(sorted(set(structure_schema)))
table_schema['permutations'] = [structure_schema]
database_schemas_dict[database_id] = table_schema
schema_tokens[database_id], column_names[database_id] = get_schema_tokens(table_schema)
if 'column_rewrite_names' in table_schema:
for i in range(len(table_schema['column_rewrite_names'])):
table_schema['column_rewrite_names'][i] = [table_schema['column_names'][i][-1]] + \
table_schema['column_rewrite_names'][i][-1]
table_schema['column_rewrite_names'] = [list(set(map(lambda x: x.lower().replace(' ', ''), i))) for i in
table_schema['column_rewrite_names']]
for i in range(len(table_schema['table_rewrite_names'])):
table_schema['table_rewrite_names'][i] = [table_schema['table_names'][i]] + \
table_schema['table_rewrite_names'][i]
table_schema['table_rewrite_names'] = [list(set(map(lambda x: x.lower().replace(' ', ''), i))) for i in
table_schema['table_rewrite_names']]
return schema_tokens, column_names, database_schemas_dict
def remove_from_with_join(format_sql_2):
used_tables_list = []
format_sql_3 = []
table_to_name = {}
table_list = []
old_table_to_name = {}
old_table_list = []
for sub_sql in format_sql_2.split('\n'):
if 'select ' in sub_sql:
# only replace alias: t1 -> table_name, t2 -> table_name, etc...
if len(table_list) > 0:
for i in range(len(format_sql_3)):
for table, name in table_to_name.items():
format_sql_3[i] = format_sql_3[i].replace(table, name)
old_table_list = table_list
old_table_to_name = table_to_name
table_to_name = {}
table_list = []
format_sql_3.append(sub_sql)
elif sub_sql.startswith('from'):
new_sub_sql = None
sub_sql_tokens = sub_sql.split()
for t_i, t in enumerate(sub_sql_tokens):
if t == 'as':
table_to_name[sub_sql_tokens[t_i + 1]] = sub_sql_tokens[t_i - 1]
table_list.append(sub_sql_tokens[t_i - 1])
elif t == ')' and new_sub_sql is None:
# new_sub_sql keeps some trailing parts after ')'
new_sub_sql = ' '.join(sub_sql_tokens[t_i:])
if len(table_list) > 0:
# if it's a from clause with join
if new_sub_sql is not None:
format_sql_3.append(new_sub_sql)
used_tables_list.append(table_list)
else:
# if it's a from clause without join
table_list = old_table_list
table_to_name = old_table_to_name
assert 'join' not in sub_sql
if new_sub_sql is not None:
sub_sub_sql = sub_sql[:-len(new_sub_sql)].strip()
assert len(sub_sub_sql.split()) == 2
used_tables_list.append([sub_sub_sql.split()[1]])
format_sql_3.append(sub_sub_sql)
format_sql_3.append(new_sub_sql)
elif 'join' not in sub_sql:
assert len(sub_sql.split()) == 2 or len(sub_sql.split()) == 1
if len(sub_sql.split()) == 2:
used_tables_list.append([sub_sql.split()[1]])
format_sql_3.append(sub_sql)
else:
print('bad from clause in remove_from_with_join')
exit()
else:
format_sql_3.append(sub_sql)
if len(table_list) > 0:
for i in range(len(format_sql_3)):
for table, name in table_to_name.items():
format_sql_3[i] = format_sql_3[i].replace(table, name)
used_tables = []
for t in used_tables_list:
for tt in t:
used_tables.append(tt)
used_tables = list(set(used_tables))
return format_sql_3, used_tables, used_tables_list
def remove_from_without_join(format_sql_3, column_names, schema_tokens):
format_sql_4 = []
table_name = None
for sub_sql in format_sql_3.split('\n'):
if 'select ' in sub_sql:
if table_name:
for i in range(len(format_sql_4)):
tokens = format_sql_4[i].split()
for ii, token in enumerate(tokens):
if token in column_names and tokens[ii - 1] != '.':
if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[
ii + 1] != '(') or ii + 1 == len(tokens):
if '{}.{}'.format(table_name, token) in schema_tokens:
tokens[ii] = '{} . {}'.format(table_name, token)
format_sql_4[i] = ' '.join(tokens)
format_sql_4.append(sub_sql)
elif sub_sql.startswith('from'):
sub_sql_tokens = sub_sql.split()
if len(sub_sql_tokens) == 1:
table_name = None
elif len(sub_sql_tokens) == 2:
table_name = sub_sql_tokens[1]
else:
print('bad from clause in remove_from_without_join')
print(format_sql_3)
exit()
else:
format_sql_4.append(sub_sql)
if table_name:
for i in range(len(format_sql_4)):
tokens = format_sql_4[i].split()
for ii, token in enumerate(tokens):
if token in column_names and tokens[ii - 1] != '.':
if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(
tokens):
if '{}.{}'.format(table_name, token) in schema_tokens:
tokens[ii] = '{} . {}'.format(table_name, token)
format_sql_4[i] = ' '.join(tokens)
return format_sql_4
def add_table_name(format_sql_3, used_tables, column_names, schema_tokens):
# If just one table used, easy case, replace all column_name -> table_name.column_name
if len(used_tables) == 1:
table_name = used_tables[0]
format_sql_4 = []
for sub_sql in format_sql_3.split('\n'):
if sub_sql.startswith('from'):
format_sql_4.append(sub_sql)
continue
tokens = sub_sql.split()
for ii, token in enumerate(tokens):
if token in column_names and tokens[ii - 1] != '.':
if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(
tokens):
if '{}.{}'.format(table_name, token) in schema_tokens:
tokens[ii] = '{} . {}'.format(table_name, token)
format_sql_4.append(' '.join(tokens))
return format_sql_4
def get_table_name_for(token):
table_names = []
for table_name in used_tables:
if '{}.{}'.format(table_name, token) in schema_tokens:
table_names.append(table_name)
if len(table_names) == 0:
return 'table'
if len(table_names) > 1:
return None
else:
return table_names[0]
format_sql_4 = []
for sub_sql in format_sql_3.split('\n'):
if sub_sql.startswith('from'):
format_sql_4.append(sub_sql)
continue
tokens = sub_sql.split()
for ii, token in enumerate(tokens):
# skip *
if token == '*':
continue
if token in column_names and tokens[ii - 1] != '.':
if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(tokens):
table_name = get_table_name_for(token)
if table_name:
tokens[ii] = '{} . {}'.format(table_name, token)
format_sql_4.append(' '.join(tokens))
return format_sql_4
def normalize_space(format_sql):
format_sql_1 = [' '.join(
sub_sql.strip().replace(',', ' , ').replace('.', ' . ').replace('(', ' ( ').replace(')', ' ) ').split()) for
sub_sql in format_sql.split('\n')]
format_sql_1 = '\n'.join(format_sql_1)
format_sql_2 = format_sql_1.replace('\njoin', ' join').replace(',\n', ', ').replace(' where', '\nwhere').replace(
' intersect', '\nintersect').replace('\nand', ' and').replace('order by t2 .\nstart desc',
'order by t2 . start desc')
format_sql_2 = format_sql_2.replace('select\noperator', 'select operator').replace('select\nconstructor',
'select constructor').replace(
'select\nstart', 'select start').replace('select\ndrop', 'select drop').replace('select\nwork',
'select work').replace(
'select\ngroup', 'select group').replace('select\nwhere_built', 'select where_built').replace('select\norder',
'select order').replace(
'from\noperator', 'from operator').replace('from\nforward', 'from forward').replace('from\nfor',
'from for').replace(
'from\ndrop', 'from drop').replace('from\norder', 'from order').replace('.\nstart', '. start').replace(
'.\norder', '. order').replace('.\noperator', '. operator').replace('.\nsets', '. sets').replace(
'.\nwhere_built', '. where_built').replace('.\nwork', '. work').replace('.\nconstructor',
'. constructor').replace('.\ngroup',
'. group').replace(
'.\nfor', '. for').replace('.\ndrop', '. drop').replace('.\nwhere', '. where')
format_sql_2 = format_sql_2.replace('group by', 'group_by').replace('order by', 'order_by').replace('! =',
'!=').replace(
'limit value', 'limit_value')
return format_sql_2
def normalize_final_sql(format_sql_5):
format_sql_final = format_sql_5.replace('\n', ' ').replace(' . ', '.').replace('group by', 'group_by').replace(
'order by', 'order_by').replace('! =', '!=').replace('limit value', 'limit_value')
# normalize two bad sqls
if 't1' in format_sql_final or 't2' in format_sql_final or 't3' in format_sql_final or 't4' in format_sql_final:
format_sql_final = format_sql_final.replace('t2.dormid', 'dorm.dormid')
# This is the failure case of remove_from_without_join()
format_sql_final = format_sql_final.replace(
'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by population desc limit_value',
'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by city.population desc limit_value')
return format_sql_final
def normalize_original_sql(sql):
sql = [i.lower() for i in sql]
sql = ' '.join(sql).strip(';').replace("``", "'").replace("\"", "'").replace("''", "'")
sql = sql.replace(')from', ') from')
sql = sql.replace('(', ' ( ')
sql = sql.replace(')', ' ) ')
sql = re.sub('\s+', ' ', sql)
sql = re.sub(r"(')(\S+)", r"\1 \2", sql)
sql = re.sub(r"(\S+)(')", r"\1 \2", sql).split(' ')
sql = ' '.join(sql)
sql = sql.strip(' ;').replace('> =', '>=').replace('! =', '!=')
return sql.split(' ')
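    # Illustrative normalization (not part of the original code):
    #   ['SELECT', 'T1.name', 'FROM', 'singer', 'AS', 'T1', 'WHERE', 'T1.age', '>', '=', 'value', ';']
    # is normalized to "select t1.name from singer as t1 where t1.age >= value".split(' ').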
def parse_sql(sql_string, db_id, column_names, schema_tokens, schema):
format_sql = sqlparse.format(sql_string, reindent=True)
format_sql_2 = normalize_space(format_sql)
format_sql_3, used_tables, used_tables_list = remove_from_with_join(format_sql_2)
format_sql_3 = '\n'.join(format_sql_3)
format_sql_4 = add_table_name(format_sql_3, used_tables, column_names, schema_tokens)
format_sql_4 = '\n'.join(format_sql_4)
format_sql_5 = remove_from_without_join(format_sql_4, column_names, schema_tokens)
format_sql_5 = '\n'.join(format_sql_5)
format_sql_final = normalize_final_sql(format_sql_5)
return format_sql_final
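# Net effect of parse_sql (illustrative summary, not part of the original code): the raw SQL is
# reindented, table aliases are resolved, FROM/JOIN clauses are dropped, and every column is
# fully qualified as table.column, e.g. "select name from singer" becomes "select singer.name".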
def read_spider_split(dataset_path, table_path, database_path):
with open(dataset_path) as f:
split_data = json.load(f)
print('read_spider_split', dataset_path, len(split_data))
schemas = read_dataset_schema(table_path, stanza_model)
interaction_list = {}
for i, ex in enumerate(tqdm(split_data)):
db_id = ex['db_id']
ex['query_toks_no_value'] = normalize_original_sql(ex['query_toks_no_value'])
turn_sql = ' '.join(ex['query_toks_no_value'])
turn_sql = turn_sql.replace('select count ( * ) from follows group by value',
'select count ( * ) from follows group by f1')
ex['query_toks_no_value'] = turn_sql.split(' ')
ex = fix_number_value(ex)
try:
ex['query_toks_no_value'] = disambiguate_items(db_id, ex['query_toks_no_value'],
tables_file=table_path, allow_aliases=False)
except:
print(ex['query_toks'])
continue
final_sql_parse = ' '.join(ex['query_toks_no_value'])
final_utterance = ' '.join(ex['question_toks']).lower()
if stanza_model is not None:
lemma_utterance_stanza = stanza_model(final_utterance)
lemma_utterance = [word.lemma for sent in lemma_utterance_stanza.sentences for word in sent.words]
original_utterance = final_utterance
else:
original_utterance = lemma_utterance = final_utterance.split(' ')
# using db content
db_context = SpiderDBContext(db_id,
lemma_utterance,
tables_file=table_path,
dataset_path=database_path,
stanza_model=stanza_model,
schemas=schemas,
original_utterance=original_utterance)
value_match, value_alignment, exact_match, partial_match = db_context.get_db_knowledge_graph(db_id)
if value_match != []:
print(value_match, value_alignment)
if db_id not in interaction_list:
interaction_list[db_id] = []
interaction = {}
interaction['id'] = i
interaction['database_id'] = db_id
interaction['interaction'] = [{'utterance': final_utterance,
'db_id': db_id,
'query': ex['query'],
'question': ex['question'],
'sql': final_sql_parse,
'value_match': value_match,
'value_alignment': value_alignment,
'exact_match': exact_match,
'partial_match': partial_match,
}]
interaction_list[db_id].append(interaction)
return interaction_list
def preprocess_dataset(dataset, dataset_dir, output_dir, table_path, database_path):
# for session in ['train', 'dev']:
for session in ['dev']:
dataset_path = os.path.join(dataset_dir, f'{session}.json')
interaction_list = read_spider_split(dataset_path, table_path, database_path)
write_interaction(interaction_list, session, output_dir)
return interaction_list
def preprocess(dataset, dataset_dir, table_path, database_path, output_dir):
# directory
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# read schema
print('Reading spider database schema file')
schema_tokens, column_names, database_schemas = read_database_schema(table_path)
print('total number of schema_tokens / databases:', len(schema_tokens))
output_table_path = os.path.join(output_dir, 'tables.json')
with open(output_table_path, 'w') as outfile:
json.dump([v for k, v in database_schemas.items()], outfile, indent=4)
# process (SQL, Query) pair in train/dev
preprocess_dataset(dataset, dataset_dir, output_dir, table_path, database_path)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=('spider', 'sparc', 'cosql'), default='spider')
args = parser.parse_args()
dataset = args.dataset
dataset_dir = f'./data/{dataset}/'
table_path = f'./data/{dataset}/tables.json'
database_path = f'./data/{dataset}/database'
output_dir = f'./data/{dataset}_schema_linking_tag'
preprocess(dataset, dataset_dir, table_path, database_path, output_dir)
|
ContextualSP/unified_parser_text_to_sql/step1_schema_linking.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/step1_schema_linking.py",
"repo_id": "ContextualSP",
"token_count": 10676
}
| 252 |
"""
Based on https://github.com/ElementAI/Unisar/blob/master/Unisar/api.py
"""
import os
import subprocess
from typing import Optional
import torch
from genre.fairseq_model import GENRE
from semparse.contexts.spider_db_context import SpiderDBContext
from semparse.sql.spider import load_original_schemas, load_tables
from semparse.sql.spider_utils import read_dataset_schema
from step1_schema_linking import read_database_schema
from step2_serialization import build_schema_linking_data
from step3_evaluate import decode_with_constrain, get_alias_schema, post_processing_sql
def convert_csv_to_sqlite(csv_path: str):
    # TODO: infer types when importing
    db_path = csv_path + ".sqlite"
    if os.path.exists(db_path):
        os.remove(db_path)
    subprocess.run(["sqlite3", db_path, ".mode csv", f".import {csv_path} Data"])
    return db_path
class UnisarAPI(object):
"""Run Unisar model on a given database."""
def __init__(self, log_dir: str, db_path: str, schema_path: Optional[str], stanza_model):
self.log_dir = log_dir
self.db_path = db_path
self.schema_path = schema_path
self.stanza_model = stanza_model
# if self.db_path.endswith(".sqlite"):
# pass
# elif self.db_path.endswith(".csv"):
# self.db_path = convert_csv_to_sqlite(self.db_path)
# else:
# raise ValueError("expected either .sqlite or .csv file")
self.schema = read_dataset_schema(self.schema_path, stanza_model)
_, _, self.database_schemas = read_database_schema(self.schema_path)
self.model = GENRE.from_pretrained(self.log_dir).eval()
if torch.cuda.is_available():
self.model.cuda()
def infer_query(self, question, db_id):
###step-1 schema-linking
lemma_utterance_stanza = self.stanza_model(question)
lemma_utterance = [word.lemma for sent in lemma_utterance_stanza.sentences for word in sent.words]
db_context = SpiderDBContext(db_id,
lemma_utterance,
tables_file=self.schema_path,
dataset_path=self.db_path,
stanza_model=self.stanza_model,
schemas=self.schema,
original_utterance=question)
value_match, value_alignment, exact_match, partial_match = db_context.get_db_knowledge_graph(db_id)
item = {}
item['interaction'] = [{'db_id': db_id,
'question': question,
'sql': '',
'value_match': value_match,
'value_alignment': value_alignment,
'exact_match': exact_match,
'partial_match': partial_match,
}]
###step-2 serialization
source_sequence, _ = build_schema_linking_data(schema=self.database_schemas[db_id],
question=question,
item=item,
turn_id=0,
linking_type='default')
slml_question = source_sequence[0]
###step-3 prediction
schemas, eval_foreign_key_maps = load_tables(self.schema_path)
original_schemas = load_original_schemas(self.schema_path)
alias_schema = get_alias_schema(schemas)
rnt = decode_with_constrain(slml_question, alias_schema[db_id], self.model)
predict_sql = rnt[0]['text'] if isinstance(rnt[0]['text'], str) else rnt[0]['text'][0]
score = rnt[0]['score'].tolist()
predict_sql = post_processing_sql(predict_sql, eval_foreign_key_maps[db_id], original_schemas[db_id],schemas[db_id])
return {
"slml_question": slml_question,
"predict_sql": predict_sql,
"score": score
}
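    # Illustrative call (paths, db id and question below are assumptions, not from the original file):
    #   api = UnisarAPI(log_dir='./models/unisar', db_path='./data/spider/database',
    #                   schema_path='./data/spider/tables.json', stanza_model=stanza_nlp)
    #   out = api.infer_query('How many singers do we have?', 'concert_singer')
    #   out['slml_question'], out['predict_sql'], out['score']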
def execute(self, query):
### TODO: replace the query with value version
pass
# conn = sqlite3.connect(self.db_path)
# # Temporary Hack: makes sure all literals are collated in a case-insensitive way
# query = add_collate_nocase(query)
# results = conn.execute(query).fetchall()
# conn.close()
# return results
|
ContextualSP/unified_parser_text_to_sql/unisar/api.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/unisar/api.py",
"repo_id": "ContextualSP",
"token_count": 2320
}
| 253 |
import random
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
from pathlib import Path
from lib.datasets import build_dataset
from lib import utils
from supernet_engine import evaluate
from model.supernet_transformer import Vision_TransformerSuper
import argparse
import os
import yaml
from lib.config import cfg, update_config_from_file
def decode_cand_tuple(cand_tuple):
depth = cand_tuple[0]
return depth, list(cand_tuple[1:depth+1]), list(cand_tuple[depth + 1: 2 * depth + 1]), cand_tuple[-1]
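# Illustrative example (not in the original file): a candidate tuple packs the depth first,
# then `depth` mlp ratios, then `depth` head counts, and finally the embed dim, e.g.
#   decode_cand_tuple((2, 4, 6, 3, 5, 192))
#   -> depth=2, mlp_ratio=[4, 6], num_heads=[3, 5], embed_dim=192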
class EvolutionSearcher(object):
def __init__(self, args, device, model, model_without_ddp, choices, val_loader, test_loader, output_dir):
self.device = device
self.model = model
self.model_without_ddp = model_without_ddp
self.args = args
self.max_epochs = args.max_epochs
self.select_num = args.select_num
self.population_num = args.population_num
self.m_prob = args.m_prob
self.crossover_num = args.crossover_num
self.mutation_num = args.mutation_num
self.parameters_limits = args.param_limits
self.min_parameters_limits = args.min_param_limits
self.val_loader = val_loader
self.test_loader = test_loader
self.output_dir = output_dir
self.s_prob =args.s_prob
self.memory = []
self.vis_dict = {}
self.keep_top_k = {self.select_num: [], 50: []}
self.epoch = 0
self.checkpoint_path = args.resume
self.candidates = []
self.top_accuracies = []
self.cand_params = []
self.choices = choices
def save_checkpoint(self):
info = {}
info['top_accuracies'] = self.top_accuracies
info['memory'] = self.memory
info['candidates'] = self.candidates
info['vis_dict'] = self.vis_dict
info['keep_top_k'] = self.keep_top_k
info['epoch'] = self.epoch
checkpoint_path = os.path.join(self.output_dir, "checkpoint-{}.pth.tar".format(self.epoch))
torch.save(info, checkpoint_path)
print('save checkpoint to', checkpoint_path)
def load_checkpoint(self):
if not os.path.exists(self.checkpoint_path):
return False
info = torch.load(self.checkpoint_path)
self.memory = info['memory']
self.candidates = info['candidates']
self.vis_dict = info['vis_dict']
self.keep_top_k = info['keep_top_k']
self.epoch = info['epoch']
print('load checkpoint from', self.checkpoint_path)
return True
def is_legal(self, cand):
assert isinstance(cand, tuple)
if cand not in self.vis_dict:
self.vis_dict[cand] = {}
info = self.vis_dict[cand]
if 'visited' in info:
return False
depth, mlp_ratio, num_heads, embed_dim = decode_cand_tuple(cand)
sampled_config = {}
sampled_config['layer_num'] = depth
sampled_config['mlp_ratio'] = mlp_ratio
sampled_config['num_heads'] = num_heads
sampled_config['embed_dim'] = [embed_dim]*depth
n_parameters = self.model_without_ddp.get_sampled_params_numel(sampled_config)
info['params'] = n_parameters / 10.**6
if info['params'] > self.parameters_limits:
print('parameters limit exceed')
return False
if info['params'] < self.min_parameters_limits:
print('under minimum parameters limit')
return False
print("rank:", utils.get_rank(), cand, info['params'])
eval_stats = evaluate(self.val_loader, self.model, self.device, amp=self.args.amp, mode='retrain', retrain_config=sampled_config)
test_stats = evaluate(self.test_loader, self.model, self.device, amp=self.args.amp, mode='retrain', retrain_config=sampled_config)
info['acc'] = eval_stats['acc1']
info['test_acc'] = test_stats['acc1']
info['visited'] = True
return True
def update_top_k(self, candidates, *, k, key, reverse=True):
assert k in self.keep_top_k
print('select ......')
t = self.keep_top_k[k]
t += candidates
t.sort(key=key, reverse=reverse)
self.keep_top_k[k] = t[:k]
def stack_random_cand(self, random_func, *, batchsize=10):
while True:
cands = [random_func() for _ in range(batchsize)]
for cand in cands:
if cand not in self.vis_dict:
self.vis_dict[cand] = {}
info = self.vis_dict[cand]
for cand in cands:
yield cand
def get_random_cand(self):
cand_tuple = list()
dimensions = ['mlp_ratio', 'num_heads']
depth = random.choice(self.choices['depth'])
cand_tuple.append(depth)
for dimension in dimensions:
for i in range(depth):
cand_tuple.append(random.choice(self.choices[dimension]))
cand_tuple.append(random.choice(self.choices['embed_dim']))
return tuple(cand_tuple)
def get_random(self, num):
print('random select ........')
cand_iter = self.stack_random_cand(self.get_random_cand)
while len(self.candidates) < num:
cand = next(cand_iter)
if not self.is_legal(cand):
continue
self.candidates.append(cand)
print('random {}/{}'.format(len(self.candidates), num))
print('random_num = {}'.format(len(self.candidates)))
def get_mutation(self, k, mutation_num, m_prob, s_prob):
assert k in self.keep_top_k
print('mutation ......')
res = []
iter = 0
max_iters = mutation_num * 10
def random_func():
cand = list(random.choice(self.keep_top_k[k]))
depth, mlp_ratio, num_heads, embed_dim = decode_cand_tuple(cand)
random_s = random.random()
# depth
if random_s < s_prob:
new_depth = random.choice(self.choices['depth'])
if new_depth > depth:
mlp_ratio = mlp_ratio + [random.choice(self.choices['mlp_ratio']) for _ in range(new_depth - depth)]
num_heads = num_heads + [random.choice(self.choices['num_heads']) for _ in range(new_depth - depth)]
else:
mlp_ratio = mlp_ratio[:new_depth]
num_heads = num_heads[:new_depth]
depth = new_depth
# mlp_ratio
for i in range(depth):
random_s = random.random()
if random_s < m_prob:
mlp_ratio[i] = random.choice(self.choices['mlp_ratio'])
# num_heads
for i in range(depth):
random_s = random.random()
if random_s < m_prob:
num_heads[i] = random.choice(self.choices['num_heads'])
# embed_dim
random_s = random.random()
if random_s < s_prob:
embed_dim = random.choice(self.choices['embed_dim'])
result_cand = [depth] + mlp_ratio + num_heads + [embed_dim]
return tuple(result_cand)
cand_iter = self.stack_random_cand(random_func)
while len(res) < mutation_num and max_iters > 0:
max_iters -= 1
cand = next(cand_iter)
if not self.is_legal(cand):
continue
res.append(cand)
print('mutation {}/{}'.format(len(res), mutation_num))
print('mutation_num = {}'.format(len(res)))
return res
def get_crossover(self, k, crossover_num):
assert k in self.keep_top_k
print('crossover ......')
res = []
iter = 0
max_iters = 10 * crossover_num
def random_func():
p1 = random.choice(self.keep_top_k[k])
p2 = random.choice(self.keep_top_k[k])
max_iters_tmp = 50
while len(p1) != len(p2) and max_iters_tmp > 0:
max_iters_tmp -= 1
p1 = random.choice(self.keep_top_k[k])
p2 = random.choice(self.keep_top_k[k])
return tuple(random.choice([i, j]) for i, j in zip(p1, p2))
cand_iter = self.stack_random_cand(random_func)
while len(res) < crossover_num and max_iters > 0:
max_iters -= 1
cand = next(cand_iter)
if not self.is_legal(cand):
continue
res.append(cand)
print('crossover {}/{}'.format(len(res), crossover_num))
print('crossover_num = {}'.format(len(res)))
return res
def search(self):
print(
'population_num = {} select_num = {} mutation_num = {} crossover_num = {} random_num = {} max_epochs = {}'.format(
self.population_num, self.select_num, self.mutation_num, self.crossover_num,
self.population_num - self.mutation_num - self.crossover_num, self.max_epochs))
# self.load_checkpoint()
self.get_random(self.population_num)
while self.epoch < self.max_epochs:
print('epoch = {}'.format(self.epoch))
self.memory.append([])
for cand in self.candidates:
self.memory[-1].append(cand)
self.update_top_k(
self.candidates, k=self.select_num, key=lambda x: self.vis_dict[x]['acc'])
self.update_top_k(
self.candidates, k=50, key=lambda x: self.vis_dict[x]['acc'])
print('epoch = {} : top {} result'.format(
self.epoch, len(self.keep_top_k[50])))
tmp_accuracy = []
for i, cand in enumerate(self.keep_top_k[50]):
print('No.{} {} Top-1 val acc = {}, Top-1 test acc = {}, params = {}'.format(
i + 1, cand, self.vis_dict[cand]['acc'], self.vis_dict[cand]['test_acc'], self.vis_dict[cand]['params']))
tmp_accuracy.append(self.vis_dict[cand]['acc'])
self.top_accuracies.append(tmp_accuracy)
mutation = self.get_mutation(
self.select_num, self.mutation_num, self.m_prob, self.s_prob)
crossover = self.get_crossover(self.select_num, self.crossover_num)
self.candidates = mutation + crossover
self.get_random(self.population_num)
self.epoch += 1
self.save_checkpoint()
def get_args_parser():
parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
# evolution search parameters
parser.add_argument('--max-epochs', type=int, default=20)
parser.add_argument('--select-num', type=int, default=10)
parser.add_argument('--population-num', type=int, default=50)
parser.add_argument('--m_prob', type=float, default=0.2)
parser.add_argument('--s_prob', type=float, default=0.4)
parser.add_argument('--crossover-num', type=int, default=25)
parser.add_argument('--epochs', type=int, default=30)
parser.add_argument('--mutation-num', type=int, default=25)
parser.add_argument('--param-limits', type=float, default=23)
parser.add_argument('--min-param-limits', type=float, default=18)
# config file
parser.add_argument('--cfg',help='experiment configure file name',required=True,type=str)
# custom parameters
    parser.add_argument('--platform', default='pai', type=str, choices=['itp', 'pai', 'aml'],
                        help='Platform to run on')
parser.add_argument('--teacher_model', default='', type=str,
help='Name of teacher model to train')
parser.add_argument('--relative_position', action='store_true')
parser.add_argument('--max_relative_position', type=int, default=14, help='max distance in relative position embedding')
parser.add_argument('--scale', action='store_true')
parser.add_argument('--gp', action='store_true')
parser.add_argument('--change_qkv', action='store_true')
# Model parameters
parser.add_argument('--model', default='', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int)
parser.add_argument('--patch_size', default=16, type=int)
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
# parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# custom model argument
parser.add_argument('--rpe_type', type=str, default='bias', choices=['bias', 'direct'])
parser.add_argument('--post_norm', action='store_true')
parser.add_argument('--no_abs_pos', action='store_true')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--lr-power', type=float, default=1.0,
help='power of the polynomial lr scheduler')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" '
                             '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01_101/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19', 'EVO_IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--amp', action='store_true')
parser.add_argument('--no-amp', action='store_false', dest='amp')
parser.set_defaults(amp=True)
return parser
def main(args):
update_config_from_file(args.cfg)
utils.init_distributed_mode(args)
device = torch.device(args.device)
print(args)
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
# save config for later experiments
with open(os.path.join(args.output_dir, "config.yaml"), 'w') as f:
f.write(args_text)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(args.seed)
cudnn.benchmark = True
args.prefetcher = not args.no_prefetcher
dataset_val, args.nb_classes = build_dataset(is_train=False, args=args, folder_name="subImageNet")
dataset_test, _ = build_dataset(is_train=False, args=args, folder_name="val")
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
sampler_test = torch.utils.data.DistributedSampler(
dataset_test, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(2 * args.batch_size),
sampler=sampler_test, num_workers=args.num_workers,
pin_memory=args.pin_mem, drop_last=False
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, batch_size=int(2 * args.batch_size),
sampler=sampler_val, num_workers=args.num_workers,
pin_memory=args.pin_mem, drop_last=False
)
print(f"Creating SuperVisionTransformer")
print(cfg)
model = Vision_TransformerSuper(img_size=args.input_size,
patch_size=args.patch_size,
embed_dim=cfg.SUPERNET.EMBED_DIM, depth=cfg.SUPERNET.DEPTH,
num_heads=cfg.SUPERNET.NUM_HEADS,mlp_ratio=cfg.SUPERNET.MLP_RATIO,
qkv_bias=True, drop_rate=args.drop,
drop_path_rate=args.drop_path,
gp=args.gp,
num_classes=args.nb_classes,
max_relative_position=args.max_relative_position,
relative_position=args.relative_position,
change_qkv=args.change_qkv, abs_pos=not args.no_abs_pos)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
print("resume from checkpoint: {}".format(args.resume))
model_without_ddp.load_state_dict(checkpoint['model'])
choices = {'num_heads': cfg.SEARCH_SPACE.NUM_HEADS, 'mlp_ratio': cfg.SEARCH_SPACE.MLP_RATIO,
'embed_dim': cfg.SEARCH_SPACE.EMBED_DIM , 'depth': cfg.SEARCH_SPACE.DEPTH}
t = time.time()
searcher = EvolutionSearcher(args, device, model, model_without_ddp, choices, data_loader_val, data_loader_test, args.output_dir)
searcher.search()
print('total searching time = {:.2f} hours'.format(
(time.time() - t) / 3600))
if __name__ == '__main__':
parser = argparse.ArgumentParser('AutoFormer evolution search', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
Cream/AutoFormer/evolution.py/0
|
{
"file_path": "Cream/AutoFormer/evolution.py",
"repo_id": "Cream",
"token_count": 11718
}
| 254 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNormSuper(torch.nn.LayerNorm):
def __init__(self, super_embed_dim):
super().__init__(super_embed_dim)
# the largest embed dim
self.super_embed_dim = super_embed_dim
# the current sampled embed dim
self.sample_embed_dim = None
self.samples = {}
self.profiling = False
def profile(self, mode=True):
self.profiling = mode
def sample_parameters(self, resample=False):
if self.profiling or resample:
return self._sample_parameters()
return self.samples
def _sample_parameters(self):
self.samples['weight'] = self.weight[:self.sample_embed_dim]
self.samples['bias'] = self.bias[:self.sample_embed_dim]
return self.samples
def set_sample_config(self, sample_embed_dim):
self.sample_embed_dim = sample_embed_dim
self._sample_parameters()
def forward(self, x):
self.sample_parameters()
return F.layer_norm(x, (self.sample_embed_dim,), weight=self.samples['weight'], bias=self.samples['bias'], eps=self.eps)
def calc_sampled_param_num(self):
assert 'weight' in self.samples.keys()
assert 'bias' in self.samples.keys()
return self.samples['weight'].numel() + self.samples['bias'].numel()
def get_complexity(self, sequence_length):
return sequence_length * self.sample_embed_dim
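# Illustrative usage sketch (the 640/320 dims are assumptions, not from the original file):
#   ln = LayerNormSuper(super_embed_dim=640)
#   ln.set_sample_config(sample_embed_dim=320)      # slices weight/bias to the first 320 dims
#   y = ln(torch.randn(2, 197, 320))                # layer norm over the sampled 320-dim features
#   ln.calc_sampled_param_num()                     # -> 640 (320 weights + 320 biases)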
|
Cream/AutoFormer/model/module/layernorm_super.py/0
|
{
"file_path": "Cream/AutoFormer/model/module/layernorm_super.py",
"repo_id": "Cream",
"token_count": 607
}
| 255 |
""" Search cell """
import _init_paths
import os
import torch
import json
import numpy as np
import lib.utils.genotypes as gt
from tensorboardX import SummaryWriter
from lib.models.model_test import ModelTest
from lib.utils import utils
from lib.config import AugmentConfig
from lib.core.augment_function import validate
# config
config = AugmentConfig()
# make apex optional
if config.distributed:
# DDP = torch.nn.parallel.DistributedDataParallel
try:
import apex
from apex.parallel import DistributedDataParallel as DDP
from apex import amp, optimizers
from apex.fp16_utils import *
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
if config.local_rank == 0:
config.print_params(logger.info)
if 'cifar' in config.dataset:
from lib.datasets.cifar import get_augment_datasets
elif 'imagenet' in config.dataset:
from lib.datasets.imagenet import get_augment_datasets
else:
raise Exception("Not support dataser!")
def main():
logger.info("Logger is set - training start")
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
if config.distributed:
config.gpu = config.local_rank % torch.cuda.device_count()
torch.cuda.set_device(config.gpu)
# distributed init
torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
world_size=config.world_size, rank=config.local_rank)
config.world_size = torch.distributed.get_world_size()
config.total_batch_size = config.world_size * config.batch_size
else:
config.total_batch_size = config.batch_size
loaders, samplers = get_augment_datasets(config)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
file = open(config.cell_file, 'r')
js = file.read()
r_dict = json.loads(js)
if config.local_rank == 0:
logger.info(r_dict)
file.close()
genotypes_dict = {}
for layer_idx, genotype in r_dict.items():
genotypes_dict[int(layer_idx)] = gt.from_str(genotype)
model_main = ModelTest(genotypes_dict, config.model_type, config.res_stem, init_channel=config.init_channels, \
stem_multiplier=config.stem_multiplier, n_nodes=4, num_classes=config.n_classes)
resume_state = torch.load(config.resume_path, map_location='cpu')
model_main.load_state_dict(resume_state, strict=False)
model_main = model_main.cuda()
if config.distributed:
model_main = DDP(model_main, delay_allreduce=True)
top1, top5 = validate(valid_loader, model_main, 0, 0, writer, logger, config)
if config.local_rank == 0:
print("Final best Prec@1 = {:.4%}, Prec@5 = {:.4%}".format(top1, top5))
if __name__ == "__main__":
main()
|
Cream/CDARTS/CDARTS/test.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS/test.py",
"repo_id": "Cream",
"token_count": 1266
}
| 256 |
from six.moves import cPickle as pickle
from .base import BaseFileHandler
class PickleHandler(BaseFileHandler):
def load_from_fileobj(self, file, **kwargs):
return pickle.load(file, **kwargs)
def load_from_path(self, filepath, **kwargs):
return super(PickleHandler, self).load_from_path(
filepath, mode='rb', **kwargs)
def dump_to_str(self, obj, **kwargs):
kwargs.setdefault('protocol', 2)
return pickle.dumps(obj, **kwargs)
def dump_to_fileobj(self, obj, file, **kwargs):
kwargs.setdefault('protocol', 2)
pickle.dump(obj, file, **kwargs)
def dump_to_path(self, obj, filepath, **kwargs):
super(PickleHandler, self).dump_to_path(
obj, filepath, mode='wb', **kwargs)
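# Illustrative round trip (assumes BaseFileHandler routes the *_path helpers through the
# *_fileobj methods shown above; the temp path is an assumption):
#   handler = PickleHandler()
#   handler.dump_to_path({'a': 1}, '/tmp/obj.pkl')
#   obj = handler.load_from_path('/tmp/obj.pkl')    # -> {'a': 1}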
|
Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/pickle_handler.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/pickle_handler.py",
"repo_id": "Cream",
"token_count": 331
}
| 257 |
from torch.nn.parallel import DataParallel
from .scatter_gather import scatter_kwargs
class MMDataParallel(DataParallel):
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
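# Illustrative usage (typical mmcv-style wrapping; the `model` and `data_batch` names are assumptions):
#   model = MMDataParallel(model.cuda(), device_ids=[0])
#   outputs = model(**data_batch)   # kwargs are scattered across devices via scatter_kwargs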
|
Cream/CDARTS/CDARTS_detection/mmcv/parallel/data_parallel.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/parallel/data_parallel.py",
"repo_id": "Cream",
"token_count": 88
}
| 258 |
from __future__ import division
from math import cos, pi
from .hook import Hook
class LrUpdaterHook(Hook):
def __init__(self,
by_epoch=True,
warmup=None,
warmup_iters=0,
warmup_ratio=0.1,
**kwargs):
# validate the "warmup" argument
if warmup is not None:
if warmup not in ['constant', 'linear', 'exp']:
raise ValueError(
'"{}" is not a supported type for warming up, valid types'
' are "constant" and "linear"'.format(warmup))
if warmup is not None:
assert warmup_iters > 0, \
'"warmup_iters" must be a positive integer'
assert 0 < warmup_ratio <= 1.0, \
'"warmup_ratio" must be in range (0,1]'
self.by_epoch = by_epoch
self.warmup = warmup
self.warmup_iters = warmup_iters
self.warmup_ratio = warmup_ratio
self.base_lr = [] # initial lr for all param groups
self.regular_lr = [] # expected lr if no warming up is performed
def _set_lr(self, runner, lr_groups):
for param_group, lr in zip(runner.optimizer.param_groups, lr_groups):
param_group['lr'] = lr
def get_lr(self, runner, base_lr):
raise NotImplementedError
def get_regular_lr(self, runner):
return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
def get_warmup_lr(self, cur_iters):
if self.warmup == 'constant':
warmup_lr = [_lr * self.warmup_ratio for _lr in self.regular_lr]
elif self.warmup == 'linear':
k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
warmup_lr = [_lr * (1 - k) for _lr in self.regular_lr]
elif self.warmup == 'exp':
k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
warmup_lr = [_lr * k for _lr in self.regular_lr]
return warmup_lr
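    # Worked example (illustrative, not in the original file): with warmup='linear',
    # warmup_iters=500, warmup_ratio=0.1 and cur_iters=250:
    #   k = (1 - 250 / 500) * (1 - 0.1) = 0.45
    #   warmup_lr = regular_lr * (1 - 0.45) = 0.55 * regular_lr
    # so the lr ramps from 0.1 * regular_lr at iter 0 up to regular_lr at iter 500.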
def before_run(self, runner):
# NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
# it will be set according to the optimizer params
for group in runner.optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
self.base_lr = [
group['initial_lr'] for group in runner.optimizer.param_groups
]
def before_train_epoch(self, runner):
if not self.by_epoch:
return
self.regular_lr = self.get_regular_lr(runner)
self._set_lr(runner, self.regular_lr)
def before_train_iter(self, runner):
cur_iter = runner.iter
if not self.by_epoch:
self.regular_lr = self.get_regular_lr(runner)
if self.warmup is None or cur_iter >= self.warmup_iters:
self._set_lr(runner, self.regular_lr)
else:
warmup_lr = self.get_warmup_lr(cur_iter)
self._set_lr(runner, warmup_lr)
elif self.by_epoch:
if self.warmup is None or cur_iter > self.warmup_iters:
return
elif cur_iter == self.warmup_iters:
self._set_lr(runner, self.regular_lr)
else:
warmup_lr = self.get_warmup_lr(cur_iter)
self._set_lr(runner, warmup_lr)
class FixedLrUpdaterHook(LrUpdaterHook):
def __init__(self, **kwargs):
super(FixedLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
return base_lr
class StepLrUpdaterHook(LrUpdaterHook):
def __init__(self, step, gamma=0.1, **kwargs):
assert isinstance(step, (list, int))
if isinstance(step, list):
for s in step:
assert isinstance(s, int) and s > 0
elif isinstance(step, int):
assert step > 0
else:
raise TypeError('"step" must be a list or integer')
self.step = step
self.gamma = gamma
super(StepLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = runner.epoch if self.by_epoch else runner.iter
if isinstance(self.step, int):
return base_lr * (self.gamma**(progress // self.step))
exp = len(self.step)
for i, s in enumerate(self.step):
if progress < s:
exp = i
break
return base_lr * self.gamma**exp
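    # Worked example (illustrative, not in the original file): with step=[8, 11] and gamma=0.1,
    # epochs 0-7 use base_lr, epochs 8-10 use base_lr * 0.1, and epochs >= 11 use base_lr * 0.01.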
class ExpLrUpdaterHook(LrUpdaterHook):
def __init__(self, gamma, **kwargs):
self.gamma = gamma
super(ExpLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = runner.epoch if self.by_epoch else runner.iter
return base_lr * self.gamma**progress
class PolyLrUpdaterHook(LrUpdaterHook):
def __init__(self, power=1., min_lr=0., **kwargs):
self.power = power
self.min_lr = min_lr
super(PolyLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
coeff = (1 - progress / max_progress)**self.power
return (base_lr - self.min_lr) * coeff + self.min_lr
class InvLrUpdaterHook(LrUpdaterHook):
def __init__(self, gamma, power=1., **kwargs):
self.gamma = gamma
self.power = power
super(InvLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = runner.epoch if self.by_epoch else runner.iter
return base_lr * (1 + self.gamma * progress)**(-self.power)
class CosineLrUpdaterHook(LrUpdaterHook):
def __init__(self, target_lr=0.001, **kwargs):
self.target_lr = target_lr
super(CosineLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
return self.target_lr + 0.5 * (base_lr - self.target_lr) * \
(1 + cos(pi * (progress / max_progress)))
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/lr_updater.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/lr_updater.py",
"repo_id": "Cream",
"token_count": 3057
}
| 259 |
from .io import Cache, VideoReader, frames2video
from .processing import convert_video, resize_video, cut_video, concat_video
from .optflow import (flowread, flowwrite, quantize_flow, dequantize_flow,
flow_warp)
__all__ = [
'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video',
'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow',
'dequantize_flow', 'flow_warp'
]
|
Cream/CDARTS/CDARTS_detection/mmcv/video/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/__init__.py",
"repo_id": "Cream",
"token_count": 168
}
| 260 |
import torch
from .max_iou_assigner import MaxIoUAssigner
from ..geometry import bbox_overlaps
class ApproxMaxIoUAssigner(MaxIoUAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
def assign(self,
approxs,
squares,
approxs_per_octave,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to approxs.
        This method assigns a gt bbox to each group of approxs (bboxes).
        Each group of approxs is represented by a base approx (bbox) and
        will be assigned with -1, 0, or a positive number.
        -1 means don't care, 0 means negative sample,
        positive number is the index (1-based) of assigned gt.
        The assignment is done in the following steps; the order matters.
        1. assign every bbox to -1
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to 0
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself
Args:
approxs (Tensor): Bounding boxes to be assigned,
shape(approxs_per_octave*n, 4).
squares (Tensor): Base Bounding boxes to be assigned,
shape(n, 4).
approxs_per_octave (int): number of approxs per octave
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
if squares.shape[0] == 0 or gt_bboxes.shape[0] == 0:
raise ValueError('No gt or approxs')
num_squares = squares.size(0)
num_gts = gt_bboxes.size(0)
# re-organize anchors by approxs_per_octave x num_squares
approxs = torch.transpose(
approxs.view(num_squares, approxs_per_octave, 4), 0,
1).contiguous().view(-1, 4)
all_overlaps = bbox_overlaps(approxs, gt_bboxes)
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
num_gts).max(dim=0)
overlaps = torch.transpose(overlaps, 0, 1)
bboxes = squares[:, :4]
if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
gt_bboxes_ignore.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = bbox_overlaps(bboxes,
gt_bboxes_ignore,
mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = bbox_overlaps(gt_bboxes_ignore,
bboxes,
mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
return assign_result
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py",
"repo_id": "Cream",
"token_count": 2455
}
| 261 |
from .class_names import (coco_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
voc_classes)
from .eval_hooks import DistEvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/__init__.py",
"repo_id": "Cream",
"token_count": 306
}
| 262 |
import torch
import numpy as np
from mmdet.ops import nms
from ..bbox import bbox_mapping_back
def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg):
"""Merge augmented proposals (multiscale, flip, etc.)
Args:
aug_proposals (list[Tensor]): proposals from different testing
schemes, shape (n, 5). Note that they are not rescaled to the
original image size.
        img_metas (list[dict]): image info including "img_shape", "scale_factor" and "flip".
rpn_test_cfg (dict): rpn test config.
Returns:
Tensor: shape (n, 4), proposals corresponding to original image scale.
"""
recovered_proposals = []
for proposals, img_info in zip(aug_proposals, img_metas):
img_shape = img_info['img_shape']
scale_factor = img_info['scale_factor']
flip = img_info['flip']
_proposals = proposals.clone()
_proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
scale_factor, flip)
recovered_proposals.append(_proposals)
aug_proposals = torch.cat(recovered_proposals, dim=0)
merged_proposals, _ = nms(aug_proposals, rpn_test_cfg.nms_thr)
scores = merged_proposals[:, 4]
_, order = scores.sort(0, descending=True)
num = min(rpn_test_cfg.max_num, merged_proposals.shape[0])
order = order[:num]
merged_proposals = merged_proposals[order, :]
return merged_proposals
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
"""Merge augmented detection bboxes and scores.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class)
aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[list[dict]]): image info including "img_shape", "scale_factor" and "flip".
rcnn_test_cfg (dict): rcnn test config.
Returns:
tuple: (bboxes, scores)
"""
recovered_bboxes = []
for bboxes, img_info in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(bboxes)
bboxes = torch.stack(recovered_bboxes).mean(dim=0)
if aug_scores is None:
return bboxes
else:
scores = torch.stack(aug_scores).mean(dim=0)
return bboxes, scores
def merge_aug_scores(aug_scores):
"""Merge augmented bbox scores."""
if isinstance(aug_scores[0], torch.Tensor):
return torch.mean(torch.stack(aug_scores), dim=0)
else:
return np.mean(aug_scores, axis=0)
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
"""Merge augmented mask prediction.
Args:
aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[list[dict]]): image info including "flip".
        rcnn_test_cfg (dict): rcnn test config.
    Returns:
        ndarray: merged masks, shape (n, #class, h, w)
"""
recovered_masks = [
mask if not img_info[0]['flip'] else mask[..., ::-1]
for mask, img_info in zip(aug_masks, img_metas)
]
if weights is None:
merged_masks = np.mean(recovered_masks, axis=0)
else:
merged_masks = np.average(
np.array(recovered_masks), axis=0, weights=np.array(weights))
return merged_masks
|
Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/merge_augs.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/merge_augs.py",
"repo_id": "Cream",
"token_count": 1476
}
| 263 |
import os.path as osp
import warnings
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from ..registry import PIPELINES
@PIPELINES.register_module
class LoadImageFromFile(object):
def __init__(self, to_float32=False):
self.to_float32 = to_float32
def __call__(self, results):
if results['img_prefix'] is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img = mmcv.imread(filename)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def __repr__(self):
return self.__class__.__name__ + '(to_float32={})'.format(
self.to_float32)
@PIPELINES.register_module
class LoadAnnotations(object):
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
poly2mask=True,
skip_img_without_anno=True):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.skip_img_without_anno = skip_img_without_anno
def _load_bboxes(self, results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes']
if len(results['gt_bboxes']) == 0 and self.skip_img_without_anno:
if results['img_prefix'] is not None:
file_path = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
file_path = results['img_info']['filename']
warnings.warn(
'Skip the image "{}" that has no valid gt bbox'.format(
file_path))
return None
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
return results
def _load_labels(self, results):
results['gt_labels'] = results['ann_info']['labels']
return results
def _poly2mask(self, mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def _load_masks(self, results):
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = results['ann_info']['masks']
if self.poly2mask:
gt_masks = [self._poly2mask(mask, h, w) for mask in gt_masks]
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
return results
def _load_semantic_seg(self, results):
results['gt_semantic_seg'] = mmcv.imread(
osp.join(results['seg_prefix'], results['ann_info']['seg_map']),
flag='unchanged').squeeze()
return results
def __call__(self, results):
if self.with_bbox:
results = self._load_bboxes(results)
if results is None:
return None
if self.with_label:
results = self._load_labels(results)
if self.with_mask:
results = self._load_masks(results)
if self.with_seg:
results = self._load_semantic_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += ('(with_bbox={}, with_label={}, with_mask={},'
' with_seg={})').format(self.with_bbox, self.with_label,
self.with_mask, self.with_seg)
return repr_str
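# Illustrative pipeline snippet (typical mmdet-style config; the exact values are assumptions,
# not from the original file):
#   train_pipeline = [
#       dict(type='LoadImageFromFile'),
#       dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
#       ...
#   ]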
@PIPELINES.register_module
class LoadProposals(object):
def __init__(self, num_max_proposals=None):
self.num_max_proposals = num_max_proposals
def __call__(self, results):
proposals = results['proposals']
if proposals.shape[1] not in (4, 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
proposals = proposals[:, :4]
if self.num_max_proposals is not None:
proposals = proposals[:self.num_max_proposals]
if len(proposals) == 0:
proposals = np.array([0, 0, 0, 0], dtype=np.float32)
results['proposals'] = proposals
results['bbox_fields'].append('proposals')
return results
def __repr__(self):
return self.__class__.__name__ + '(num_max_proposals={})'.format(
self.num_max_proposals)
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/loading.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/loading.py",
"repo_id": "Cream",
"token_count": 2656
}
| 264 |
import numpy as np
import torch.nn as nn
from mmcv.cnn import normal_init
from .anchor_head import AnchorHead
from ..registry import HEADS
from ..utils import bias_init_with_prob, ConvModule
from ..bbox_heads.auto_head.build_head import build_search_head
@HEADS.register_module
class RetinaHead(AnchorHead):
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
octave_base_scale=4,
scales_per_octave=3,
search_head=None,
conv_cfg=None,
norm_cfg=None,
**kwargs):
self.stacked_convs = stacked_convs
self.search_head = search_head
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
super(RetinaHead, self).__init__(
num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)
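    # Illustrative example (not in the original file): with octave_base_scale=4 and
    # scales_per_octave=3, anchor_scales is roughly [4.0, 5.04, 6.35], i.e. 4 * 2**(i/3) for i in 0..2.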
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
if self.search_head is not None:
if 'cls' in self.search_head.branch:
self.cls_convs = build_search_head(self.search_head)
else:
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
if 'reg' in self.search_head.branch:
self.reg_convs = build_search_head(self.search_head)
else:
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.reg_convs.append(
ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
else:
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def init_weights(self):
for m in self.cls_convs.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.01)
for m in self.reg_convs.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward_single(self, x):
cls_feat = x
reg_feat = x
if self.search_head is not None:
if 'cls' in self.search_head.branch:
cls_feat = self.cls_convs(cls_feat)[0]
else:
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
if 'reg' in self.search_head.branch:
reg_feat = self.reg_convs(reg_feat)[0]
else:
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
else:
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/retina_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/retina_head.py",
"repo_id": "Cream",
"token_count": 2445
}
| 265 |
import torch
import torch.nn as nn
from torch.nn import functional as F
from timm.models import resume_checkpoint
from .builder import *
from ..registry import BACKBONES
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
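# Illustrative values (not in the original file): hard_sigmoid(-3.) == 0.0, hard_sigmoid(0.) == 0.5,
# hard_sigmoid(3.) == 1.0; outside [-3, 3] the output saturates at 0 or 1.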
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
class SelectAdaptivePool2d(nn.Module):
"""Selectable global pooling layer with dynamic input kernel size
"""
def __init__(self, output_size=1, pool_type='avg', flatten=False):
super(SelectAdaptivePool2d, self).__init__()
self.output_size = output_size
self.pool_type = pool_type
self.flatten = flatten
if pool_type == 'avgmax':
self.pool = AdaptiveAvgMaxPool2d(output_size)
elif pool_type == 'catavgmax':
self.pool = AdaptiveCatAvgMaxPool2d(output_size)
elif pool_type == 'max':
self.pool = nn.AdaptiveMaxPool2d(output_size)
else:
if pool_type != 'avg':
assert False, 'Invalid pool type: %s' % pool_type
self.pool = nn.AdaptiveAvgPool2d(output_size)
def forward(self, x):
x = self.pool(x)
if self.flatten:
x = x.flatten(1)
return x
def feat_mult(self):
return adaptive_pool_feat_mult(self.pool_type)
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ 'output_size=' + str(self.output_size) \
+ ', pool_type=' + self.pool_type + ')'
def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
assert 'groups' not in kwargs # only use 'depthwise' bool arg
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
# We're going to use only lists for defining the MixedConv2d kernel groups,
# ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
groups = out_chs if depthwise else 1
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
return m
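# Illustrative example (not in the original file): create_conv2d(32, 32, 3, depthwise=True)
# builds a 3x3 depthwise conv (groups=32), while kernel_size=[3, 5, 7] dispatches to MixedConv2d.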
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
def conv_bn(inp, oup, stride, groups=1, act_fn=nn.ReLU):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False, groups=groups),
nn.BatchNorm2d(oup),
act_fn(inplace=True)
)
def conv_1x1_bn(inp, oup, groups=1, act_fn=nn.ReLU):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False, groups=groups),
nn.BatchNorm2d(oup),
act_fn(inplace=True)
)
default_cfgs = {
'mobilenetv3_large_075': _cfg(url=''),
'mobilenetv3_large_100': _cfg(
interpolation='bicubic',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'),
'mobilenetv3_small_075': _cfg(url=''),
'mobilenetv3_small_100': _cfg(url=''),
'mobilenetv3_rw': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
interpolation='bicubic'),
'tf_mobilenetv3_large_075': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_minimal_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_075': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_100': _cfg(
url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_minimal_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
}
_DEBUG = False
class ChildNet(nn.Module):
def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True,
channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0.,
se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False):
super(ChildNet, self).__init__()
norm_layer = nn.SyncBatchNorm
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self._in_chs = in_chans
self.pool_bn = pool_bn
# Stem
stem_size = round_channels(stem_size, channel_multiplier)
self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size, **norm_kwargs)
self.act1 = act_layer(inplace=True)
self._in_chs = stem_size
# Middle stages (IR/ER/DS Blocks)
builder = ChildNetBuilder(
channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs,
norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG)
self.blocks = nn.Sequential(*builder(self._in_chs, block_args))
# self.blocks = builder(self._in_chs, block_args)
self._in_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
self.act2 = act_layer(inplace=True)
# Classifier
self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes)
if pool_bn:
self.pool_bn = nn.BatchNorm1d(1)
efficientnet_init_weights(self, zero_gamma=zero_gamma)
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.num_classes = num_classes
self.classifier = nn.Linear(
self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None
def forward_features(self, x):
# architecture = [[0], [], [], [], [], [0]]
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
outputs = []
        # feature channels collected at block_idxs [1, 2, 4, 6]: 24, 40, 96, 960
block_idxs = [1, 2, 4, 6]
for i, block in enumerate(self.blocks):
x = block(x)
if i in block_idxs:
outputs.append(x)
# x = self.blocks(x)
return tuple(outputs)
def forward(self, x):
x = self.forward_features(x)
return x
def modify_block_args(block_args, kernel_size, exp_ratio):
# kernel_size: 3,5,7
# exp_ratio: 4,6
block_type = block_args['block_type']
# each type of block has different valid arguments, fill accordingly
if block_type == 'cn':
block_args['kernel_size'] = kernel_size
elif block_type == 'er':
block_args['exp_kernel_size'] = kernel_size
else:
block_args['dw_kernel_size'] = kernel_size
if block_type == 'ir' or block_type == 'er':
block_args['exp_ratio'] = exp_ratio
return block_args
def _gen_childnet(**kwargs):
# 390M
arch_list = [[0], [3, 4, 2, 0], [5, 2, 4, 0], [4, 3, 2, 2], [1, 3, 0, 1], [2, 4, 4, 2], [0]]
# 290M
# arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]]
choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]}
choices_list = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']]
num_features = 1280
# act_layer = HardSwish
act_layer = Swish
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_se0.25'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'],
# stage 4, 14x14in
['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'],
# stage 5, 14x14in
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c960_se0.25'],
]
# arch_def = [
# # stage 0, 112x112 in
# ['ds_r1_k3_s1_e1_c16_se0.25'],
# # stage 1, 112x112 in
# ['ir_r1_k3_s2_e4_c24_se0.25'],
# # stage 2, 56x56 in
# ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'],
# # stage 3, 28x28 in
# ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'],
# # stage 4, 14x14in
# ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25',
# 'ir_r1_k3_s1_e6_c96_se0.25'],
# # stage 5, 14x14in
# ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'],
# # stage 6, 7x7 in
# ['cn_r1_k1_s1_c960_se0.25'],
# ]
new_arch = []
# change to child arch_def
for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)):
if len(layer_arch) == 1:
new_arch.append(layer_arch)
continue
else:
new_layer = []
for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)):
kernel_size, exp_ratio = choices_list[block_choice]
elements = block_arch.split('_')
block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size)))
block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio)))
new_layer.append(block_arch)
new_arch.append(new_layer)
model_kwargs = dict(
block_args=decode_arch_def(new_arch),
num_features=num_features,
stem_size=16,
# channel_multiplier=channel_multiplier,
norm_kwargs=resolve_bn_args(kwargs),
act_layer=act_layer,
se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8),
num_classes=1000,
drop_rate=0.2,
drop_path_rate=0.2,
global_pool='avg'
)
model = ChildNet(**model_kwargs)
return model
@BACKBONES.register_module
class SSDMobilenetV3(nn.Module):
def __init__(self, input_size, width_mult=1.0,
activation_type='relu',
single_scale=False):
super(SSDMobilenetV3, self).__init__()
self.input_size = input_size
self.single_scale = single_scale
self.width_mult = width_mult
self.backbone = _gen_childnet()
# del self.backbone.blocks[3][2]
# del self.backbone.blocks[3][4]
#for m in self.backbone.modules():
# if isinstance(m, nn.BatchNorm2d):
# m.eval()
# m.weight.requires_grad = False
# m.bias.requires_grad = False
self.last_channel = self.backbone.blocks[-1][-1].conv.out_channels # self.backbone.blocks[-1][-1]
# building last several layers
self.extra_convs = []
if not self.single_scale:
self.extra_convs.append(conv_1x1_bn(self.last_channel, 1280,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(1280, 256,
act_fn=Swish))
self.extra_convs.append(conv_bn(256, 256, 2, groups=256,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(256, 512, groups=1,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(512, 128,
act_fn=Swish))
self.extra_convs.append(conv_bn(128, 128, 2, groups=128,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(128, 256,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(256, 128,
act_fn=Swish))
self.extra_convs.append(conv_bn(128, 128, 2, groups=128,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(128, 256,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(256, 64,
act_fn=Swish))
self.extra_convs.append(conv_bn(64, 64, 2, groups=64,
act_fn=Swish))
self.extra_convs.append(conv_1x1_bn(64, 128,
act_fn=Swish))
self.extra_convs = nn.Sequential(*self.extra_convs)
def init_weights(self, pretrained=None):
if pretrained:
state_dict = torch.load(pretrained)
state_dict = state_dict['state_dict']
# resume_checkpoint(self.backbone, pretrained)
self.backbone.load_state_dict(state_dict, strict=True)
else:
print("No pretrained model!")
return
def forward(self, x):
outputs = self.backbone(x)
x = outputs[-1]
outs = []
for i, conv in enumerate(self.extra_convs):
x = conv(x)
if i % 3 == 0:
outs.append(x)
if self.single_scale:
# outs.append(x)
return outputs
return tuple(outs)
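# --- Usage sketch (illustrative, not part of the original file) ---
# Builds the searched backbone and counts its parameters; `input_size` and
# `single_scale` below are assumptions chosen for demonstration. Note that
# ChildNet hard-codes nn.SyncBatchNorm, so an actual forward pass is expected
# to run on GPU under a distributed setup.
if __name__ == '__main__':
    backbone = SSDMobilenetV3(input_size=300, single_scale=True)
    backbone.init_weights(pretrained=None)  # keeps the random initialization
    n_params = sum(p.numel() for p in backbone.parameters())
    print('SSDMobilenetV3 parameters: {:.2f}M'.format(n_params / 1e6))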
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mobilenetv3.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mobilenetv3.py",
"repo_id": "Cream",
"token_count": 8351
}
| 266 |
from __future__ import division
import torch
import torch.nn as nn
from .base import BaseDetector
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,
merge_aug_masks)
@DETECTORS.register_module
class CascadeRCNN(BaseDetector, RPNTestMixin):
def __init__(self,
num_stages,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
super(CascadeRCNN, self).__init__()
self.num_stages = num_stages
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if bbox_head is not None:
self.bbox_roi_extractor = nn.ModuleList()
self.bbox_head = nn.ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
self.bbox_head.append(builder.build_head(head))
if mask_head is not None:
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(builder.build_head(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = nn.ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(CascadeRCNN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
for i in range(self.num_stages):
if self.with_bbox:
self.bbox_roi_extractor[i].init_weights()
self.bbox_head[i].init_weights()
if self.with_mask:
if not self.share_roi_extractor:
self.mask_roi_extractor[i].init_weights()
self.mask_head[i].init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(rcnn_train_cfg.assigner)
bbox_sampler = build_sampler(
rcnn_train_cfg.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
for name, value in loss_bbox.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
mask_roi_extractor = self.mask_roi_extractor[i]
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(
x[:mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
# reuse positive bbox feats
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_head = self.mask_head[i]
mask_pred = mask_head(mask_feats)
mask_targets = mask_head.get_target(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
for name, value in loss_mask.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# refine bboxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
roi_labels = bbox_targets[0] # bbox_targets is a tuple
with torch.no_grad():
proposal_list = bbox_head.refine_bboxes(
rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
img_shape = img_meta[0]['img_shape']
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = bbox_head(bbox_feats)
ms_scores.append(cls_score)
if self.test_cfg.keep_all_stages:
det_bboxes, det_labels = bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
bbox_head.num_classes)
ms_bbox_result['stage{}'.format(i)] = bbox_result
if self.with_mask:
mask_roi_extractor = self.mask_roi_extractor[i]
mask_head = self.mask_head[i]
if det_bboxes.shape[0] == 0:
segm_result = [
[] for _ in range(mask_head.num_classes - 1)
]
else:
_bboxes = (
det_bboxes[:, :4] * scale_factor
if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats, i)
mask_pred = mask_head(mask_feats)
segm_result = mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['stage{}'.format(i)] = segm_result
if i < self.num_stages - 1:
bbox_label = cls_score.argmax(dim=1)
rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,
img_meta[0])
cls_score = sum(ms_scores) / self.num_stages
det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [
[] for _ in range(self.mask_head[-1].num_classes - 1)
]
else:
_bboxes = (
det_bboxes[:, :4] * scale_factor
if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
aug_masks = []
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head[i](mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks,
[img_meta] * self.num_stages,
self.test_cfg.rcnn)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['ensemble'] = segm_result
if not self.test_cfg.keep_all_stages:
if self.with_mask:
results = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
results = ms_bbox_result['ensemble']
else:
if self.with_mask:
results = {
stage: (ms_bbox_result[stage], ms_segm_result[stage])
for stage in ms_bbox_result
}
else:
results = ms_bbox_result
return results
def aug_test(self, img, img_meta, proposals=None, rescale=False):
raise NotImplementedError
def show_result(self, data, result, img_norm_cfg, **kwargs):
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,
**kwargs)
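# --- Usage sketch (illustrative, not part of the original file) ---
# CascadeRCNN is normally built through the registry from a config rather than
# instantiated by hand; a hedged outline of that path is shown below. The config
# file name is a placeholder, not necessarily one shipped with this repo.
#
#   from mmcv import Config
#   from mmdet.models import build_detector
#
#   cfg = Config.fromfile('configs/cascade_rcnn_r50_fpn_1x.py')
#   model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
#
# During training, the stage-i losses returned by forward_train are logged with
# an 's{i}.' prefix and scaled by train_cfg.stage_loss_weights[i], as implemented above.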
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py",
"repo_id": "Cream",
"token_count": 9343
}
| 267 |
import numpy as np
import torch
import torch.nn as nn
from .utils import weighted_loss
from ..registry import LOSSES
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
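# --- Usage sketch (illustrative, not part of the original file) ---
# A quick numeric check of the loss on random bbox deltas; the shapes and
# hyper-parameters below are assumptions, not values taken from a config.
if __name__ == '__main__':
    pred = torch.randn(8, 4)      # predicted bbox deltas
    target = torch.randn(8, 4)    # regression targets
    criterion = BalancedL1Loss(alpha=0.5, gamma=1.5, beta=1.0)
    print(criterion(pred, target))  # scalar tensor, mean-reduced by default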
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py",
"repo_id": "Cream",
"token_count": 1001
}
| 268 |
# --------------------------------------------------------
# Copyright (c) 2019 Jianyuan Guo ([email protected])
# --------------------------------------------------------
# from .darts_neck_search import DartsNeck
from .hit_neck_search import HitNeck
def build_search_neck(cfg):
"""Build neck model from config dict.
"""
if cfg is not None:
cfg_ = cfg.copy()
neck_type = cfg_.pop('type')
if neck_type == 'DARTS':
raise NotImplementedError
# return DartsNeck(**cfg_)
elif neck_type == 'HitDet':
return HitNeck(**cfg_)
else:
            raise KeyError('Invalid neck type {}'.format(neck_type))
else:
return None
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py",
"repo_id": "Cream",
"token_count": 287
}
| 269 |
import logging
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from mmdet.core import auto_fp16
from ..backbones import ResNet, make_res_layer
from ..registry import SHARED_HEADS
@SHARED_HEADS.register_module
class ResLayer(nn.Module):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None):
super(ResLayer, self).__init__()
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = make_res_layer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module('layer{}'.format(stage + 1), res_layer)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, 'layer{}'.format(self.stage + 1))
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
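# --- Usage sketch (illustrative, not part of the original file) ---
# ResLayer(depth=50, stage=3) rebuilds ResNet-50's layer4 for use as a shared
# RoI head; with the default stride of 2 it maps a [N, 1024, 14, 14] RoI feature
# map to [N, 2048, 7, 7]. The batch size and spatial size below are assumptions.
if __name__ == '__main__':
    import torch
    layer = ResLayer(depth=50, stage=3, stride=2)
    layer.init_weights()
    layer.eval()
    with torch.no_grad():
        out = layer(torch.randn(2, 1024, 14, 14))
    print(out.shape)  # expected: torch.Size([2, 2048, 7, 7])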
|
Cream/CDARTS/CDARTS_detection/mmdet/models/shared_heads/res_layer.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/shared_heads/res_layer.py",
"repo_id": "Cream",
"token_count": 1165
}
| 270 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='deform_conv',
ext_modules=[
CUDAExtension('deform_conv_cuda', [
'src/deform_conv_cuda.cpp',
'src/deform_conv_cuda_kernel.cu',
]),
CUDAExtension(
'deform_pool_cuda',
['src/deform_pool_cuda.cpp', 'src/deform_pool_cuda_kernel.cu']),
],
cmdclass={'build_ext': BuildExtension})
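# --- Usage note (illustrative, not part of the original file) ---
# The extensions are typically compiled in place before the dcn ops are imported:
#
#   python setup.py build_ext --inplace
#
# which produces the deform_conv_cuda and deform_pool_cuda modules next to this
# file (a CUDA toolkit matching the installed PyTorch build is assumed).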
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/setup.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/setup.py",
"repo_id": "Cream",
"token_count": 229
}
| 271 |
import numpy as np
import torch
from . import nms_cuda, nms_cpu
from .soft_nms_cpu import soft_nms_cpu
def nms(dets, iou_thr, device_id=None):
"""Dispatch to either CPU or GPU NMS implementations.
The input can be either a torch tensor or numpy array. GPU NMS will be used
if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
will be used. The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for NMS.
device_id (int, optional): when `dets` is a numpy array, if `device_id`
is None, then cpu nms is used, otherwise gpu_nms will be used.
Returns:
        tuple: kept bboxes and indices, which are always the same data type as
the input.
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
if dets_th.is_cuda:
inds = nms_cuda.nms(dets_th, iou_thr)
else:
inds = nms_cpu.nms(dets_th, iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
if isinstance(dets, torch.Tensor):
is_tensor = True
dets_np = dets.detach().cpu().numpy()
elif isinstance(dets, np.ndarray):
is_tensor = False
dets_np = dets
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
method_codes = {'linear': 1, 'gaussian': 2}
if method not in method_codes:
raise ValueError('Invalid method for SoftNMS: {}'.format(method))
new_dets, inds = soft_nms_cpu(
dets_np,
iou_thr,
method=method_codes[method],
sigma=sigma,
min_score=min_score)
if is_tensor:
return dets.new_tensor(new_dets), dets.new_tensor(
inds, dtype=torch.long)
else:
return new_dets.astype(np.float32), inds.astype(np.int64)
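# --- Usage sketch (illustrative, not part of the original file) ---
# Boxes are (x1, y1, x2, y2, score); the values below are made up. With a numpy
# input and device_id=None the CPU kernel is dispatched and numpy arrays are returned.
if __name__ == '__main__':
    dets = np.array([
        [10., 10., 50., 50., 0.9],
        [12., 12., 52., 52., 0.8],     # heavy overlap with the first box
        [100., 100., 150., 150., 0.7],
    ], dtype=np.float32)
    kept, keep_inds = nms(dets, iou_thr=0.5)
    print(keep_inds)  # the near-duplicate of box 0 should be suppressed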
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/nms_wrapper.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/nms_wrapper.py",
"repo_id": "Cream",
"token_count": 1208
}
| 272 |
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
const int height, const int width,
scalar_t y, scalar_t x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly;
scalar_t hx = 1. - lx;
// do bilinear interpolation
scalar_t lt = bottom_data[y_low * width + x_low];
scalar_t rt = bottom_data[y_low * width + x_high];
scalar_t lb = bottom_data[y_high * width + x_low];
scalar_t rb = bottom_data[y_high * width + x_high];
scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
return val;
}
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
const scalar_t *bottom_rois,
const scalar_t spatial_scale,
const int sample_num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
scalar_t *top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
// Force malformed ROIs to be 1x1
scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
scalar_t bin_size_h = roi_height / pooled_height;
scalar_t bin_size_w = roi_width / pooled_width;
const scalar_t *offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
int sample_num_h = (sample_num > 0)
? sample_num
: ceil(roi_height / pooled_height); // e.g., = 2
int sample_num_w =
(sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
scalar_t output_val = 0;
for (int iy = 0; iy < sample_num_h; iy++) {
const scalar_t y = roi_start_h + ph * bin_size_h +
(scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
(scalar_t)(sample_num_h);
for (int ix = 0; ix < sample_num_w; ix++) {
const scalar_t x = roi_start_w + pw * bin_size_w +
(scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
(scalar_t)(sample_num_w);
scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
height, width, y, x);
output_val += val;
}
}
output_val /= (sample_num_h * sample_num_w);
top_data[index] = output_val;
}
}
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
const float spatial_scale, const int sample_num,
const int channels, const int height,
const int width, const int num_rois,
const int pooled_height, const int pooled_width,
at::Tensor output) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
features.scalar_type(), "ROIAlignLaucherForward", ([&] {
const scalar_t *bottom_data = features.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *top_data = output.data<scalar_t>();
ROIAlignForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data, rois_data, scalar_t(spatial_scale),
sample_num, channels, height, width, pooled_height,
pooled_width, top_data);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
scalar_t y, scalar_t x,
scalar_t &w1, scalar_t &w2,
scalar_t &w3, scalar_t &w4,
int &x_low, int &x_high,
int &y_low, int &y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly;
scalar_t hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename scalar_t>
__global__ void ROIAlignBackward(
const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
const scalar_t spatial_scale, const int sample_num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, scalar_t *bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
// Force malformed ROIs to be 1x1
scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
scalar_t bin_size_h = roi_height / pooled_height;
scalar_t bin_size_w = roi_width / pooled_width;
scalar_t *offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int offset_top = (n * channels + c) * pooled_height * pooled_width +
ph * pooled_width + pw;
scalar_t offset_top_diff = top_diff[offset_top];
int sample_num_h = (sample_num > 0)
? sample_num
: ceil(roi_height / pooled_height); // e.g., = 2
int sample_num_w =
(sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
for (int iy = 0; iy < sample_num_h; iy++) {
const scalar_t y =
roi_start_h + ph * bin_size_h +
(scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
for (int ix = 0; ix < sample_num_w; ix++) {
const scalar_t x =
roi_start_w + pw * bin_size_w +
(scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
scalar_t w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient<scalar_t>(
height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
scalar_t g1 = offset_top_diff * w1 / count;
scalar_t g2 = offset_top_diff * w2 / count;
scalar_t g3 = offset_top_diff * w3 / count;
scalar_t g4 = offset_top_diff * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
}
}
}
}
}
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
const float spatial_scale, const int sample_num,
const int channels, const int height,
const int width, const int num_rois,
const int pooled_height, const int pooled_width,
at::Tensor bottom_grad) {
const int output_size = num_rois * pooled_height * pooled_width * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
const scalar_t *top_diff = top_grad.data<scalar_t>();
const scalar_t *rois_data = rois.data<scalar_t>();
scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
if (sizeof(scalar_t) == sizeof(double)) {
fprintf(stderr, "double is not supported\n");
exit(-1);
}
ROIAlignBackward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, top_diff, rois_data, spatial_scale, sample_num,
channels, height, width, pooled_height, pooled_width,
bottom_diff);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu",
"repo_id": "Cream",
"token_count": 5579
}
| 273 |
// modify from
// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/SigmoidFocalLoss.h
#include <torch/extension.h>
at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const int num_classes,
const float gamma, const float alpha);
at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
const at::Tensor &targets,
const at::Tensor &d_losses,
const int num_classes,
const float gamma, const float alpha);
// Interface for Python
at::Tensor SigmoidFocalLoss_forward(const at::Tensor &logits,
const at::Tensor &targets,
const int num_classes, const float gamma,
const float alpha) {
if (logits.type().is_cuda()) {
return SigmoidFocalLoss_forward_cuda(logits, targets, num_classes, gamma,
alpha);
  }
  AT_ERROR("SigmoidFocalLoss is not implemented on the CPU");
}
at::Tensor SigmoidFocalLoss_backward(const at::Tensor &logits,
const at::Tensor &targets,
const at::Tensor &d_losses,
const int num_classes, const float gamma,
const float alpha) {
if (logits.type().is_cuda()) {
return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses,
num_classes, gamma, alpha);
  }
  AT_ERROR("SigmoidFocalLoss is not implemented on the CPU");
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &SigmoidFocalLoss_forward,
"SigmoidFocalLoss forward (CUDA)");
m.def("backward", &SigmoidFocalLoss_backward,
"SigmoidFocalLoss backward (CUDA)");
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp",
"repo_id": "Cream",
"token_count": 1138
}
| 274 |
import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine channel layer
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
raise ValueError('Only support ResNet-50 and ResNet-101 currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = mmcv.load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict,
'res{}_{}_branch1'.format(i + 1, j),
'layer{}.{}.downsample.0'.format(i, j),
converted_names)
convert_bn(blobs, state_dict,
'res{}_{}_branch1_bn'.format(i + 1, j),
'layer{}.{}.downsample.1'.format(i, j),
converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
'res{}_{}_branch2{}'.format(i + 1, j, letter),
'layer{}.{}.conv{}'.format(i, j, k + 1),
converted_names)
convert_bn(blobs, state_dict,
'res{}_{}_branch2{}_bn'.format(i + 1, j, letter),
'layer{}.{}.bn{}'.format(i, j,
k + 1), converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
            print('Not converted: {}'.format(key))
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
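# --- Usage note (illustrative, not part of the original file) ---
# Example invocation; the file names are placeholders for a Detectron ResNet
# pickle and the desired PyTorch checkpoint path:
#
#   python detectron2pytorch.py R-50.pkl resnet50_caffe.pth 50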
|
Cream/CDARTS/CDARTS_detection/tools/detectron2pytorch.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/detectron2pytorch.py",
"repo_id": "Cream",
"token_count": 1966
}
| 275 |
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import trange
import os
from pycocotools.coco import COCO
from pycocotools import mask
from torchvision import transforms
from dataloaders import custom_transforms as tr
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class COCOSegmentation(Dataset):
NUM_CLASSES = 21
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
def __init__(self,
args,
base_dir,
split='train',
year='2017'):
super().__init__()
ann_file = os.path.join(base_dir, 'annotations/instances_{}{}.json'.format(split, year))
ids_file = os.path.join(base_dir, 'annotations/{}_ids_{}.pth'.format(split, year))
# self.img_dir = os.path.join(base_dir, 'images/{}{}'.format(split, year))
self.img_dir = os.path.join(base_dir, '{}{}'.format(split, year))
self.split = split
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
self.ids = torch.load(ids_file)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.args = args
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
if self.split == "train":
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample)
def _make_img_gt_point_pair(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
_img = Image.open(os.path.join(self.img_dir, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
_target = Image.fromarray(self._gen_seg_mask(
cocotarget, img_metadata['height'], img_metadata['width']))
return _img, _target
def _preprocess(self, ids, ids_file):
print("Preprocessing mask, this will take a while. " + \
"But don't worry, it only run once for each split.")
tbar = trange(len(ids))
new_ids = []
for i in tbar:
img_id = ids[i]
cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width'])
# more than 1k pixels
if (mask > 0).sum() > 1000:
new_ids.append(img_id)
tbar.set_description('Doing: {}/{}, got {} qualified images'. \
format(i, len(ids), len(new_ids)))
print('Found number of qualified images: ', len(new_ids))
torch.save(new_ids, ids_file)
return new_ids
def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
m = coco_mask.decode(rle)
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = transforms.Compose([
tr.FixScaleCrop(crop_size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def __len__(self):
return len(self.ids)
if __name__ == "__main__":
from dataloaders import custom_transforms as tr
from dataloaders.dataloader_utils import decode_segmap
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
    # NOTE: base_dir was missing in the original call; the path below is a placeholder.
    coco_val = COCOSegmentation(args, base_dir='/path/to/coco', split='val', year='2017')
dataloader = DataLoader(coco_val, batch_size=4, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='coco')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/coco.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/coco.py",
"repo_id": "Cream",
"token_count": 2863
}
| 276 |
# ------------------------------------------------------------------------------
# Builds model.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import torch
from .backbone import resnet, mobilenet, mnasnet, hrnet, xception
from .meta_arch import DeepLabV3, DeepLabV3Plus, PanopticDeepLab
from .loss import RegularCE, OhemCE, DeepLabCE, L1Loss, MSELoss, CrossEntropyLoss
def build_segmentation_model_from_cfg(config):
"""Builds segmentation model with specific configuration.
Args:
config: the configuration.
Returns:
A nn.Module segmentation model.
"""
model_map = {
'deeplabv3': DeepLabV3,
'deeplabv3plus': DeepLabV3Plus,
'panoptic_deeplab': PanopticDeepLab,
}
model_cfg = {
'deeplabv3': dict(
replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION,
in_channels=config.MODEL.DECODER.IN_CHANNELS,
feature_key=config.MODEL.DECODER.FEATURE_KEY,
decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS,
atrous_rates=config.MODEL.DECODER.ATROUS_RATES,
num_classes=config.DATASET.NUM_CLASSES,
semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC),
semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT,
),
'deeplabv3plus': dict(
replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION,
in_channels=config.MODEL.DECODER.IN_CHANNELS,
feature_key=config.MODEL.DECODER.FEATURE_KEY,
low_level_channels=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS,
low_level_key=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_KEY,
low_level_channels_project=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS_PROJECT,
decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS,
atrous_rates=config.MODEL.DECODER.ATROUS_RATES,
num_classes=config.DATASET.NUM_CLASSES,
semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC),
semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT,
),
'panoptic_deeplab': dict(
replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION,
in_channels=config.MODEL.DECODER.IN_CHANNELS,
feature_key=config.MODEL.DECODER.FEATURE_KEY,
low_level_channels=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS,
low_level_key=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_KEY,
low_level_channels_project=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS_PROJECT,
decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS,
atrous_rates=config.MODEL.DECODER.ATROUS_RATES,
num_classes=config.DATASET.NUM_CLASSES,
has_instance=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ENABLE,
instance_low_level_channels_project=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.LOW_LEVEL_CHANNELS_PROJECT,
instance_decoder_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.DECODER_CHANNELS,
instance_head_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.HEAD_CHANNELS,
instance_aspp_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ASPP_CHANNELS,
instance_num_classes=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.NUM_CLASSES,
instance_class_key=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.CLASS_KEY,
semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC),
semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT,
center_loss=build_loss_from_cfg(config.LOSS.CENTER),
center_loss_weight=config.LOSS.CENTER.WEIGHT,
offset_loss=build_loss_from_cfg(config.LOSS.OFFSET),
offset_loss_weight=config.LOSS.OFFSET.WEIGHT,
),
}
if config.MODEL.BACKBONE.META == 'resnet':
backbone = resnet.__dict__[config.MODEL.BACKBONE.NAME](
pretrained=config.MODEL.BACKBONE.PRETRAINED,
replace_stride_with_dilation=model_cfg[config.MODEL.META_ARCHITECTURE]['replace_stride_with_dilation']
)
elif config.MODEL.BACKBONE.META == 'mobilenet_v2':
backbone = mobilenet.__dict__[config.MODEL.BACKBONE.NAME](
pretrained=config.MODEL.BACKBONE.PRETRAINED,
)
elif config.MODEL.BACKBONE.META == 'mnasnet':
backbone = mnasnet.__dict__[config.MODEL.BACKBONE.NAME](
pretrained=config.MODEL.BACKBONE.PRETRAINED,
)
elif config.MODEL.BACKBONE.META == 'hrnet':
backbone = hrnet.__dict__[config.MODEL.BACKBONE.NAME](
pretrained=config.MODEL.BACKBONE.PRETRAINED,
)
elif config.MODEL.BACKBONE.META == 'xception':
backbone = xception.__dict__[config.MODEL.BACKBONE.NAME](
pretrained=config.MODEL.BACKBONE.PRETRAINED,
replace_stride_with_dilation=model_cfg[config.MODEL.META_ARCHITECTURE]['replace_stride_with_dilation']
)
else:
raise ValueError('Unknown meta backbone {}, please first implement it.'.format(config.MODEL.BACKBONE.META))
model = model_map[config.MODEL.META_ARCHITECTURE](
backbone,
**model_cfg[config.MODEL.META_ARCHITECTURE]
)
# set batchnorm momentum
for module in model.modules():
if isinstance(module, torch.nn.BatchNorm2d):
module.momentum = config.MODEL.BN_MOMENTUM
return model
def build_loss_from_cfg(config):
"""Builds loss function with specific configuration.
Args:
config: the configuration.
Returns:
A nn.Module loss.
"""
if config.NAME == 'cross_entropy':
# return CrossEntropyLoss(ignore_index=config.IGNORE, reduction='mean')
return RegularCE(ignore_label=config.IGNORE)
elif config.NAME == 'ohem':
return OhemCE(ignore_label=config.IGNORE, threshold=config.THRESHOLD, min_kept=config.MIN_KEPT)
elif config.NAME == 'hard_pixel_mining':
return DeepLabCE(ignore_label=config.IGNORE, top_k_percent_pixels=config.TOP_K_PERCENT)
elif config.NAME == 'mse':
return MSELoss(reduction=config.REDUCTION)
elif config.NAME == 'l1':
return L1Loss(reduction=config.REDUCTION)
else:
raise ValueError('Unknown loss type: {}'.format(config.NAME))
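# --- Usage sketch (illustrative, not part of the original file) ---
# build_loss_from_cfg only reads attribute-style fields, so a small namespace is
# enough to exercise it; the field values below are assumptions, not a real config.
if __name__ == '__main__':
    from types import SimpleNamespace
    loss_cfg = SimpleNamespace(NAME='hard_pixel_mining', IGNORE=255, TOP_K_PERCENT=0.2)
    criterion = build_loss_from_cfg(loss_cfg)
    print(type(criterion).__name__)  # expected: DeepLabCE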
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/build.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/build.py",
"repo_id": "Cream",
"token_count": 2938
}
| 277 |
# ------------------------------------------------------------------------------
# Post-processing to get instance and panoptic segmentation results.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import torch
import torch.nn.functional as F
from .semantic_post_processing import get_semantic_segmentation
__all__ = ['find_instance_center', 'get_instance_segmentation', 'get_panoptic_segmentation']
def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None):
"""
Find the center points from the center heatmap.
Arguments:
ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size,
            for consistency, we only support N=1.
threshold: A Float, threshold applied to center heatmap score.
nms_kernel: An Integer, NMS max pooling kernel size.
top_k: An Integer, top k centers to keep.
Returns:
A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
"""
if ctr_hmp.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
# thresholding, setting values below threshold to -1
ctr_hmp = F.threshold(ctr_hmp, threshold, -1)
# NMS
nms_padding = (nms_kernel - 1) // 2
ctr_hmp_max_pooled = F.max_pool2d(ctr_hmp, kernel_size=nms_kernel, stride=1, padding=nms_padding)
ctr_hmp[ctr_hmp != ctr_hmp_max_pooled] = -1
# squeeze first two dimensions
ctr_hmp = ctr_hmp.squeeze()
assert len(ctr_hmp.size()) == 2, 'Something is wrong with center heatmap dimension.'
# find non-zero elements
ctr_all = torch.nonzero(ctr_hmp > 0)
if top_k is None:
return ctr_all
elif ctr_all.size(0) < top_k:
return ctr_all
else:
# find top k centers.
top_k_scores, _ = torch.topk(torch.flatten(ctr_hmp), top_k)
return torch.nonzero(ctr_hmp > top_k_scores[-1])
def group_pixels(ctr, offsets):
"""
Gives each pixel in the image an instance id.
Arguments:
ctr: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size,
            for consistency, we only support N=1. The order of second dim is (offset_y, offset_x).
Returns:
A Tensor of shape [1, H, W] (to be gathered by distributed data parallel).
"""
if offsets.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
offsets = offsets.squeeze(0)
height, width = offsets.size()[1:]
# generates a coordinate map, where each location is the coordinate of that loc
y_coord = torch.arange(height, dtype=offsets.dtype, device=offsets.device).repeat(1, width, 1).transpose(1, 2)
x_coord = torch.arange(width, dtype=offsets.dtype, device=offsets.device).repeat(1, height, 1)
coord = torch.cat((y_coord, x_coord), dim=0)
ctr_loc = coord + offsets
ctr_loc = ctr_loc.reshape((2, height * width)).transpose(1, 0)
# ctr: [K, 2] -> [K, 1, 2]
# ctr_loc = [H*W, 2] -> [1, H*W, 2]
ctr = ctr.unsqueeze(1)
ctr_loc = ctr_loc.unsqueeze(0)
# distance: [K, H*W]
distance = torch.norm(ctr - ctr_loc, dim=-1)
# finds center with minimum distance at each location, offset by 1, to reserve id=0 for stuff
instance_id = torch.argmin(distance, dim=0).reshape((1, height, width)) + 1
return instance_id
def get_instance_segmentation(sem_seg, ctr_hmp, offsets, thing_list, threshold=0.1, nms_kernel=3, top_k=None,
thing_seg=None):
"""
Post-processing for instance segmentation, gets class agnostic instance id map.
Arguments:
sem_seg: A Tensor of shape [1, H, W], predicted semantic label.
ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size,
            for consistency, we only support N=1.
offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size,
            for consistency, we only support N=1. The order of second dim is (offset_y, offset_x).
thing_list: A List of thing class id.
threshold: A Float, threshold applied to center heatmap score.
nms_kernel: An Integer, NMS max pooling kernel size.
top_k: An Integer, top k centers to keep.
        thing_seg: A Tensor of shape [1, H, W], predicted foreground mask; if not provided, it is inferred
            from the semantic prediction.
Returns:
A Tensor of shape [1, H, W] (to be gathered by distributed data parallel).
A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x).
"""
if thing_seg is None:
# gets foreground segmentation
thing_seg = torch.zeros_like(sem_seg)
for thing_class in thing_list:
thing_seg[sem_seg == thing_class] = 1
ctr = find_instance_center(ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k)
if ctr.size(0) == 0:
return torch.zeros_like(sem_seg), ctr.unsqueeze(0)
ins_seg = group_pixels(ctr, offsets)
return thing_seg * ins_seg, ctr.unsqueeze(0)
def merge_semantic_and_instance(sem_seg, ins_seg, label_divisor, thing_list, stuff_area, void_label):
"""
Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic
instance segmentation label.
Arguments:
sem_seg: A Tensor of shape [1, H, W], predicted semantic label.
ins_seg: A Tensor of shape [1, H, W], predicted instance label.
label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
thing_list: A List of thing class id.
        stuff_area: An Integer, remove stuff whose area is less than stuff_area.
void_label: An Integer, indicates the region has no confident prediction.
Returns:
A Tensor of shape [1, H, W] (to be gathered by distributed data parallel).
Raises:
ValueError, if batch size is not 1.
"""
# In case thing mask does not align with semantic prediction
pan_seg = torch.zeros_like(sem_seg) + void_label
thing_seg = ins_seg > 0
semantic_thing_seg = torch.zeros_like(sem_seg)
for thing_class in thing_list:
semantic_thing_seg[sem_seg == thing_class] = 1
# keep track of instance id for each class
class_id_tracker = {}
# paste thing by majority voting
instance_ids = torch.unique(ins_seg)
for ins_id in instance_ids:
if ins_id == 0:
continue
# Make sure only do majority voting within semantic_thing_seg
thing_mask = (ins_seg == ins_id) & (semantic_thing_seg == 1)
if torch.nonzero(thing_mask).size(0) == 0:
continue
class_id, _ = torch.mode(sem_seg[thing_mask].view(-1, ))
if class_id.item() in class_id_tracker:
new_ins_id = class_id_tracker[class_id.item()]
else:
class_id_tracker[class_id.item()] = 1
new_ins_id = 1
class_id_tracker[class_id.item()] += 1
pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
# paste stuff to unoccupied area
class_ids = torch.unique(sem_seg)
for class_id in class_ids:
if class_id.item() in thing_list:
# thing class
continue
# calculate stuff area
stuff_mask = (sem_seg == class_id) & (~thing_seg)
area = torch.nonzero(stuff_mask).size(0)
if area >= stuff_area:
pan_seg[stuff_mask] = class_id * label_divisor
return pan_seg
def get_panoptic_segmentation(sem, ctr_hmp, offsets, thing_list, label_divisor, stuff_area, void_label,
threshold=0.1, nms_kernel=3, top_k=None, foreground_mask=None):
"""
Post-processing for panoptic segmentation.
Arguments:
        sem: A Tensor of shape [N, C, H, W] of raw semantic output, where N is the batch size; for consistency,
we only support N=1. Or, a processed Tensor of shape [1, H, W].
ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size,
            for consistency, we only support N=1.
offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size,
            for consistency, we only support N=1. The order of second dim is (offset_y, offset_x).
thing_list: A List of thing class id.
label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
        stuff_area: An Integer, remove stuff whose area is less than stuff_area.
void_label: An Integer, indicates the region has no confident prediction.
threshold: A Float, threshold applied to center heatmap score.
nms_kernel: An Integer, NMS max pooling kernel size.
top_k: An Integer, top k centers to keep.
foreground_mask: A Tensor of shape [N, 2, H, W] of raw foreground mask, where N is the batch size,
we only support N=1. Or, a processed Tensor of shape [1, H, W].
Returns:
A Tensor of shape [1, H, W] (to be gathered by distributed data parallel), int64.
Raises:
ValueError, if batch size is not 1.
"""
if sem.dim() != 4 and sem.dim() != 3:
raise ValueError('Semantic prediction with un-supported dimension: {}.'.format(sem.dim()))
if sem.dim() == 4 and sem.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
if ctr_hmp.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
if offsets.size(0) != 1:
raise ValueError('Only supports inference for batch size = 1')
if foreground_mask is not None:
if foreground_mask.dim() != 4 and foreground_mask.dim() != 3:
raise ValueError('Foreground prediction with un-supported dimension: {}.'.format(foreground_mask.dim()))
if sem.dim() == 4:
semantic = get_semantic_segmentation(sem)
else:
semantic = sem
if foreground_mask is not None:
if foreground_mask.dim() == 4:
thing_seg = get_semantic_segmentation(foreground_mask)
else:
thing_seg = foreground_mask
else:
thing_seg = None
instance, center = get_instance_segmentation(semantic, ctr_hmp, offsets, thing_list,
threshold=threshold, nms_kernel=nms_kernel, top_k=top_k,
thing_seg=thing_seg)
panoptic = merge_semantic_and_instance(semantic, instance, label_divisor, thing_list, stuff_area, void_label)
return panoptic, center
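
# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of how get_panoptic_segmentation might be called with
# dummy tensors. All shapes, class ids and thresholds below are assumptions
# chosen only for this demo; real inputs come from a trained Panoptic-DeepLab
# style model.
if __name__ == '__main__':
    dummy_sem = torch.randn(1, 19, 64, 64)   # raw semantic logits, N=1
    dummy_ctr = torch.rand(1, 1, 64, 64)     # raw center heatmap
    dummy_off = torch.randn(1, 2, 64, 64)    # raw (offset_y, offset_x)
    pan, centers = get_panoptic_segmentation(
        dummy_sem, dummy_ctr, dummy_off,
        thing_list=[11, 12, 13], label_divisor=1000, stuff_area=2048,
        void_label=255 * 1000, threshold=0.1, nms_kernel=7, top_k=200)
    print(pan.shape)  # expected: torch.Size([1, 64, 64])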
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py",
"repo_id": "Cream",
"token_count": 4375
}
| 278 |
import os
import cv2
cv2.setNumThreads(0)
import torch
import numpy as np
from random import shuffle
import torch.utils.data as data
class BaseDataset(data.Dataset):
def __init__(self, setting, split_name, preprocess=None, file_length=None):
super(BaseDataset, self).__init__()
self._split_name = split_name
self._img_path = setting['img_root']
self._gt_path = setting['gt_root']
self._portion = setting['portion'] if 'portion' in setting else None
self._train_source = setting['train_source']
self._eval_source = setting['eval_source']
self._test_source = setting['test_source'] if 'test_source' in setting else setting['eval_source']
self._down_sampling = setting['down_sampling']
print("using downsampling:", self._down_sampling)
self._file_names = self._get_file_names(split_name)
print("Found %d images"%len(self._file_names))
self._file_length = file_length
self.preprocess = preprocess
def __len__(self):
if self._file_length is not None:
return self._file_length
return len(self._file_names)
def __getitem__(self, index):
if self._file_length is not None:
names = self._construct_new_file_names(self._file_length)[index]
else:
names = self._file_names[index]
img_path = os.path.join(self._img_path, names[0])
gt_path = os.path.join(self._gt_path, names[1])
item_name = names[1].split("/")[-1].split(".")[0]
img, gt = self._fetch_data(img_path, gt_path)
img = img[:, :, ::-1]
if self.preprocess is not None:
img, gt, extra_dict = self.preprocess(img, gt)
if self._split_name == 'train':
img = torch.from_numpy(np.ascontiguousarray(img)).float()
gt = torch.from_numpy(np.ascontiguousarray(gt)).long()
if self.preprocess is not None and extra_dict is not None:
for k, v in extra_dict.items():
extra_dict[k] = torch.from_numpy(np.ascontiguousarray(v))
if 'label' in k:
extra_dict[k] = extra_dict[k].long()
if 'img' in k:
extra_dict[k] = extra_dict[k].float()
output_dict = dict(data=img, label=gt, fn=str(item_name),
n=len(self._file_names))
if self.preprocess is not None and extra_dict is not None:
output_dict.update(**extra_dict)
return output_dict
def _fetch_data(self, img_path, gt_path, dtype=None):
img = self._open_image(img_path, down_sampling=self._down_sampling)
gt = self._open_image(gt_path, cv2.IMREAD_GRAYSCALE, dtype=dtype, down_sampling=self._down_sampling)
return img, gt
def _get_file_names(self, split_name):
assert split_name in ['train', 'val', 'test']
source = self._train_source
if split_name == "val":
source = self._eval_source
elif split_name == 'test':
source = self._test_source
file_names = []
with open(source) as f:
files = f.readlines()
if self._portion is not None:
shuffle(files)
num_files = len(files)
if self._portion > 0:
split = int(np.floor(self._portion * num_files))
files = files[:split]
elif self._portion < 0:
split = int(np.floor((1 + self._portion) * num_files))
files = files[split:]
for item in files:
img_name, gt_name = self._process_item_names(item)
file_names.append([img_name, gt_name])
return file_names
def _construct_new_file_names(self, length):
assert isinstance(length, int)
files_len = len(self._file_names)
new_file_names = self._file_names * (length // files_len)
rand_indices = torch.randperm(files_len).tolist()
new_indices = rand_indices[:length % files_len]
new_file_names += [self._file_names[i] for i in new_indices]
return new_file_names
@staticmethod
def _process_item_names(item):
item = item.strip()
# item = item.split('\t')
item = item.split(' ')
img_name = item[0]
gt_name = item[1]
return img_name, gt_name
def get_length(self):
return self.__len__()
@staticmethod
def _open_image(filepath, mode=cv2.IMREAD_COLOR, dtype=None, down_sampling=1):
# cv2: B G R
# h w c
img = np.array(cv2.imread(filepath, mode), dtype=dtype)
if isinstance(down_sampling, int):
H, W = img.shape[:2]
if len(img.shape) == 3:
img = cv2.resize(img, (W // down_sampling, H // down_sampling), interpolation=cv2.INTER_LINEAR)
else:
img = cv2.resize(img, (W // down_sampling, H // down_sampling), interpolation=cv2.INTER_NEAREST)
assert img.shape[0] == H // down_sampling and img.shape[1] == W // down_sampling
else:
assert (isinstance(down_sampling, tuple) or isinstance(down_sampling, list)) and len(down_sampling) == 2
if len(img.shape) == 3:
img = cv2.resize(img, (down_sampling[1], down_sampling[0]), interpolation=cv2.INTER_LINEAR)
else:
img = cv2.resize(img, (down_sampling[1], down_sampling[0]), interpolation=cv2.INTER_NEAREST)
assert img.shape[0] == down_sampling[0] and img.shape[1] == down_sampling[1]
return img
@classmethod
def get_class_colors(*args):
raise NotImplementedError
@classmethod
def get_class_names(*args):
raise NotImplementedError
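
# --- Hedged usage sketch (not part of the original file) ---
# A concrete dataset is expected to subclass BaseDataset and provide the
# class colors/names; the two-class example below is purely illustrative.
class ToyDataset(BaseDataset):
    @classmethod
    def get_class_colors(*args):
        return [[0, 0, 0], [255, 255, 255]]

    @classmethod
    def get_class_names(*args):
        return ['background', 'foreground']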
if __name__ == "__main__":
data_setting = {'img_root': '',
'gt_root': '',
'train_source': '',
'eval_source': '',
'down_sampling': 1}
bd = BaseDataset(data_setting, 'train', None)
print(bd.get_class_names())
|
Cream/CDARTS/CDARTS_segmentation/tools/datasets/BaseDataset.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/datasets/BaseDataset.py",
"repo_id": "Cream",
"token_count": 2865
}
| 279 |
#!/usr/bin/env python2
'''
Visualization demo for panoptic COCO sample_data
The code shows an example of color generation for panoptic data (with
"generate_new_colors" set to True). For each segment distinct color is used in
a way that it close to the color of corresponding semantic class.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
import numpy as np
import json
import cv2
import os
import PIL.Image as Image
import matplotlib.pyplot as plt
from skimage.segmentation import find_boundaries
from panopticapi.utils import IdGenerator, rgb2id
# whether the colors from the PNG are used or new colors are generated
generate_new_colors = True
json_file = './panoptic_cityscapes_mul2/panoptic/predictions.json'
segmentations_folder = './panoptic_cityscapes_mul2/panoptic/predictions/'
img_folder = '/home2/hongyuan/data/cityscapes/leftImg8bit/val/'
panoptic_coco_categories = './panoptic_coco_categories.json'
output_dir = 'cityscapes_vis_results'
os.makedirs(output_dir, exist_ok=True)
with open(json_file, 'r') as f:
coco_d = json.load(f)
# ann = np.random.choice(coco_d['annotations'])
with open(panoptic_coco_categories, 'r') as f:
categories_list = json.load(f)
categories = {category['id']: category for category in categories_list}
# find input img that correspond to the annotation
img = None
# for image_info in coco_d['images']:
for image_info in coco_d['images']:
for ann in coco_d['annotations']:
if image_info['id'] == ann['image_id']:
try:
img = np.array(
Image.open(os.path.join(img_folder, image_info['file_name'].split('_')[0], image_info['file_name'].split('gtFine_leftImg8bit.png')[0]+'leftImg8bit.png'))
)
except:
print("Undable to find correspoding input image.")
break
segmentation = np.array(
Image.open(os.path.join(segmentations_folder, ann['file_name'])),
dtype=np.uint8
)
segmentation_id = rgb2id(segmentation)
# find segments boundaries
boundaries = find_boundaries(segmentation_id, mode='thick')
if generate_new_colors:
segmentation[:, :, :] = 0
color_generator = IdGenerator(categories)
for segment_info in ann['segments_info']:
try:
color = color_generator.get_color(segment_info['category_id'])
mask = segmentation_id == segment_info['id']
segmentation[mask] = color
except:
pass
# depict boundaries
segmentation[boundaries] = [0, 0, 0]
if img.shape[:2] == segmentation.shape[:2]:
pass
else:
print('img: {} shape error! img shape: {} seg shape: {}'.format(ann['image_id'], img.shape[:2], segmentation.shape[:2]))
continue
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
try:
segmentation = cv2.addWeighted(img, 0.6, segmentation, 0.4, 0)
except:
import pdb; pdb.set_trace()
cv2.imwrite(os.path.join(output_dir, '{}.jpg').format(ann['image_id']), img[:, :, ::-1])
cv2.imwrite(os.path.join(output_dir, '{}_mask.jpg').format(ann['image_id']), segmentation[:, :, ::-1])
#if img is None:
# plt.figure()
# plt.imshow(segmentation)
# plt.axis('off')
#else:
# plt.figure(figsize=(9, 5))
# plt.subplot(121)
# plt.imshow(img)
# plt.axis('off')
# plt.subplot(122)
# plt.imshow(segmentation)
# plt.axis('off')
# plt.tight_layout()
#plt.show()
|
Cream/CDARTS/CDARTS_segmentation/tools/vis/vis_cityscapes.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/vis/vis_cityscapes.py",
"repo_id": "Cream",
"token_count": 1590
}
| 280 |
#!/usr/bin/env python3
# encoding: utf-8
import os
import cv2
cv2.setNumThreads(0)
import numpy as np
from utils.visualize import print_iou, show_img, show_prediction
from engine.evaluator import Evaluator
from engine.logger import get_logger
from seg_opr.metric import hist_info, compute_score
logger = get_logger()
class SegEvaluator(Evaluator):
def func_per_iteration(self, data, device, iter=None):
if self.config is not None: config = self.config
img = data['data']
label = data['label']
name = data['fn']
if len(config.eval_scale_array) == 1:
pred = self.whole_eval(img, None, device)
else:
pred = self.sliding_eval(img, config.eval_crop_size, config.eval_stride_rate, device)
hist_tmp, labeled_tmp, correct_tmp = hist_info(config.num_classes, pred, label)
results_dict = {'hist': hist_tmp, 'labeled': labeled_tmp, 'correct': correct_tmp}
if self.save_path is not None:
fn = name + '.png'
cv2.imwrite(os.path.join(self.save_path, fn), pred)
logger.info('Save the image ' + fn)
# tensorboard logger does not fit multiprocess
if self.logger is not None and iter is not None:
colors = self.dataset.get_class_colors()
image = img
clean = np.zeros(label.shape)
comp_img = show_img(colors, config.background, image, clean, label, pred)
self.logger.add_image('vis', np.swapaxes(np.swapaxes(comp_img, 0, 2), 1, 2), iter)
if self.show_image or self.show_prediction:
colors = self.dataset.get_class_colors()
image = img
clean = np.zeros(label.shape)
if self.show_image:
comp_img = show_img(colors, config.background, image, clean, label, pred)
else:
comp_img = show_prediction(colors, config.background, image, pred)
cv2.imwrite(name + ".png", comp_img[:,:,::-1])
return results_dict
def compute_metric(self, results):
hist = np.zeros((self.config.num_classes, self.config.num_classes))
correct = 0
labeled = 0
count = 0
for d in results:
hist += d['hist']
correct += d['correct']
labeled += d['labeled']
count += 1
iu, mean_IU, mean_IU_no_back, mean_pixel_acc = compute_score(hist, correct, labeled)
result_line = print_iou(iu, mean_pixel_acc, self.dataset.get_class_names(), True)
return result_line, mean_IU
|
Cream/CDARTS/CDARTS_segmentation/train/eval.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/eval.py",
"repo_id": "Cream",
"token_count": 1176
}
| 281 |
import torch
import torch.nn as nn
__all__ = ['OPS', 'ResNetBasicblock', 'SearchSpaceNames']
OPS = {
'none' : lambda C_in, C_out, stride, affine, track_running_stats: Zero(C_in, C_out, stride),
'avg_pool_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: POOLING(C_in, C_out, stride, 'avg', affine, track_running_stats),
'max_pool_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: POOLING(C_in, C_out, stride, 'max', affine, track_running_stats),
'nor_conv_7x7' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (7,7), (stride,stride), (3,3), (1,1), affine, track_running_stats),
'nor_conv_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (3,3), (stride,stride), (1,1), (1,1), affine, track_running_stats),
'nor_conv_1x1' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (1,1), (stride,stride), (0,0), (1,1), affine, track_running_stats),
'dua_sepc_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: DualSepConv(C_in, C_out, (3,3), (stride,stride), (1,1), (1,1), affine, track_running_stats),
'dua_sepc_5x5' : lambda C_in, C_out, stride, affine, track_running_stats: DualSepConv(C_in, C_out, (5,5), (stride,stride), (2,2), (1,1), affine, track_running_stats),
'dil_sepc_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: SepConv(C_in, C_out, (3,3), (stride,stride), (2,2), (2,2), affine, track_running_stats),
'dil_sepc_5x5' : lambda C_in, C_out, stride, affine, track_running_stats: SepConv(C_in, C_out, (5,5), (stride,stride), (4,4), (2,2), affine, track_running_stats),
'skip_connect' : lambda C_in, C_out, stride, affine, track_running_stats: Identity() if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride, affine, track_running_stats),
}
CONNECT_NAS_BENCHMARK = ['none', 'skip_connect', 'nor_conv_3x3']
NAS_BENCH_201 = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
DARTS_SPACE = ['none', 'skip_connect', 'dua_sepc_3x3', 'dua_sepc_5x5', 'dil_sepc_3x3', 'dil_sepc_5x5', 'avg_pool_3x3', 'max_pool_3x3']
SearchSpaceNames = {'connect-nas' : CONNECT_NAS_BENCHMARK,
'nas-bench-201': NAS_BENCH_201,
'darts' : DARTS_SPACE}
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats)
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats),
)
def forward(self, x):
return self.op(x)
class DualSepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True):
super(DualSepConv, self).__init__()
self.op_a = SepConv(C_in, C_in , kernel_size, stride, padding, dilation, affine, track_running_stats)
self.op_b = SepConv(C_in, C_out, kernel_size, 1, padding, dilation, affine, track_running_stats)
def forward(self, x):
x = self.op_a(x)
x = self.op_b(x)
return x
class ResNetBasicblock(nn.Module):
def __init__(self, inplanes, planes, stride, affine=True):
super(ResNetBasicblock, self).__init__()
assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, affine)
self.conv_b = ReLUConvBN( planes, planes, 3, 1, 1, 1, affine)
if stride == 2:
self.downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
elif inplanes != planes:
self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, affine)
else:
self.downsample = None
self.in_dim = inplanes
self.out_dim = planes
self.stride = stride
self.num_conv = 2
def extra_repr(self):
string = '{name}(inC={in_dim}, outC={out_dim}, stride={stride})'.format(name=self.__class__.__name__, **self.__dict__)
return string
def forward(self, inputs):
basicblock = self.conv_a(inputs)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
residual = self.downsample(inputs)
else:
residual = inputs
return residual + basicblock
class POOLING(nn.Module):
def __init__(self, C_in, C_out, stride, mode, affine=True, track_running_stats=True):
super(POOLING, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1, affine, track_running_stats)
if mode == 'avg' : self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
elif mode == 'max': self.op = nn.MaxPool2d(3, stride=stride, padding=1)
else : raise ValueError('Invalid mode={:} in POOLING'.format(mode))
def forward(self, inputs):
if self.preprocess: x = self.preprocess(inputs)
else : x = inputs
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, C_in, C_out, stride):
super(Zero, self).__init__()
self.C_in = C_in
self.C_out = C_out
self.stride = stride
self.is_zero = True
def forward(self, x):
if self.C_in == self.C_out:
if self.stride == 1: return x.mul(0.)
else : return x[:,:,::self.stride,::self.stride].mul(0.)
else:
shape = list(x.shape)
shape[1] = self.C_out
zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
return zeros
def extra_repr(self):
return 'C_in={C_in}, C_out={C_out}, stride={stride}'.format(**self.__dict__)
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, stride, affine, track_running_stats):
super(FactorizedReduce, self).__init__()
self.stride = stride
self.C_in = C_in
self.C_out = C_out
self.relu = nn.ReLU(inplace=False)
if stride == 2:
#assert C_out % 2 == 0, 'C_out : {:}'.format(C_out)
C_outs = [C_out // 2, C_out - C_out // 2]
self.convs = nn.ModuleList()
for i in range(2):
self.convs.append( nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False) )
self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
else:
raise ValueError('Invalid stride : {:}'.format(stride))
self.bn = nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats)
def forward(self, x):
x = self.relu(x)
y = self.pad(x)
out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:,1:])], dim=1)
out = self.bn(out)
return out
def extra_repr(self):
return 'C_in={C_in}, C_out={C_out}, stride={stride}'.format(**self.__dict__)
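
# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of how an operation can be instantiated from the OPS
# table. The channel sizes and the input shape are arbitrary assumptions.
if __name__ == '__main__':
    op = OPS['nor_conv_3x3'](C_in=16, C_out=32, stride=2,
                             affine=True, track_running_stats=True)
    x = torch.randn(2, 16, 32, 32)
    y = op(x)
    print(y.shape)  # expected: torch.Size([2, 32, 16, 16])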
|
Cream/CDARTS/benchmark201/models/ops.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/models/ops.py",
"repo_id": "Cream",
"token_count": 3465
}
| 282 |
from lib.utils.util import *
from timm.models.efficientnet_blocks import *
# ChildNet Builder definition.
class ChildNetBuilder:
def __init__(
self,
channel_multiplier=1.0,
channel_divisor=8,
channel_min=None,
output_stride=32,
pad_type='',
act_layer=None,
se_kwargs=None,
norm_layer=nn.BatchNorm2d,
norm_kwargs=None,
drop_path_rate=0.,
feature_location='',
verbose=False,
logger=None):
self.channel_multiplier = channel_multiplier
self.channel_divisor = channel_divisor
self.channel_min = channel_min
self.output_stride = output_stride
self.pad_type = pad_type
self.act_layer = act_layer
self.se_kwargs = se_kwargs
self.norm_layer = norm_layer
self.norm_kwargs = norm_kwargs
self.drop_path_rate = drop_path_rate
self.feature_location = feature_location
assert feature_location in ('pre_pwl', 'post_exp', '')
self.verbose = verbose
self.in_chs = None
self.features = OrderedDict()
self.logger = logger
def _round_channels(self, chs):
return round_channels(
chs,
self.channel_multiplier,
self.channel_divisor,
self.channel_min)
def _make_block(self, ba, block_idx, block_count):
drop_path_rate = self.drop_path_rate * block_idx / block_count
bt = ba.pop('block_type')
ba['in_chs'] = self.in_chs
ba['out_chs'] = self._round_channels(ba['out_chs'])
if 'fake_in_chs' in ba and ba['fake_in_chs']:
ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
ba['norm_layer'] = self.norm_layer
ba['norm_kwargs'] = self.norm_kwargs
ba['pad_type'] = self.pad_type
# block act fn overrides the model default
ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
assert ba['act_layer'] is not None
if bt == 'ir':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
self.logger.info(
' InvertedResidual {}, Args: {}'.format(
block_idx, str(ba)))
block = InvertedResidual(**ba)
elif bt == 'ds' or bt == 'dsa':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
self.logger.info(
' DepthwiseSeparable {}, Args: {}'.format(
block_idx, str(ba)))
block = DepthwiseSeparableConv(**ba)
elif bt == 'cn':
if self.verbose:
self.logger.info(
' ConvBnAct {}, Args: {}'.format(
block_idx, str(ba)))
block = ConvBnAct(**ba)
else:
assert False, 'Unknown block type (%s) while building model.' % bt
self.in_chs = ba['out_chs'] # update in_chs for arg of next block
return block
def __call__(self, in_chs, model_block_args):
""" Build the blocks
Args:
in_chs: Number of input-channels passed to first block
model_block_args: A list of lists, outer list defines stages, inner
list contains strings defining block configuration(s)
Return:
List of block stacks (each stack wrapped in nn.Sequential)
"""
if self.verbose:
self.logger.info(
'Building model trunk with %d stages...' %
len(model_block_args))
self.in_chs = in_chs
total_block_count = sum([len(x) for x in model_block_args])
total_block_idx = 0
current_stride = 2
current_dilation = 1
feature_idx = 0
stages = []
# outer list of block_args defines the stacks ('stages' by some
# conventions)
for stage_idx, stage_block_args in enumerate(model_block_args):
last_stack = stage_idx == (len(model_block_args) - 1)
if self.verbose:
self.logger.info('Stack: {}'.format(stage_idx))
assert isinstance(stage_block_args, list)
blocks = []
# each stack (stage) contains a list of block arguments
for block_idx, block_args in enumerate(stage_block_args):
last_block = block_idx == (len(stage_block_args) - 1)
extract_features = '' # No features extracted
if self.verbose:
self.logger.info(' Block: {}'.format(block_idx))
# Sort out stride, dilation, and feature extraction details
assert block_args['stride'] in (1, 2)
if block_idx >= 1:
# only the first block in any stack can have a stride > 1
block_args['stride'] = 1
do_extract = False
if self.feature_location == 'pre_pwl':
if last_block:
next_stage_idx = stage_idx + 1
if next_stage_idx >= len(model_block_args):
do_extract = True
else:
do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
elif self.feature_location == 'post_exp':
if block_args['stride'] > 1 or (last_stack and last_block):
do_extract = True
if do_extract:
extract_features = self.feature_location
next_dilation = current_dilation
if block_args['stride'] > 1:
next_output_stride = current_stride * block_args['stride']
if next_output_stride > self.output_stride:
next_dilation = current_dilation * block_args['stride']
block_args['stride'] = 1
if self.verbose:
self.logger.info(
' Converting stride to dilation to maintain output_stride=={}'.format(
self.output_stride))
else:
current_stride = next_output_stride
block_args['dilation'] = current_dilation
if next_dilation != current_dilation:
current_dilation = next_dilation
# create the block
block = self._make_block(
block_args, total_block_idx, total_block_count)
blocks.append(block)
# stash feature module name and channel info for model feature
# extraction
if extract_features:
feature_module = block.feature_module(extract_features)
if feature_module:
feature_module = 'blocks.{}.{}.'.format(
stage_idx, block_idx) + feature_module
feature_channels = block.feature_channels(extract_features)
self.features[feature_idx] = dict(
name=feature_module,
num_chs=feature_channels
)
feature_idx += 1
# incr global block idx (across all stacks)
total_block_idx += 1
stages.append(nn.Sequential(*blocks))
return stages
|
Cream/Cream/lib/models/builders/build_childnet.py/0
|
{
"file_path": "Cream/Cream/lib/models/builders/build_childnet.py",
"repo_id": "Cream",
"token_count": 4048
}
| 283 |
# model settings
model = dict(
type='CascadeRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
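
# --- Hedged usage note (not part of the original file) ---
# A base model config like this is normally pulled in by a top-level mmdet
# config through `_base_`; the paths below are illustrative assumptions.
# _base_ = [
#     '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
#     '../_base_/datasets/coco_instance.py',
#     '../_base_/schedules/schedule_1x.py',
#     '../_base_/default_runtime.py',
# ]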
|
Cream/EfficientViT/downstream/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py",
"repo_id": "Cream",
"token_count": 4560
}
| 284 |
# model settings
model = dict(
type='RPN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
Cream/EfficientViT/downstream/configs/_base_/models/rpn_r50_fpn.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/models/rpn_r50_fpn.py",
"repo_id": "Cream",
"token_count": 1066
}
| 285 |
from mmcv.runner import OptimizerHook, HOOKS
try:
import apex
except ImportError:
print('apex is not installed')
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
"""Optimizer hook for distributed training."""
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.update_interval = update_interval
self.use_fp16 = use_fp16
def before_run(self, runner):
runner.optimizer.zero_grad()
def after_train_iter(self, runner):
runner.outputs['loss'] /= self.update_interval
if self.use_fp16:
with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
else:
runner.outputs['loss'].backward()
if self.every_n_iters(runner, self.update_interval):
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
runner.optimizer.zero_grad()
|
Cream/EfficientViT/downstream/mmcv_custom/runner/optimizer.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/mmcv_custom/runner/optimizer.py",
"repo_id": "Cream",
"token_count": 513
}
| 286 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class SwinMLPBlock(nn.Module):
r""" Swin MLP Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.padding = [self.window_size - self.shift_size, self.shift_size,
self.window_size - self.shift_size, self.shift_size] # P_l,P_r,P_t,P_b
self.norm1 = norm_layer(dim)
# use group convolution to implement multi-head MLP
self.spatial_mlp = nn.Conv1d(self.num_heads * self.window_size ** 2,
self.num_heads * self.window_size ** 2,
kernel_size=1,
groups=self.num_heads)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# shift
if self.shift_size > 0:
P_l, P_r, P_t, P_b = self.padding
shifted_x = F.pad(x, [0, 0, P_l, P_r, P_t, P_b], "constant", 0)
else:
shifted_x = x
_, _H, _W, _ = shifted_x.shape
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# Window/Shifted-Window Spatial MLP
x_windows_heads = x_windows.view(-1, self.window_size * self.window_size, self.num_heads, C // self.num_heads)
x_windows_heads = x_windows_heads.transpose(1, 2) # nW*B, nH, window_size*window_size, C//nH
x_windows_heads = x_windows_heads.reshape(-1, self.num_heads * self.window_size * self.window_size,
C // self.num_heads)
spatial_mlp_windows = self.spatial_mlp(x_windows_heads) # nW*B, nH*window_size*window_size, C//nH
spatial_mlp_windows = spatial_mlp_windows.view(-1, self.num_heads, self.window_size * self.window_size,
C // self.num_heads).transpose(1, 2)
spatial_mlp_windows = spatial_mlp_windows.reshape(-1, self.window_size * self.window_size, C)
# merge windows
spatial_mlp_windows = spatial_mlp_windows.reshape(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(spatial_mlp_windows, self.window_size, _H, _W) # B H' W' C
# reverse shift
if self.shift_size > 0:
P_l, P_r, P_t, P_b = self.padding
x = shifted_x[:, P_t:-P_b, P_l:-P_r, :].contiguous()
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# Window/Shifted-Window Spatial MLP
if self.shift_size > 0:
nW = (H / self.window_size + 1) * (W / self.window_size + 1)
else:
nW = H * W / self.window_size / self.window_size
flops += nW * self.dim * (self.window_size * self.window_size) * (self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin MLP layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., drop=0., drop_path=0.,
norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinMLPBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinMLP(nn.Module):
r""" Swin MLP
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin MLP layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Linear, nn.Conv1d)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
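
# --- Hedged usage sketch (not part of the original file) ---
# Builds a SwinMLP with assumed default hyper-parameters, runs a dummy forward
# pass, and checks that window_reverse inverts window_partition.
if __name__ == '__main__':
    xw = torch.randn(2, 14, 14, 96)                 # (B, H, W, C)
    win = window_partition(xw, window_size=7)       # (B*num_windows, 7, 7, 96)
    assert torch.equal(window_reverse(win, 7, 14, 14), xw)

    model = SwinMLP(img_size=224, patch_size=4, embed_dim=96,
                    depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                    window_size=7, num_classes=1000)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])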
|
Cream/MiniViT/Mini-Swin/models/swin_mlp.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/models/swin_mlp.py",
"repo_id": "Cream",
"token_count": 8733
}
| 287 |
# TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance
:pushpin: This is an official PyTorch implementation of **[ICCV 2023]** - [TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance](https://openaccess.thecvf.com/content/ICCV2023/html/Wu_TinyCLIP_CLIP_Distillation_via_Affinity_Mimicking_and_Weight_Inheritance_ICCV_2023_paper.html)
**TinyCLIP** is a novel **cross-modal distillation** method for large-scale language-image pre-trained models. The method introduces two core techniques: **affinity mimicking** and **weight inheritance**. This work unleashes the capacity of small CLIP models, fully leveraging large-scale models as well as pre-training data and striking the best trade-off between speed and accuracy.
<p align="center">
<img src="./figure/TinyCLIP.jpg" width="1000">
</p>
## Highlights
<p align="center">
<img src="./figure/fig1.jpg" width="500">
</p>
* TinyCLIP ViT-45M/32 uses only **half the parameters** of ViT-B/32 to achieve **comparable zero-shot performance**.
* TinyCLIP ResNet-19M reduces the parameters by **50\%** while achieving a **$2\times$** inference speedup, and obtains **56.4\%** accuracy on ImageNet.
## News
* *Dec.2023* TinyCLIP models have been integrated into [🤗Hugging Face Model Hub](https://huggingface.co/collections/wkcn/tinyclip-model-zoo-6581aa105311fe07be88cb0d).
* *Oct.2023* Training code is released.
* *Sep.2023* Preliminary code release, including inference code and checkpoints.
## Model Zoo
| Model | Weight inheritance | Pretrain | IN-1K Acc@1(%) | MACs(G) | Throughput(pairs/s) | Link |
|--------------------|--------------------|---------------|----------------|---------|---------------------|------|
[TinyCLIP ViT-39M/16 Text-19M](./src/open_clip/model_configs/TinyCLIP-ViT-39M-16-Text-19M.json) | manual | YFCC-15M | 63.5 | 9.5 | 1,469 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M.pt)
[TinyCLIP ViT-8M/16 Text-3M](./src/open_clip/model_configs/TinyCLIP-ViT-8M-16-Text-3M.json) | manual | YFCC-15M | 41.1 | 2.0 | 4,150 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-8M-16-Text-3M-YFCC15M.pt)
[TinyCLIP ResNet-30M Text-29M](./src/open_clip/model_configs/TinyCLIP-ResNet-30M-Text-29M.json) | manual | LAION-400M | 59.1 | 6.9 | 1,811 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ResNet-30M-Text-29M-LAION400M.pt)
[TinyCLIP ResNet-19M Text-19M](./src/open_clip/model_configs/TinyCLIP-ResNet-19M-Text-19M.json) | manual | LAION-400M | 56.4 | 4.4 | 3,024| [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ResNet-19M-Text-19M-LAION400M.pt)
[TinyCLIP ViT-61M/32 Text-29M](./src/open_clip/model_configs/TinyCLIP-ViT-61M-32-Text-29M.json) | manual | LAION-400M | 62.4 | 5.3 | 3,191|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-61M-32-Text-29M-LAION400M.pt)
[TinyCLIP ViT-40M/32 Text-19M](./src/open_clip/model_configs/TinyCLIP-ViT-40M-32-Text-19M.json) | manual | LAION-400M | 59.8 | 3.5 | 4,641|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-40M-32-Text-19M-LAION400M.pt)
TinyCLIP ViT-63M/32 Text-31M | auto | LAION-400M | 63.9 | 5.6 | 2,905|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-63M-32-Text-31M-LAION400M.pt)
TinyCLIP ViT-45M/32 Text-18M | auto | LAION-400M | 61.4 | 3.7 | 3,682|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-45M-32-Text-18M-LAION400M.pt)
TinyCLIP ViT-22M/32 Text-10M | auto | LAION-400M | 53.7 | 1.9 | 5,504|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-22M-32-Text-10M-LAION400M.pt)
TinyCLIP ViT-63M/32 Text-31M | auto | LAION+YFCC-400M | 64.5 | 5.6| 2,909 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-63M-32-Text-31M-LAIONYFCC400M.pt)
TinyCLIP ViT-45M/32 Text-18M | auto | LAION+YFCC-400M | 62.7 | 1.9 | 3,685 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-45M-32-Text-18M-LAIONYFCC400M.pt)
Note: The configs of models with auto inheritance are generated automatically.
## Getting started
:beginner: Here is the setup tutorial, evaluation and pretraining scripts.
### Install dependencies and prepare dataset
- [Preparation](./docs/PREPARATION.md)
### Evaluate it
- [Evaluation](./docs/EVALUATION.md)
### Model inference
- [Inference](./inference.py)
- [Use with 🤗Hugging Face Transformers](https://huggingface.co/collections/wkcn/tinyclip-model-zoo-6581aa105311fe07be88cb0d)
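
A minimal, hedged sketch of zero-shot inference with 🤗 Transformers is shown below. The model id, image path and prompt texts are assumptions for illustration only; check the Hugging Face collection linked above for the exact model names.

```python
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Assumed model id -- verify against the TinyCLIP collection on the Hub.
model_id = "wkcn/TinyCLIP-ViT-40M-32-Text-19M-LAION400M"
model = CLIPModel.from_pretrained(model_id)
processor = CLIPProcessor.from_pretrained(model_id)

image = Image.open("example.jpg")  # any RGB image
inputs = processor(text=["a photo of a cat", "a photo of a dog"],
                   images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=-1)
print(probs)
```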
### Pretrain it
- [Pretraining](./docs/PRETRAINING.md)
## Citation
If this repo is helpful for you, please consider citing it. :mega: Thank you! :)
```bibtex
@InProceedings{tinyclip,
title = {TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance},
author = {Wu, Kan and Peng, Houwen and Zhou, Zhenghong and Xiao, Bin and Liu, Mengchen and Yuan, Lu and Xuan, Hong and Valenzuela, Michael and Chen, Xi (Stephen) and Wang, Xinggang and Chao, Hongyang and Hu, Han},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2023},
pages = {21970-21980}
}
```
## Acknowledge
Our code is based on [CLIP](https://github.com/openai/CLIP), [OpenCLIP](https://github.com/mlfoundations/open_clip), [CoFi](https://github.com/princeton-nlp/CoFiPruning) and [PyTorch](https://github.com/pytorch/pytorch). Thanks to the contributors for their awesome work!
## License
- [License](./LICENSE)
|
Cream/TinyCLIP/README.md/0
|
{
"file_path": "Cream/TinyCLIP/README.md",
"repo_id": "Cream",
"token_count": 2293
}
| 288 |
import requests
import os
import multiprocessing as mp
from io import BytesIO
import numpy as np
import PIL
from PIL import Image
import pickle
import sys
def grab(line):
"""
Download a single image from the TSV.
"""
uid, split, line = line
try:
caption, url = line.split("\t")[:2]
except:
print("Parse error")
return
if os.path.exists(ROOT + "/%s/%d/%d.jpg" % (split, uid % 1000, uid)):
print("Finished", uid)
return uid, caption, url
# Let's not crash if anything weird happens
try:
dat = requests.get(url, timeout=20)
if dat.status_code != 200:
print("404 file", url)
return
# Try to parse this as an Image file, we'll fail out if not
im = Image.open(BytesIO(dat.content))
im.thumbnail((512, 512), PIL.Image.BICUBIC)
if min(*im.size) < max(*im.size) / 3:
print("Too small", url)
return
im.save(ROOT + "/%s/%d/%d.jpg" % (split, uid % 1000, uid))
# Another try/catch just because sometimes saving and re-loading
# the image is different than loading it once.
try:
o = Image.open(ROOT + "/%s/%d/%d.jpg" % (split, uid % 1000, uid))
o = np.array(o)
print("Success", o.shape, uid, url)
return uid, caption, url
except:
print("Failed", uid, url)
except Exception as e:
print("Unknown error", e)
pass
if __name__ == "__main__":
ROOT = "cc_data"
if not os.path.exists(ROOT):
os.mkdir(ROOT)
os.mkdir(os.path.join(ROOT, "train"))
os.mkdir(os.path.join(ROOT, "val"))
for i in range(1000):
os.mkdir(os.path.join(ROOT, "train", str(i)))
os.mkdir(os.path.join(ROOT, "val", str(i)))
p = mp.Pool(300)
for tsv in sys.argv[1:]:
print("Processing file", tsv)
assert 'val' in tsv.lower() or 'train' in tsv.lower()
split = 'val' if 'val' in tsv.lower() else 'train'
results = p.map(grab,
[(i, split, x) for i, x in enumerate(open(tsv).read().split("\n"))])
out = open(tsv.replace(".tsv", "_output.csv"), "w")
out.write("title\tfilepath\n")
for row in results:
if row is None:
continue
id, caption, url = row
fp = os.path.join(ROOT, split, str(id % 1000), str(id) + ".jpg")
if os.path.exists(fp):
out.write("%s\t%s\n" % (caption, fp))
else:
print("Drop", id)
out.close()
p.close()
|
Cream/TinyCLIP/src/data/gather_cc.py/0
|
{
"file_path": "Cream/TinyCLIP/src/data/gather_cc.py",
"repo_id": "Cream",
"token_count": 1321
}
| 289 |
import logging
def setup_logging(log_file, level, include_host=False):
if include_host:
import socket
hostname = socket.gethostname()
formatter = logging.Formatter(
f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
else:
formatter = logging.Formatter(
'%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
logging.root.setLevel(level)
loggers = [logging.getLogger(name)
for name in logging.root.manager.loggerDict]
for logger in loggers:
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logging.root.addHandler(stream_handler)
if log_file:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setFormatter(formatter)
logging.root.addHandler(file_handler)
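
# --- Hedged usage sketch (not part of the original file) ---
# Example call; the log file name is an arbitrary assumption.
if __name__ == '__main__':
    setup_logging(log_file='out.log', level=logging.INFO, include_host=False)
    logging.info('logging initialised')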
|
Cream/TinyCLIP/src/training/logger.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/logger.py",
"repo_id": "Cream",
"token_count": 413
}
| 290 |
# Image Augmentation for TinyViT
The code is based on [timm.data](https://github.com/rwightman/pytorch-image-models/tree/master/timm/data) of [pytorch-image-models](https://github.com/rwightman/pytorch-image-models) written by [Ross Wightman](https://github.com/rwightman) and the contributors. Thanks a lot!
We adapt it for TinyViT.
Apache License
## Code Structure
File | Description
---------------------------------------------|--------------------------
[`aug_random.py`](./aug_random.py) | unify all random values of augmentation with a random generator
[`dataset_wrapper.py`](./dataset_wrapper.py) | a dataset wrapper for saving logits
[`manager.py`](./manager.py)                 | The writer and reader for logits files
|
Cream/TinyViT/data/augmentation/README.md/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/README.md",
"repo_id": "Cream",
"token_count": 282
}
| 291 |
IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg')
|
Cream/TinyViT/data/augmentation/parsers/constants.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/parsers/constants.py",
"repo_id": "Cream",
"token_count": 19
}
| 292 |
# Evaluation
Before evaluation, we need to prepare [the ImageNet-1k dataset](./PREPARATION.md) and [the checkpoints in model zoo](../README.md).
Run the following command for evaluation:
**Evaluate TinyViT with pretraining distillation**
<details>
<summary>Evaluate TinyViT-5M <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22kto1k/tiny_vit_5m_22kto1k.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_5m_22kto1k_distill.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-11M <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22kto1k/tiny_vit_11m_22kto1k.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_11m_22kto1k_distill.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-21M <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22kto1k/tiny_vit_21m_22kto1k.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_21m_22kto1k_distill.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-21M-384 <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/higher_resolution/tiny_vit_21m_224to384.yaml --data-path ./ImageNet --batch-size 64 --eval --resume ./checkpoints/tiny_vit_21m_22kto1k_384_distill.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-21M-512 <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/higher_resolution/tiny_vit_21m_384to512.yaml --data-path ./ImageNet --batch-size 32 --eval --resume ./checkpoints/tiny_vit_21m_22kto1k_512_distill.pth
</code></pre>
</details>
**Evaluate TinyViT trained from scratch in IN-1k**
<details>
<summary>Evaluate TinyViT-5M</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/1k/tiny_vit_5m.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_5m_1k.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-11M</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/1k/tiny_vit_11m.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_11m_1k.pth
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-21M</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/1k/tiny_vit_21m.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_21m_1k.pth
</code></pre>
</details>
**The model pretrained on IN-22k can be evaluated directly on IN-1k**
Since the model pretrained on IN-22k is not finetuned on IN-1k, its accuracy is lower than that of the model finetuned from IN-22k to IN-1k.
<details>
<summary>Evaluate TinyViT-5M-22k <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22k_distill/tiny_vit_5m_22k_distill.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_5m_22k_distill.pth --opts DATA.DATASET imagenet
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-11M-22k <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22k_distill/tiny_vit_11m_22k_distill.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_11m_22k_distill.pth --opts DATA.DATASET imagenet
</code></pre>
</details>
<details>
<summary>Evaluate TinyViT-21M-22k <img src="../.figure/distill.png"></summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/22k_distill/tiny_vit_21m_22k_distill.yaml --data-path ./ImageNet --batch-size 128 --eval --resume ./checkpoints/tiny_vit_21m_22k_distill.pth --opts DATA.DATASET imagenet
</code></pre>
</details>
|
Cream/TinyViT/docs/EVALUATION.md/0
|
{
"file_path": "Cream/TinyViT/docs/EVALUATION.md",
"repo_id": "Cream",
"token_count": 1596
}
| 293 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .detr import build
def build_model(args):
return build(args)
|
Cream/iRPE/DETR-with-iRPE/models/__init__.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/models/__init__.py",
"repo_id": "Cream",
"token_count": 42
}
| 294 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cls_cvt import *
from .registry import *
from .build import build_model
|
CvT/lib/models/__init__.py/0
|
{
"file_path": "CvT/lib/models/__init__.py",
"repo_id": "CvT",
"token_count": 53
}
| 295 |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pandas as pd
import numpy as np
from msanomalydetector.util import *
import msanomalydetector.boundary_utils as boundary_helper
from msanomalydetector._anomaly_kernel_cython import median_filter
class SpectralResidual:
def __init__(self, series, threshold, mag_window, score_window, sensitivity, detect_mode, batch_size):
self.__series__ = series
self.__values__ = self.__series__['value'].tolist()
self.__threshold__ = threshold
self.__mag_window = mag_window
self.__score_window = score_window
self.__sensitivity = sensitivity
self.__detect_mode = detect_mode
self.__anomaly_frame = None
self.__batch_size = batch_size
if self.__batch_size <= 0:
self.__batch_size = len(series)
self.__batch_size = max(12, self.__batch_size)
self.__batch_size = min(len(series), self.__batch_size)
def detect(self):
if self.__anomaly_frame is None:
self.__anomaly_frame = self.__detect()
return self.__anomaly_frame
def __detect(self):
anomaly_frames = []
for i in range(0, len(self.__series__), self.__batch_size):
start = i
end = i + self.__batch_size
end = min(end, len(self.__series__))
if end - start >= 12:
anomaly_frames.append(self.__detect_core(self.__series__[start:end]))
else:
ext_start = max(0, end - self.__batch_size)
ext_frame = self.__detect_core(self.__series__[ext_start:end])
anomaly_frames.append(ext_frame[start-ext_start:])
return pd.concat(anomaly_frames, axis=0, ignore_index=True)
def __detect_core(self, series):
values = series['value'].values
extended_series = SpectralResidual.extend_series(values)
mags = self.spectral_residual_transform(extended_series)
anomaly_scores = self.generate_spectral_score(mags)
anomaly_frame = pd.DataFrame({Timestamp: series['timestamp'].values,
Value: values,
Mag: mags[:len(values)],
AnomalyScore: anomaly_scores[:len(values)]})
anomaly_frame[IsAnomaly] = np.where(anomaly_frame[AnomalyScore] > self.__threshold__, True, False)
if self.__detect_mode == DetectMode.anomaly_and_margin:
anomaly_index = anomaly_frame[anomaly_frame[IsAnomaly]].index.tolist()
anomaly_frame[ExpectedValue] = self.calculate_expected_value(values, anomaly_index)
boundary_units = boundary_helper.calculate_boundary_unit_entire(values,
anomaly_frame[IsAnomaly].values)
anomaly_frame[AnomalyScore] = boundary_helper.calculate_anomaly_scores(
values=values,
expected_values=anomaly_frame[ExpectedValue].values,
units=boundary_units,
is_anomaly=anomaly_frame[IsAnomaly].values
)
margins = [boundary_helper.calculate_margin(u, self.__sensitivity) for u in boundary_units]
anomaly_frame['unit'] = boundary_units
anomaly_frame[LowerBoundary] = anomaly_frame[ExpectedValue].values - margins
anomaly_frame[UpperBoundary] = anomaly_frame[ExpectedValue].values + margins
isLowerAnomaly = np.logical_and(anomaly_frame[IsAnomaly].values,
anomaly_frame[LowerBoundary].values > values)
isUpperAnomaly = np.logical_and(anomaly_frame[IsAnomaly].values,
values > anomaly_frame[UpperBoundary].values)
anomaly_frame[IsAnomaly] = np.logical_or(isLowerAnomaly, isUpperAnomaly)
return anomaly_frame
def generate_spectral_score(self, mags):
ave_mag = average_filter(mags, n=self.__score_window)
safeDivisors = np.clip(ave_mag, EPS, ave_mag.max())
raw_scores = np.abs(mags - ave_mag) / safeDivisors
scores = np.clip(raw_scores / 10.0, 0, 1.0)
return scores
def spectral_residual_transform(self, values):
"""
        This method transforms a time series into a spectral residual series
:param values: list.
a list of float values.
:return: mag: list.
a list of float values as the spectral residual values
"""
trans = np.fft.fft(values)
mag = np.sqrt(trans.real ** 2 + trans.imag ** 2)
eps_index = np.where(mag <= EPS)[0]
mag[eps_index] = EPS
mag_log = np.log(mag)
mag_log[eps_index] = 0
spectral = np.exp(mag_log - average_filter(mag_log, n=self.__mag_window))
trans.real = trans.real * spectral / mag
trans.imag = trans.imag * spectral / mag
trans.real[eps_index] = 0
trans.imag[eps_index] = 0
wave_r = np.fft.ifft(trans)
mag = np.sqrt(wave_r.real ** 2 + wave_r.imag ** 2)
return mag
@staticmethod
def predict_next(values):
"""
        Predicts the next value by summing up the slopes between the last value and the previous values.
Mathematically, g = 1/m * sum_{i=1}^{m} g(x_n, x_{n-i}), x_{n+1} = x_{n-m+1} + g * m,
where g(x_i,x_j) = (x_i - x_j) / (i - j)
:param values: list.
a list of float numbers.
:return : float.
the predicted next value.
"""
if len(values) <= 1:
            raise ValueError('data should contain at least 2 numbers')
v_last = values[-1]
n = len(values)
slopes = [(v_last - v) / (n - 1 - i) for i, v in enumerate(values[:-1])]
return values[1] + sum(slopes)
@staticmethod
def extend_series(values, extend_num=5, look_ahead=5):
"""
extend the array data by the predicted next value
:param values: list.
a list of float numbers.
:param extend_num: int, default 5.
number of values added to the back of data.
:param look_ahead: int, default 5.
number of previous values used in prediction.
:return: list.
The result array.
"""
if look_ahead < 1:
raise ValueError('look_ahead must be at least 1')
extension = [SpectralResidual.predict_next(values[-look_ahead - 2:-1])] * extend_num
return np.concatenate((values, extension), axis=0)
@staticmethod
def calculate_expected_value(values, anomaly_index):
values = deanomaly_entire(values, anomaly_index)
length = len(values)
fft_coef = np.fft.fft(values)
fft_coef.real = [v if length * 3 / 8 >= i or i >= length * 5 / 8 else 0 for i, v in enumerate(fft_coef.real)]
fft_coef.imag = [v if length * 3 / 8 >= i or i >= length * 5 / 8 else 0 for i, v in enumerate(fft_coef.imag)]
exps = np.fft.ifft(fft_coef)
return exps.real
|
anomalydetector/msanomalydetector/spectral_residual.py/0
|
{
"file_path": "anomalydetector/msanomalydetector/spectral_residual.py",
"repo_id": "anomalydetector",
"token_count": 3693
}
| 296 |
<h1 align="center">
<img src="https://user-images.githubusercontent.com/9354770/171523113-70c7214b-8298-4d7e-abd9-81f5788f6e19.png" alt="Archai logo" width="384px" />
<br />
</h1>
<div align="center">
<b>Archai</b> accelerates your Neural Architecture Search (NAS) through <b>fast</b>, <b>reproducible</b> and <b>modular</b> research, enabling the generation of efficient deep networks for various applications.
</div>
<br />
<div align="center">
<img src ="https://img.shields.io/github/release/microsoft/archai?style=flat-square" alt="Release version" />
<img src ="https://img.shields.io/github/issues-raw/microsoft/archai?style=flat-square" alt="Open issues" />
<img src ="https://img.shields.io/github/contributors/microsoft/archai?style=flat-square" alt="Contributors" />
<img src ="https://img.shields.io/pypi/dm/archai?style=flat-square" alt="PyPI downloads" />
<img src ="https://img.shields.io/github/license/microsoft/archai?color=red&style=flat-square" alt="License" />
</div>
<br />
<div align="center">
<a href="#installation">Installation</a> •
<a href="#quickstart">Quickstart</a> •
<a href="#tasks">Tasks</a> •
<a href="#documentation">Documentation</a> •
<a href="#support">Support</a>
</div>
## Installation
Archai can be installed through various methods, but we recommend using a virtual environment such as `conda` or `pyenv`.
To install Archai via PyPI, the following command can be executed:
```bash
pip install archai
```
**Archai requires Python 3.8+ and PyTorch 1.7.0+ to function properly.**
For further information, please consult the [installation guide](https://microsoft.github.io/archai/getting_started/installation.html).
## Quickstart
In this quickstart example, we will apply Archai in Natural Language Processing to find the optimal Pareto-frontier Transformers' configurations according to a set of objectives.
### Creating the Search Space
We start by importing the `TransformerFlexSearchSpace` class which represents the search space for the Transformer architecture:
```python
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import TransformerFlexSearchSpace
space = TransformerFlexSearchSpace("gpt2")
```
### Defining Search Objectives
Next, we define the objectives we want to optimize. In this example, we use `NonEmbeddingParamsProxy`, `TransformerFlexOnnxLatency`, and `TransformerFlexOnnxMemory` to define the objectives:
```python
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.evaluators.nlp.parameters import NonEmbeddingParamsProxy
from archai.discrete_search.evaluators.nlp.transformer_flex_latency import TransformerFlexOnnxLatency
from archai.discrete_search.evaluators.nlp.transformer_flex_memory import TransformerFlexOnnxMemory
search_objectives = SearchObjectives()
search_objectives.add_objective(
"non_embedding_params",
NonEmbeddingParamsProxy(),
higher_is_better=True,
compute_intensive=False,
constraint=(1e6, 1e9),
)
search_objectives.add_objective(
"onnx_latency",
TransformerFlexOnnxLatency(space),
higher_is_better=False,
compute_intensive=False,
)
search_objectives.add_objective(
"onnx_memory",
TransformerFlexOnnxMemory(space),
higher_is_better=False,
compute_intensive=False,
)
```
### Initializing the Algorithm
We use the `EvolutionParetoSearch` algorithm to conduct the search:
```python
from archai.discrete_search.algos.evolution_pareto import EvolutionParetoSearch
algo = EvolutionParetoSearch(
space,
search_objectives,
None,
"tmp",
num_iters=5,
init_num_models=10,
seed=1234,
)
```
### Performing the Search
Finally, we call the `search()` method to start the NAS process:
```python
algo.search()
```
The algorithm will iterate through different network architectures, evaluate their performance based on the defined objectives, and ultimately produce a frontier of Pareto-optimal results.
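Search artifacts (e.g., the evolving Pareto frontier and per-iteration state) are written to the output directory passed to the algorithm (`"tmp"` above). As a rough sketch — the file name below is an assumption for illustration, not documented API — the saved state can be inspected with pandas:

```python
import pandas as pd

# Assumed artifact name; check the output directory for the actual files.
state = pd.read_csv("tmp/search_state_4.csv")
print(state.sort_values("onnx_latency").head())
```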
## Tasks
To showcase the capabilities of Archai, a set of end-to-end tasks is provided:
* [Text Generation](https://github.com/microsoft/archai/blob/main/tasks/text_generation).
* [Face Segmentation](https://github.com/microsoft/archai/blob/main/tasks/face_segmentation).
## Documentation
The [official documentation](https://microsoft.github.io/archai) also provides a series of [notebooks](https://microsoft.github.io/archai/getting_started/notebooks.html).
## Support
If you have any questions or feedback about the Archai project or the open problems in Neural Architecture Search, please feel free to contact us using the following information:
* Email: [email protected]
* Website: https://github.com/microsoft/archai/issues
We welcome any questions, feedback, or suggestions you may have and look forward to hearing from you.
### Team
Archai has been created and maintained by [Shital Shah](https://shital.com), [Debadeepta Dey](https://debadeepta.com), [Gustavo de Rosa](https://www.microsoft.com/en-us/research/people/gderosa), Caio Mendes, [Piero Kauffmann](https://www.microsoft.com/en-us/research/people/pkauffmann), [Chris Lovett](https://lovettsoftware.com), Allie Del Giorno, Mojan Javaheripi, and [Ofer Dekel](https://www.microsoft.com/en-us/research/people/oferd) at Microsoft Research.
### Contributions
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
### Trademark
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow Microsoft's Trademark & Brand Guidelines. Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies.
### License
This project is released under the MIT License. Please review the [file](https://github.com/microsoft/archai/blob/main/LICENSE) for more details.
|
archai/README.md/0
|
{
"file_path": "archai/README.md",
"repo_id": "archai",
"token_count": 2036
}
| 297 |
from typing import Callable, Tuple
import psutil
import os
import tracemalloc
import torch
from torch import profiler
from torch import nn
import gc
def model_memory(create_model:Callable[[], nn.Module])->Tuple[nn.Module, int]:
# returns model and memory occupied by the model in process
gc.collect()
# baseline process memory
process = psutil.Process(os.getpid())
baseline_mem = process.memory_info().rss
model = create_model()
gc.collect()
new_mem = process.memory_info().rss
return model, new_mem-baseline_mem
def inference_stats(model:nn.Module, **inputs)->Tuple[int, int, int, int]:
    # returns self CPU memory usage in bytes, self CPU time in us,
    # FLOPs, and total inference CPU time in us
# We basically sum "self" time of individual ops,
# i.e., not including child time.
# Pytorch also has record_function which gives
# higher CPU time, probably because it includes
# time spent other than ops.
# Sometime profiler also generates [memory] node
# which has negative value of memory.
with torch.no_grad():
with profiler.profile(activities=[profiler.ProfilerActivity.CPU], profile_memory=True, record_shapes=True, with_flops=True) as prof:
with profiler.record_function('model_inference'):
_ = model(**inputs)
t = prof.key_averages()
self_time, self_mem, flops, ti_memory, inf_cpu, inf_mem, inf_flops = 0, 0, 0, 0, 0, 0, 0
for ti in t:
if ti.key == '[memory]':
ti_memory = -ti.self_cpu_memory_usage
continue
if ti.key == 'model_inference':
inf_mem = -ti.cpu_memory_usage
inf_cpu = ti.cpu_time_total
inf_flops = ti.flops
continue
self_mem += ti.self_cpu_memory_usage
self_time += ti.self_cpu_time_total
flops += ti.flops
return self_mem, self_time, flops, inf_cpu
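# Illustrative usage (model and input shape are examples only):
#   model, mem_bytes = model_memory(lambda: torchvision.models.resnet18())
#   self_mem, self_time_us, flops, inf_cpu_us = inference_stats(model, x=torch.rand(1, 3, 224, 224))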
|
archai/archai/common/ml_perf_utils.py/0
|
{
"file_path": "archai/archai/common/ml_perf_utils.py",
"repo_id": "archai",
"token_count": 730
}
| 298 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List
import torch
class Lighting:
"""Lighting transform."""
def __init__(self, std: float, eigval: List[float], eigvec: List[float]) -> None:
"""Initialize the lighting transform.
Args:
std: Standard deviation of the normal distribution.
eigval: Eigenvalues of the covariance matrix.
eigvec: Eigenvectors of the covariance matrix.
"""
self.std = std
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img: torch.Tensor) -> torch.Tensor:
if self.std == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.std)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
|
archai/archai/datasets/cv/transforms/lighting.py/0
|
{
"file_path": "archai/archai/datasets/cv/transforms/lighting.py",
"repo_id": "archai",
"token_count": 512
}
| 299 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.discrete_search.evaluators.functional import EvaluationFunction
from archai.discrete_search.evaluators.onnx_model import AvgOnnxLatency
from archai.discrete_search.evaluators.progressive_training import (
ProgressiveTraining, RayProgressiveTraining
)
from archai.discrete_search.evaluators.pt_profiler import (
TorchFlops, TorchLatency, TorchPeakCpuMemory, TorchPeakCudaMemory, TorchNumParameters
)
from archai.discrete_search.evaluators.ray import RayParallelEvaluator
__all__ = [
'EvaluationFunction', 'AvgOnnxLatency', 'ProgressiveTraining',
'RayProgressiveTraining', 'TorchFlops', 'TorchLatency',
'TorchPeakCpuMemory', 'TorchPeakCudaMemory',
'TorchNumParameters', 'RayParallelEvaluator'
]
|
archai/archai/discrete_search/evaluators/__init__.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/__init__.py",
"repo_id": "archai",
"token_count": 264
}
| 300 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import datetime
import uuid
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import AsyncModelEvaluator
from archai.common.store import ArchaiStore
class RemoteAzureBenchmarkEvaluator(AsyncModelEvaluator):
"""Simple adapter for benchmarking architectures asynchronously on Azure.
    This adapter uploads an ONNX model to an Azure Blob storage container and
records the model entry on the respective Azure Table.
"""
def __init__(
self,
input_shape: Union[Tuple, List[Tuple]],
store: ArchaiStore,
experiment_name: str,
metric_key: str,
overwrite: Optional[bool] = True,
max_retries: Optional[int] = 5,
retry_interval: Optional[int] = 120,
onnx_export_kwargs: Optional[Dict[str, Any]] = None,
verbose: bool = False,
benchmark_only: bool = True
) -> None:
"""Initialize the evaluator.
Args:
            input_shape: Model input shape or list of model input shapes for ONNX export.
            store: `ArchaiStore` helper used to upload blobs and to read/write status table entries.
            experiment_name: Name of the experiment, used as a prefix for uploaded blob paths.
            metric_key: Column of the status table that should be used as the result.
            overwrite: Whether to overwrite existing models.
            max_retries: Maximum number of retries in `fetch_all`.
            retry_interval: Interval (in seconds) between each retry attempt.
            onnx_export_kwargs: Dictionary containing key-value arguments for `torch.onnx.export`.
            verbose: Whether to print debug messages.
            benchmark_only: Whether the remote runner should only benchmark the model.
"""
# TODO: Make this class more general / less pipeline-specific
self.store = store
input_shapes = [input_shape] if isinstance(input_shape, tuple) else input_shape
self.sample_input = tuple([torch.rand(*input_shape) for input_shape in input_shapes])
self.experiment_name = experiment_name
self.metric_key = metric_key
self.overwrite = overwrite
self.max_retries = max_retries
self.retry_interval = retry_interval
self.onnx_export_kwargs = onnx_export_kwargs or dict()
self.verbose = verbose
self.results = {}
self.benchmark_only = benchmark_only
# Architecture list
self.archids = []
# Test connection string works
unknown_id = str(uuid.uuid4())
_ = self.store.get_existing_status(unknown_id)
_ = self.store.list_blobs(unknown_id)
def _reset(self, entity):
changed = False
for k in ['mean', 'macs', 'params', 'stdev', 'total_inference_avg', 'error']:
if k in entity:
del entity[k]
changed = True
if changed:
self.store.update_status_entity(entity)
@overrides
def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
# bug in azure ml sdk requires blob store folder names not begin with digits, so we prefix with 'id_'
archid = f'id_{arch.archid}'
# Checks if architecture was already benchmarked
entity = self.store.get_existing_status(archid)
if entity is not None:
if entity["status"] == "complete":
if self.metric_key in entity:
if self.verbose:
value = entity[self.metric_key]
self.archids.append(archid)
print(f"Entry for {archid} already exists with {self.metric_key} = {value}")
return
else:
# force quantization to happen again in case the model has been retrained.
self._reset(entity)
else:
# job is still running, let it continue
if self.verbose:
print(f"Job for {archid} is running...")
self.archids.append(archid)
return
entity = self.store.get_status(archid) # this is a get or create operation.
if self.benchmark_only:
entity["benchmark_only"] = 1
elif 'benchmark_only' in entity:
del entity['benchmark_only']
self.store.update_status_entity(entity) # must be an update, not a merge.
self.store.lock_entity(entity, "uploading")
if arch.arch is not None:
try:
with TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
# Uploads ONNX file to blob storage and updates the table entry
arch.arch.to("cpu")
file_name = str(tmp_dir / "model.onnx")
# Exports model to ONNX
torch.onnx.export(
arch.arch,
self.sample_input,
file_name,
input_names=[f"input_{i}" for i in range(len(self.sample_input))],
**self.onnx_export_kwargs,
)
self.store.upload_blob(f'{self.experiment_name}/{archid}', file_name, "model.onnx")
entity["model_date"] = self.store.get_utc_date()
entity["model_name"] = "model.onnx"
entity["status"] = "new"
except Exception as e:
entity["error"] = str(e)
entity["status"] = "error"
else:
# then the blob store must already have a model.onnx file!
blobs = self.store.list_blobs(f'{self.experiment_name}/{archid}/model.onnx')
if len(blobs) < 1:
print(f"model.onnx is missing for architecture {archid}")
return
else:
entity['status'] = 'ready'
self.store.unlock_entity(entity)
self.archids.append(archid)
if self.verbose:
print(f"Sent {archid} to Remote Benchmark")
@overrides
def fetch_all(self) -> List[Union[float, None]]:
results = [None] * len(self.archids)
completed = [False] * len(self.archids)
        # `retries` bounds how long we wait without progress; as soon as we see something
        # complete we reset the counter because progress is being made.
retries = self.max_retries
start = time.time()
count = 0
while retries > 0:
for i, archid in enumerate(self.archids):
if not completed[i]:
entity = self.store.get_existing_status(archid)
if entity is not None:
if self.metric_key in entity and entity[self.metric_key]:
results[i] = entity[self.metric_key]
if "error" in entity:
error = entity["error"]
print(f"Skipping architecture {archid} because of remote error: {error}")
completed[i] = True
retries = self.max_retries
elif entity["status"] == "complete":
print(f"Architecture {archid} is complete with {self.metric_key}={ results[i]}")
completed[i] = True
retries = self.max_retries
if all(completed):
break
count = sum(1 for c in completed if c)
if self.verbose:
remaining = len(self.archids) - count
estimate = (time.time() - start) / (count + 1) * remaining
status_dict = {
"complete": count,
"total": len(results),
"time_remaining": str(datetime.timedelta(seconds=estimate))
}
print(
f"Waiting for results. Current status: {status_dict}\n"
f"Pending Archids: {[archid for archid, status in zip(self.archids, results) if status is None]}"
)
time.sleep(self.retry_interval)
            retries -= 1  # count down one wait interval; reset above whenever progress is observed
count = sum(1 for c in completed if c)
if count == 0:
raise Exception("Something is wrong, the uploaded models are not being processed. Please check your SNPE remote runner setup.")
# Resets state
self.archids = []
return results
|
archai/archai/discrete_search/evaluators/remote_azure_benchmark.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/remote_azure_benchmark.py",
"repo_id": "archai",
"token_count": 4187
}
| 301 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from functools import partial
from typing import Optional
import torch
from torch import nn
class NormalConvBlock(nn.Module):
"""Normal Convolutional Block with BatchNorm and ReLU."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Optional[int] = 3,
stride: Optional[int] = 1,
padding: Optional[int] = 1,
bias: Optional[bool] = True,
**kwargs
) -> None:
"""Initialize the module.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
kernel_size: Kernel size.
stride: Stride.
padding: Padding.
bias: Whether to use bias.
"""
super().__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x: torch.Tensor):
return self.relu(self.bn(self.conv(x)))
class SeparableConvBlock(nn.Module):
"""Separable Convolutional Block with BatchNorm and ReLU."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Optional[int] = 3,
stride: Optional[int] = 1,
padding: Optional[int] = 1,
expand_ratio: Optional[float] = 1.0,
id_skip: Optional[bool] = False,
bias: Optional[bool] = True,
):
"""Initialize the module.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
kernel_size: Kernel size.
stride: Stride.
padding: Padding.
expand_ratio: Expansion ratio.
id_skip: Whether to use skip connection.
bias: Whether to use bias.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.expand_ratio = expand_ratio
self.stride = stride
self.padding = padding
self.kernel_size = kernel_size
self.id_skip = id_skip
# Expansion phase
inp = in_channels # number of input channels
oup = int(in_channels * self.expand_ratio) # number of output channels
if expand_ratio != 1:
self._expand_conv = nn.Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=bias)
self._bn0 = nn.BatchNorm2d(num_features=oup)
# Depthwise convolution phase
self._depthwise_conv = nn.Conv2d(
in_channels=oup,
out_channels=oup,
groups=oup, # groups makes it depthwise
kernel_size=kernel_size,
stride=stride,
bias=bias,
padding=padding,
)
self._bn1 = nn.BatchNorm2d(num_features=oup)
# Output phase
self._project_conv = nn.Conv2d(in_channels=oup, out_channels=out_channels, kernel_size=1, bias=bias)
self._bn2 = nn.BatchNorm2d(num_features=out_channels)
self._act = nn.ReLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Expansion and Depthwise Convolution
out = x
if self.expand_ratio != 1:
out = self._bn0(self._expand_conv(out)) # No activation function here
out = self._act(self._bn1(self._depthwise_conv(out)))
# Pointwise conv.
out = self._bn2(self._project_conv(out))
# Skip connection
if self.id_skip and self.stride == 1 and self.in_channels == self.out_channels:
out = out + x
return out
OPS = {
"conv3x3": partial(NormalConvBlock, kernel_size=3, padding=1),
"conv5x5": partial(NormalConvBlock, kernel_size=5, padding=2),
"conv7x7": partial(NormalConvBlock, kernel_size=7, padding=3),
"mbconv3x3_e1": partial(SeparableConvBlock, kernel_size=3, padding=1),
"mbconv3x3_e2": partial(SeparableConvBlock, kernel_size=3, padding=1, expand_ratio=2),
"mbconv5x5_e1": partial(SeparableConvBlock, kernel_size=5, padding=2),
"mbconv5x5_e2": partial(SeparableConvBlock, kernel_size=5, padding=2, expand_ratio=2),
}
class Block(nn.Module):
"""Block of operations."""
def __init__(self, in_ch: int, out_ch: int, in_scale: int, out_scale: int, op_name: str) -> None:
"""Initialize the module.
Args:
in_ch: Number of input channels.
out_ch: Number of output channels.
in_scale: Input scale.
out_scale: Output scale.
op_name: Operation name.
"""
super().__init__()
self.in_ch, self.out_ch = in_ch, out_ch
self.in_scale, self.out_scale = in_scale, out_scale
self.op_name = op_name
assert op_name in OPS
assert (out_scale % in_scale == 0) or (in_scale % out_scale == 0)
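        # out_scale >= in_scale: downsample spatially with a strided conv;
        # out_scale <  in_scale: stride-1 conv followed by nearest-neighbor upsampling.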
if out_scale >= in_scale:
self.op = nn.Sequential(OPS[op_name](in_ch, out_ch, stride=int(out_scale // in_scale)))
else:
self.op = nn.Sequential(
OPS[op_name](in_ch, out_ch, stride=1),
nn.Upsample(scale_factor=int(in_scale // out_scale), mode="nearest"),
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.op(input)
|
archai/archai/discrete_search/search_spaces/cv/segmentation_dag/ops.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/cv/segmentation_dag/ops.py",
"repo_id": "archai",
"token_count": 2558
}
| 302 |
from typing import Optional, Tuple, Union
import torch
from torch import nn
from transformers.models.codegen.modeling_codegen import (
CodeGenConfig, fixed_pos_embedding, apply_rotary_pos_emb
)
from archai.discrete_search.search_spaces.config import ArchConfig
class CausalSelfAttention(nn.Module):
def __init__(self, arch_config: ArchConfig, hf_config: CodeGenConfig, hidden_size: int,
total_heads: int, op_heads: int, **kwargs):
assert hidden_size % total_heads == 0
super().__init__()
max_positions = hf_config.max_position_embeddings
self.register_buffer(
"causal_mask",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.hidden_size = hidden_size
self.total_heads = total_heads
self.op_heads = op_heads
self.head_size = hidden_size // total_heads
self.op_size = (self.hidden_size // total_heads) * op_heads
self.max_positions = max_positions
self.scale_attn_weights = hf_config.scale_attn_weights
self.attn_dropout = nn.Dropout(hf_config.attn_pdrop)
self.scale_attn = torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype())
self.qkv_proj = nn.Linear(self.hidden_size, self.op_size * 3, bias=False)
self.rotary_dim = getattr(hf_config, 'rotary_dim', None)
def _split_heads(self, x, n_head, dim_head, mp_num):
reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
return reshaped
def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
"""
        Merges attn_head_size dim and num_attn_heads dim into the hidden dimension
"""
if len(tensor.shape) == 5:
tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
elif len(tensor.shape) == 4:
tensor = tensor.permute(0, 2, 1, 3).contiguous()
else:
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
return tensor.view(new_shape)
def _attn(
self,
query,
key,
value,
attention_mask=None,
head_mask=None,
):
# compute causal mask from causal mask buffer
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
# Keep the attention weights computation in fp32 to avoid overflow issues
query = query.to(torch.float32)
key = key.to(torch.float32)
attn_weights = torch.matmul(query, key.transpose(-1, -2))
attn_weights = attn_weights / self.scale_attn
mask_value = torch.finfo(attn_weights.dtype).min
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.Softmax(dim=-1)(attn_weights)
attn_weights = attn_weights.to(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def forward(
self,
hidden_states: Optional[torch.FloatTensor],
attention_mask: Optional[torch.FloatTensor] = None,
layer_past: Optional[Tuple[torch.Tensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
**kwargs
) -> Union[
Tuple[torch.Tensor, Tuple[torch.Tensor]],
Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
]:
qkv = self.qkv_proj(hidden_states)
# TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
mp_num = 1
qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
local_dim = self.head_size * self.op_heads // mp_num
query, value, key = torch.split(qkv_split, local_dim, dim=-1)
query = self._split_heads(query, self.op_heads, self.head_size, mp_num=mp_num)
key = self._split_heads(key, self.op_heads, self.head_size, mp_num=mp_num)
value = self._split_heads(value, self.op_heads, self.head_size, mp_num=mp_num)
value = value.permute(0, 2, 1, 3)
seq_len = key.shape[1]
offset = 0
if layer_past is not None:
offset = layer_past[0].shape[-2]
seq_len += offset
if self.rotary_dim is not None:
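            # Rotary position embeddings are applied only to the first `rotary_dim`
            # channels of the query/key; the remaining channels pass through unchanged.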
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
q_rot = apply_rotary_pos_emb(q_rot, sincos, offset=offset)
key = torch.cat([k_rot, k_pass], dim=-1)
query = torch.cat([q_rot, q_pass], dim=-1)
key = key.permute(0, 2, 1, 3)
query = query.permute(0, 2, 1, 3)
if layer_past is not None:
past_key = layer_past[0]
past_value = layer_past[1]
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
# compute self-attention: V x Softmax(QK^T)
attn_output, _ = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.op_heads, self.head_size)
return attn_output, present
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/causal_self_attn.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/causal_self_attn.py",
"repo_id": "archai",
"token_count": 3028
}
| 303 |
import math
import torch
import torch.nn.functional as F
from einops import rearrange
from .fftconv import fftconv_fwd, fftconv_bwd
@torch.jit.script
def _mul_sum(y, q):
return (y * q).sum(dim=1)
# reference convolution with residual connection
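# (zero-padding the FFT to 2 * seqlen turns the circular convolution into a linear one,
#  so no wrap-around from the end of the sequence leaks into the beginning)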
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
# reference H3 forward pass
def fftconv_h3_ref(k, ssm_kernel, D, q, v, head_dim=1, ssm_kernel_rev=None):
seqlen = k.shape[-1]
fft_size = 2 * seqlen
kv = (rearrange(k, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
* rearrange(v, 'b (h d2) l -> b 1 d2 h l', d2=head_dim)) # b d1 d2 h l
kv_f = torch.fft.rfft(kv.to(dtype=ssm_kernel.dtype), n=fft_size) / fft_size
ssm_kernel_f = torch.fft.rfft(ssm_kernel, n=fft_size) # h L+1
if ssm_kernel_rev is not None:
ssm_kernel_rev_f = torch.fft.rfft(ssm_kernel_rev, n=fft_size) # h L+1
ssm_kernel_f = ssm_kernel_f + ssm_kernel_rev_f.conj()
y = torch.fft.irfft(kv_f * ssm_kernel_f, n=fft_size, norm='forward')[..., :seqlen] # b d1 d2 h l
out = y + kv * D.unsqueeze(-1) # b d1 d2 h l
q = rearrange(q, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
if head_dim > 1:
out = _mul_sum(out, q)
return rearrange(out, 'b d2 h l -> b (h d2) l').to(dtype=k.dtype)
else:
return rearrange(out * q, 'b 1 1 h l -> b h l').to(dtype=k.dtype)
class FFTConvFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
k_f = torch.fft.rfft(k, n=fft_size)
if k_rev is not None:
k_f = k_f + torch.fft.rfft(k_rev, n=fft_size).conj()
if u.stride(-1) != 1:
u = u.contiguous()
k_f = k_f.contiguous()
D = D.contiguous()
if v is not None and v.stride(-1) != 1:
v = v.contiguous()
if q is not None and q.stride(-1) != 1:
q = q.contiguous()
if dropout_mask is not None:
dropout_mask = dropout_mask.contiguous()
ctx.save_for_backward(u, k_f, D, dropout_mask, v, q)
ctx.output_hbl_layout = output_hbl_layout
ctx.head_dim = head_dim
ctx.gelu = gelu
ctx.fftfp16 = fftfp16
ctx.has_k_rev = k_rev is not None
out = fftconv_fwd(u, k_f, D, v, head_dim, q, dropout_mask, gelu, False, False, fft_size, force_fp16_output, output_hbl_layout, fftfp16)
return out
@staticmethod
def backward(ctx, dout):
if ctx.output_hbl_layout:
dout = rearrange(rearrange(dout, 'b h l -> h b l').contiguous(), 'h b l -> b h l')
else:
dout = dout.contiguous()
u, k_f, D, dropout_mask, v, q = ctx.saved_tensors
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
du, dk_f, dD, dv, dq = fftconv_bwd(dout, u, k_f, D, v, ctx.head_dim, q, dropout_mask, ctx.gelu, False, False, fft_size,
ctx.output_hbl_layout, ctx.fftfp16)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
dk_rev = (None if not ctx.has_k_rev
else torch.fft.irfft(dk_f.conj(), n=fft_size, norm='forward')[..., :seqlen])
if v is not None:
dv = dv.to(dtype=v.dtype) # We do atomicAdd in fp32 so might need to convert to fp16
return du, dk, dD, None, None, None, None, dv if v is not None else None, None, dq if q is not None else None, None, dk_rev
def fftconv_func(u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
return FFTConvFunc.apply(u, k, D, dropout_mask, gelu, force_fp16_output,
output_hbl_layout, v, head_dim, q, fftfp16, k_rev)
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/fftconv.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/fftconv.py",
"repo_id": "archai",
"token_count": 2440
}
| 304 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2018, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self) -> str:
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = "x".join(str(size) for size in p.size())
device_str = "" if not p.is_cuda else " (GPU {})".format(p.get_device())
parastr = "Parameter containing: [{} of size {}{}]".format(torch.typename(p), size_str, device_str)
child_lines.append(" (" + str(k) + "): " + parastr)
tmpstr = "\n".join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(
self,
vocab_size: int,
d_embed: int,
d_model: int,
cutoffs: Tuple[int],
tie_projs: Tuple[bool],
emb_projs: Optional[torch.FloatTensor] = None,
emb_weights: Optional[torch.FloatTensor] = None,
div_val: Optional[int] = 1,
keep_order: Optional[bool] = True,
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.d_embed = d_embed
self.d_model = d_model
self.tie_projs = tie_projs
self.div_val = div_val
self.keep_order = keep_order
self.cutoffs = cutoffs + [vocab_size]
self.cutoffs_ends = [0] + self.cutoffs
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.cutoffs[0] + self.n_clusters
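        # Adaptive softmax layout: the "head" scores the most frequent tokens
        # (everything below the first cutoff) plus one logit per tail cluster;
        # tokens in a tail cluster are scored by that cluster's smaller projection.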
# Whenever clusters are available, we need their weights and biases
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not emb_weights:
self.out_weights = nn.ParameterList()
else:
self.out_weights = emb_weights
self.out_biases = nn.ParameterList()
self.out_projs = OptionalParameterList()
self.out_shared_projs = emb_projs
        # Core logic for handling the different `div_val` (embedding divisor) settings
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_model != d_embed:
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_model, d_embed)))
else:
self.out_projs.append(None)
if not emb_weights:
self.out_weights.append(nn.Parameter(torch.zeros(vocab_size, d_embed)))
self.out_biases.append(nn.Parameter(torch.zeros(vocab_size)))
else:
for i in range(len(self.cutoffs)):
cutoff_start, cutoff_end = (
self.cutoffs_ends[i],
self.cutoffs_ends[i + 1],
)
d_embed_i = d_embed // (div_val**i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_model, d_embed_i)))
if not emb_weights:
self.out_weights.append(nn.Parameter(torch.zeros(cutoff_end - cutoff_start, d_embed_i)))
self.out_biases.append(nn.Parameter(torch.zeros(cutoff_end - cutoff_start)))
def _compute_logits(
self,
inputs: torch.FloatTensor,
weight: torch.FloatTensor,
bias: torch.FloatTensor,
proj: torch.FloatTensor,
) -> torch.FloatTensor:
if proj is None:
logits = F.linear(inputs, weight, bias=bias)
else:
inputs_proj = F.linear(inputs, proj.t().contiguous())
logits = F.linear(inputs_proj, weight, bias=bias)
return logits
def _get_shared_proj(self, idx: int) -> Union[None, torch.FloatTensor]:
if self.tie_projs[idx]:
if len(self.out_shared_projs) == 0:
return None
elif len(self.out_shared_projs) == 1:
return self.out_shared_projs[0]
else:
return self.out_shared_projs[idx]
return self.out_projs[idx]
def forward(self, inputs: torch.FloatTensor, labels: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:
if labels is not None:
# Shift `n` tokens to predict `n+1`
inputs = inputs[..., :-1, :].contiguous()
inputs = inputs.view(-1, inputs.size(-1))
labels = labels[..., 1:].contiguous()
labels = labels.view(-1)
if inputs.size(0) != labels.size(0):
raise RuntimeError("Inputs and labels should have the same size in the batch dimension.")
else:
inputs = inputs.view(-1, inputs.size(-1))
if self.n_clusters == 0:
logits = self._compute_logits(
inputs,
self.out_weights[0],
self.out_biases[0],
self._get_shared_proj(0),
)
if labels is not None:
output = -F.log_softmax(logits, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
else:
output = F.log_softmax(logits, dim=-1)
else:
# Creates weights and biases to handle all available clusters
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
cutoff_start, cutoff_end = (
self.cutoffs_ends[i],
self.cutoffs_ends[i + 1],
)
weight_i = self.out_weights[0][cutoff_start:cutoff_end]
bias_i = self.out_biases[0][cutoff_start:cutoff_end]
else:
weight_i = self.out_weights[i]
bias_i = self.out_biases[i]
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
# Defines the head weight, bias and projection
head_weight, head_bias, head_proj = (
weights[0],
biases[0],
self._get_shared_proj(0),
)
# Calculates the head logits and their probabilities
head_logits = self._compute_logits(inputs, head_weight, head_bias, head_proj)
head_probs = F.log_softmax(head_logits, dim=1)
if labels is not None:
output = torch.zeros_like(labels, dtype=inputs.dtype, device=inputs.device)
else:
output = inputs.new_empty((head_logits.size(0), self.vocab_size))
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
cutoff_start, cutoff_end = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
# Gathers a mask of valid indexes
mask_i = (labels >= cutoff_start) & (labels < cutoff_end)
indexes_i = mask_i.nonzero().squeeze()
if indexes_i.numel() == 0:
continue
target_i = labels.index_select(0, indexes_i) - cutoff_start
head_probs_i = head_probs.index_select(0, indexes_i)
inputs_i = inputs.index_select(0, indexes_i)
else:
inputs_i = inputs
if i == 0:
if labels is not None:
probs_i = head_probs_i.gather(1, target_i[:, None]).squeeze(1)
else:
output[:, : self.cutoffs[0]] = head_probs[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = (
weights[i],
biases[i],
self._get_shared_proj(i),
)
tail_logits_i = self._compute_logits(inputs_i, weight_i, bias_i, proj_i)
tail_probs_i = F.log_softmax(tail_logits_i, dim=1)
cluster_probs_i = self.cutoffs[0] + i - 1
if labels is not None:
tail_probs_i = tail_probs_i.gather(1, target_i[:, None]).squeeze(1)
probs_i = head_probs_i[:, cluster_probs_i] + tail_probs_i
else:
probs_i = head_probs[:, cluster_probs_i, None] + tail_probs_i
output[:, cutoff_start:cutoff_end] = probs_i
if labels is not None:
if self.keep_order:
output.index_copy_(0, indexes_i, -probs_i)
else:
output[offset : offset + probs_i.size(0)].copy_(-probs_i)
offset += probs_i.size(0)
return output
|
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/projected_adaptive_log_softmax.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/projected_adaptive_log_softmax.py",
"repo_id": "archai",
"token_count": 5037
}
| 305 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
from onnx import load_model
from onnxruntime.transformers.onnx_model_gpt2 import Gpt2OnnxModel
from onnxruntime.transformers.optimizer import optimize_by_onnxruntime
from archai.common.file_utils import create_file_name_identifier
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.onnx.config_utils.onnx_config_base import OnnxConfig
from archai.onnx.optimization_utils.fusion_options import FusionOptions
logger = OrderedDictLogger(source=__name__)
AVAILABLE_ONNX_MODELS = {"gpt2": Gpt2OnnxModel, "gpt2-flex": Gpt2OnnxModel}
def optimize_onnx(
onnx_model_path: str,
onnx_config: OnnxConfig,
use_gpu: Optional[bool] = False,
opt_level: Optional[int] = 1,
only_ort: Optional[bool] = False,
float16: Optional[bool] = False,
input_int32: Optional[bool] = False,
) -> str:
"""Optimize an ONNX model using a combination of standard ORT-based optimization
and additional transformer-based optimization.
Args:
onnx_model_path: Path to the ONNX model to be optimized.
onnx_config: ONNX configuration of model to be optimized.
use_gpu: Whether to use GPU during optimization.
opt_level: Level of optimization.
only_ort: Whether to only apply ORT optimization.
float16: Whether to use graph with float16.
input_int32: Whether to use inputs with int32.
Returns:
Path to the optimized ONNX model.
"""
logger.info(f"Optimizing model: {onnx_model_path}")
assert opt_level in [0, 1, 2, 99]
ort_model_path = onnx_model_path
# Applies standard ORT-based optimization
if opt_level > 0:
disabled_optimizers = []
if opt_level > 1:
# Disables some optimizers that might influence shape inference/attention fusion
if not only_ort:
disabled_optimizers = [
"MatMulScaleFusion",
"MatMulAddFusion",
"SimplifiedLayerNormFusion",
"GemmActivationFusion",
"BiasSoftmaxFusion",
]
# Performs the standard ORT optimization
ort_model_path = create_file_name_identifier(onnx_model_path, "-opt")
optimize_by_onnxruntime(
onnx_model_path,
use_gpu=use_gpu,
optimized_model_path=ort_model_path,
opt_level=opt_level,
disabled_optimizers=disabled_optimizers,
)
if not only_ort:
model_type = onnx_config.config.model_type
available_models = list(AVAILABLE_ONNX_MODELS.keys())
assert model_type in available_models, f"`model_type`: {model_type} is not supported for `only_ort=False`."
# Applies additional transformer-based optimization
if onnx_config.is_ort_graph_optimizable:
ort_model = load_model(ort_model_path)
ort_model_path = create_file_name_identifier(onnx_model_path, "-opt")
onnx_opt_model = AVAILABLE_ONNX_MODELS[model_type]
options = FusionOptions(model_type)
optimizer = onnx_opt_model(ort_model, *onnx_config.ort_graph_optimizer_args)
optimizer.optimize(options)
optimizer.topological_sort()
if float16:
ort_model_path = create_file_name_identifier(ort_model_path, "-fp16")
optimizer.convert_float_to_float16(keep_io_types=True)
if input_int32:
optimizer.change_graph_inputs_to_int32()
optimizer.save_model_to_file(ort_model_path)
return ort_model_path
|
archai/archai/onnx/optimization.py/0
|
{
"file_path": "archai/archai/onnx/optimization.py",
"repo_id": "archai",
"token_count": 1615
}
| 306 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
import torch
from overrides import overrides
from torch import nn
from archai.common.common import get_conf, get_expdir
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.algos.divnas.analyse_activations import compute_brute_force_sol
from archai.supergraph.algos.divnas.divnas_cell import Divnas_Cell
from archai.supergraph.algos.divnas.divop import DivOp
from archai.supergraph.datasets.data import get_data
from archai.supergraph.nas.cell import Cell
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import CellDesc, EdgeDesc, ModelDesc, NodeDesc
from archai.supergraph.nas.operations import Zero
from archai.supergraph.utils.heatmap import heatmap
logger = get_global_logger()
class DivnasRankFinalizers(Finalizers):
@overrides
def finalize_model(self, model: Model, to_cpu=True, restore_device=True) -> ModelDesc:
logger.pushd('finalize')
# get config and train data loader
conf = get_conf()
conf_loader = conf['nas']['search']['loader']
data_loaders = get_data(conf_loader)
assert data_loaders.train_dl is not None
# wrap all cells in the model
self._divnas_cells: Dict[Cell, Divnas_Cell] = {}
for _, cell in enumerate(model.cells):
divnas_cell = Divnas_Cell(cell)
self._divnas_cells[cell] = divnas_cell
# go through all edges in the DAG and if they are of divop
# type then set them to collect activations
sigma = conf['nas']['search']['divnas']['sigma']
for _, dcell in enumerate(self._divnas_cells.values()):
dcell.collect_activations(DivOp, sigma)
# now we need to run one evaluation epoch to collect activations
# we do it on cpu otherwise we might run into memory issues
# later we can redo the whole logic in pytorch itself
# at the end of this each node in a cell will have the covariance
# matrix of all incoming edges' ops
model = model.cpu()
model.eval()
with torch.no_grad():
for _ in range(1):
for _, (x, _) in enumerate(data_loaders.train_dl):
_, _ = model(x), None
# update the node covariances in all cells
for dcell in self._divnas_cells.values():
dcell.update_covs()
logger.popd()
return super().finalize_model(model, to_cpu, restore_device)
@overrides
def finalize_cell(self, cell:Cell, cell_index:int,
model_desc:ModelDesc, *args, **kwargs)->CellDesc:
# first finalize each node, we will need to recreate node desc with final version
logger.info(f'cell id {cell.desc.id}')
max_final_edges = model_desc.max_final_edges
node_descs: List[NodeDesc] = []
dcell = self._divnas_cells[cell]
assert len(cell.dag) == len(list(dcell.node_covs.values()))
for i, node in enumerate(cell.dag):
node_cov = dcell.node_covs[id(node)]
logger.info(f'node {i}')
node_desc = self.finalize_node(node, i, cell.desc.nodes()[i],max_final_edges, node_cov, cell, i)
node_descs.append(node_desc)
# (optional) clear out all activation collection information
dcell.clear_collect_activations()
desc = cell.desc
finalized = CellDesc(
id = desc.id, cell_type=desc.cell_type, conf_cell=desc.conf_cell,
stems=[cell.s0_op.finalize()[0], cell.s1_op.finalize()[0]],
stem_shapes=desc.stem_shapes,
nodes = node_descs, node_shapes=desc.node_shapes,
post_op=cell.post_op.finalize()[0],
out_shape=desc.out_shape,
trainables_from = desc.trainables_from
)
return finalized
@overrides
def finalize_node(self, node:nn.ModuleList, node_index:int,
node_desc:NodeDesc, max_final_edges:int,
cov:np.array, cell: Cell, node_id: int,
*args, **kwargs)->NodeDesc:
# node is a list of edges
assert len(node) >= max_final_edges
# covariance matrix shape must be square 2-D
assert len(cov.shape) == 2
assert cov.shape[0] == cov.shape[1]
# the number of primitive operators has to be greater
# than equal to the maximum number of final edges
# allowed
assert cov.shape[0] >= max_final_edges
# get the order and alpha of all ops other than 'none'
in_ops = [(edge,op,alpha,i) for i, edge in enumerate(node) \
for op, alpha in edge._op.ops()
if not isinstance(op, Zero)]
assert len(in_ops) >= max_final_edges
# order all the ops by alpha
in_ops_sorted = sorted(in_ops, key=lambda in_op:in_op[2], reverse=True)
# keep under consideration top half of the ops
num_to_keep = max(max_final_edges, len(in_ops_sorted)//2)
top_ops = in_ops_sorted[:num_to_keep]
# get the covariance submatrix of the top ops only
cov_inds = []
for edge, op, alpha, edge_num in top_ops:
ind = self._divnas_cells[cell].node_num_to_node_op_to_cov_ind[node_id][op]
cov_inds.append(ind)
cov_top_ops = cov[np.ix_(cov_inds, cov_inds)]
assert len(cov_inds) == len(top_ops)
assert len(top_ops) >= max_final_edges
assert cov_top_ops.shape[0] == cov_top_ops.shape[1]
assert len(cov_top_ops.shape) == 2
# run brute force set selection algorithm
# only on the top ops
max_subset, max_mi = compute_brute_force_sol(cov_top_ops, max_final_edges)
# note that elements of max_subset are indices into top_ops only
selected_edges = []
for ind in max_subset:
edge, op, alpha, edge_num = top_ops[ind]
op_desc, _ = op.finalize()
new_edge = EdgeDesc(op_desc, edge.input_ids)
logger.info(f'selected edge: {edge_num}, op: {op_desc.name}')
selected_edges.append(new_edge)
# save diagnostic information to disk
expdir = get_expdir()
heatmap(cov_top_ops, fmt='.1g', cmap='coolwarm')
savename = os.path.join(
expdir, f'cell_{cell.desc.id}_node_{node_id}_cov.png')
plt.savefig(savename)
logger.info('')
return NodeDesc(selected_edges, node_desc.conv_params)
|
archai/archai/supergraph/algos/divnas/divnas_rank_finalizer.py/0
|
{
"file_path": "archai/archai/supergraph/algos/divnas/divnas_rank_finalizer.py",
"repo_id": "archai",
"token_count": 2988
}
| 307 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.petridish.petridish_op import PetridishOp, TempIdentityOp
from archai.supergraph.algos.random.random_model_desc_builder import (
RandomModelDescBuilder,
)
from archai.supergraph.nas.operations import Op
class PetridishModelBuilder(RandomModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
super().pre_build(conf_model_desc)
Op.register_op('petridish_normal_op',
lambda op_desc, arch_params, affine:
PetridishOp(op_desc, arch_params, False, affine))
Op.register_op('petridish_reduction_op',
lambda op_desc, arch_params, affine:
PetridishOp(op_desc, arch_params, True, affine))
Op.register_op('temp_identity_op',
lambda op_desc, arch_params, affine:
TempIdentityOp(op_desc))
# @overrides
# def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
# cell_index:int, cell_type:CellType, node_count:int,
# in_shape:TensorShape, out_shape:TensorShape) \
# ->Tuple[TensorShapes, List[NodeDesc]]:
# # For petridish we add one node with identity to s1.
# # This will be our seed model to start with.
# # Later in PetridishSearcher, we will add one more node in parent after each sampling.
# assert in_shape[0]==out_shape[0]
# reduction = (cell_type==CellType.Reduction)
# # channels for conv filters
# conv_params = ConvMacroParams(in_shape[0], out_shape[0])
# # identity op to connect S1 to the node
# op_desc = OpDesc('skip_connect',
# params={'conv': conv_params,
# 'stride': 2 if reduction else 1},
# in_len=1, trainables=None, children=None)
# edge = EdgeDesc(op_desc, input_ids=[1])
# new_node = NodeDesc(edges=[edge], conv_params=conv_params)
# nodes = [new_node]
# # each node has same out channels as in channels
# out_shapes = [copy.deepcopy(out_shape) for _ in nodes]
# return out_shapes, nodes
|
archai/archai/supergraph/algos/petridish/petridish_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/petridish/petridish_model_desc_builder.py",
"repo_id": "archai",
"token_count": 1031
}
| 308 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import Dict, Optional, Tuple, Union
from overrides import EnforceOverrides
from torch.utils.data.dataset import Dataset
from archai.common.config import Config
TrainTestDatasets = Tuple[Optional[Dataset], Optional[Dataset]]
ImgSize = Optional[Union[int, Tuple[int, int]]]
class DatasetProvider(EnforceOverrides):
def __init__(self, conf_dataset:Config):
super().__init__()
pass
@abstractmethod
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
pass
@abstractmethod
def get_transforms(self, img_size:ImgSize)->tuple: # of transforms
pass
DatasetProviderType = type(DatasetProvider)
_providers: Dict[str, DatasetProviderType] = {}
def register_dataset_provider(name:str, class_type:DatasetProviderType)->None:
global _providers
if name in _providers:
raise KeyError(f'dataset provider with name {name} has already been registered')
_providers[name] = class_type
def get_provider_type(name:str)->DatasetProviderType:
global _providers
return _providers[name]
|
archai/archai/supergraph/datasets/dataset_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/dataset_provider.py",
"repo_id": "archai",
"token_count": 457
}
| 309 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torchvision
from overrides import overrides
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class Sport8Provider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainpath = os.path.join(self._dataroot, 'sport8', 'train')
trainset = torchvision.datasets.ImageFolder(trainpath, transform=transform_train)
if load_test:
testpath = os.path.join(self._dataroot, 'sport8', 'test')
testset = torchvision.datasets.ImageFolder(testpath, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
# MEAN, STD computed for sport8
MEAN = [0.4734, 0.4856, 0.4526]
STD = [0.2478, 0.2444, 0.2667]
# transformations match that in
# https://github.com/antoyang/NAS-Benchmark/blob/master/DARTS/preproc.py
train_transf = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2)
]
test_transf = [transforms.Resize(256), transforms.CenterCrop(224)]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(train_transf + normalize)
test_transform = transforms.Compose(test_transf + normalize)
return train_transform, test_transform
register_dataset_provider('sport8', Sport8Provider)
|
archai/archai/supergraph/datasets/providers/sport8_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/sport8_provider.py",
"repo_id": "archai",
"token_count": 969
}
| 310 |
# -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from archai.supergraph.models.shakeshake.shakeshake import ShakeShake, Shortcut
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
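        # shake-shake regularization: stochastically blend the two branch outputs (deterministic at eval time)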
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(
nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=False),
nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=False),
nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
        # Initialize parameters
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
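        # ResNeXt-style widths: bottleneck width scales with base width and cardinality, output is 4x the stage width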
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
|
archai/archai/supergraph/models/shakeshake/shake_resnext.py/0
|
{
"file_path": "archai/archai/supergraph/models/shakeshake/shake_resnext.py",
"repo_id": "archai",
"token_count": 1608
}
| 311 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import tensorwatch as tw
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.nas.model import Model
from archai.supergraph.utils.checkpoint import CheckPoint
logger = get_global_logger()
def checkpoint_empty(checkpoint:Optional[CheckPoint])->bool:
return checkpoint is None or checkpoint.is_empty()
def create_checkpoint(conf_checkpoint:Config, resume:bool)->Optional[CheckPoint]:
"""Creates checkpoint given its config. If resume is True then attempt is
made to load existing checkpoint otherwise an empty checkpoint is created.
"""
checkpoint = CheckPoint(conf_checkpoint, resume) \
if conf_checkpoint is not None else None
logger.info({'checkpoint_empty': checkpoint_empty(checkpoint),
'conf_checkpoint_none': conf_checkpoint is None, 'resume': resume,
'checkpoint_path': None if checkpoint is None else checkpoint.filepath})
return checkpoint
def get_model_stats(model:Model,
input_tensor_shape=[1,3,32,32], clone_model=True)->tw.ModelStats:
# model stats is doing some hooks so do it last
model_stats = tw.ModelStats(model, input_tensor_shape,
clone_model=clone_model)
return model_stats
|
archai/archai/supergraph/nas/nas_utils.py/0
|
{
"file_path": "archai/archai/supergraph/nas/nas_utils.py",
"repo_id": "archai",
"token_count": 489
}
| 312 |
# Copyright (c) @IssamLaradji.
# https://github.com/IssamLaradji/sls/blob/master/src/optimizers/others/cocob.py
import math
from typing import Any, Callable, Dict, Iterable, Optional, Union
import torch
from torch import optim
class CocobBackprop(optim.Optimizer):
"""Coin Betting optimizer with Backpropagation.
It has been proposed in `Training Deep Networks without Learning Rates
Through Coin Betting`.
Reference:
https://arxiv.org/pdf/1705.07795.pdf
"""
def __init__(
self, params: Union[Iterable, Dict[str, Any]], alpha: Optional[float] = 100.0, eps: Optional[float] = 1e-8
) -> None:
"""Initialize the optimizer.
Args:
params: Iterable of parameters to optimize or dicts defining
parameter groups.
alpha: Positive number to adjust betting fraction. Theoretical convergence
                guarantee does not depend on choice of `alpha`.
eps: Positive initial wealth for betting algorithm. Theoretical convergence
                guarantee does not depend on choice of `eps`.
"""
self.alpha = alpha
self.eps = eps
defaults = dict(alpha=alpha, eps=eps)
super(CocobBackprop, self).__init__(params, defaults)
def step(self, closure: Optional[Callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group["params"]:
if param.grad is None:
continue
grad = param.grad.data
state = self.state[param]
param_shape = param.shape
# Better bets for -ve gradient
neg_grad = -grad
if len(state) == 0:
                    # Happens only once at the beginning of optimization
# Set initial parameter weights and zero reward
state["initial_weight"] = param.data
state["reward"] = param.new_zeros(param_shape)
# Don't bet anything for first round
state["bet"] = param.new_zeros(param_shape)
# Initialize internal states useful for computing betting fraction
state["neg_grads_sum"] = param.new_zeros(param_shape)
state["grads_abs_sum"] = param.new_zeros(param_shape)
state["max_observed_scale"] = self.eps * param.new_ones(param_shape)
# load states in variables
initial_weight = state["initial_weight"]
reward = state["reward"]
bet = state["bet"]
neg_grads_sum = state["neg_grads_sum"]
grads_abs_sum = state["grads_abs_sum"]
max_observed_scale = state["max_observed_scale"]
# Update internal states useful for computing betting fraction
max_observed_scale = torch.max(max_observed_scale, torch.abs(grad))
grads_abs_sum += torch.abs(grad)
neg_grads_sum += neg_grad
# Based on how much the Better bets on -ve gradient prediction,
# check how much the Better won (-ve if lost)
win_amount = bet * neg_grad
# Update better's reward. Negative reward is not allowed.
reward = torch.max(reward + win_amount, torch.zeros_like(reward))
# Better decides the bet fraction based on so-far observations
bet_fraction = neg_grads_sum / (
max_observed_scale
* (torch.max(grads_abs_sum + max_observed_scale, self.alpha * max_observed_scale))
)
# Better makes the bet according to decided betting fraction.
bet = bet_fraction * (max_observed_scale + reward)
# Set parameter weights
param.data = initial_weight + bet
# save state back in memory
state["neg_grads_sum"] = neg_grads_sum
state["grads_abs_sum"] = grads_abs_sum
state["max_observed_scale"] = max_observed_scale
state["reward"] = reward
state["bet"] = bet
# For Cocob-Backprop bet_fraction need not be maintained in state. Only kept for visualization.
state["bet_fraction"] = bet_fraction
return loss
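# Example usage (a minimal sketch, not part of the original module; assumes a standard PyTorch
# training loop with a `loader` yielding (x, y) batches):
#
#   model = torch.nn.Linear(10, 1)
#   optimizer = CocobBackprop(model.parameters())
#   for x, y in loader:
#       optimizer.zero_grad()
#       torch.nn.functional.mse_loss(model(x), y).backward()
#       optimizer.step()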
class CocobOns(optim.Optimizer):
"""Coin Betting optimizer with Online Learning.
It has been proposed in `Black-Box Reductions for Parameter-free
Online Learning in Banach Spaces`.
Reference:
https://arxiv.org/pdf/1705.07795.pdf
"""
def __init__(self, params: Union[Iterable, Dict[str, Any]], eps: Optional[float] = 1e-8):
"""Initialize the optimizer.
Args:
params: Iterable of parameters to optimize or dicts defining
parameter groups.
eps: Positive initial wealth for betting algorithm. Theoretical convergence
                guarantee does not depend on choice of `eps`.
"""
self.eps = eps
defaults = dict(eps=eps)
super(CocobOns, self).__init__(params, defaults)
def step(self, closure: Optional[Callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group["params"]:
if param.grad is None:
continue
grad = param.grad.data
state = self.state[param]
param_shape = param.data.shape
# Clip gradients to be in (-1, 1)
grad.clamp_(-1.0, 1.0)
# Better bets for -ve gradient
neg_grad = -grad
if len(state) == 0:
                    # Happens only once at the beginning of optimization
# Set initial parameter weights and zero reward
state["initial_weight"] = param.data
state["wealth"] = self.eps * param.new_ones(param_shape)
# Don't bet anything for first round
state["bet_fraction"] = param.new_zeros(param_shape)
state["bet"] = param.new_zeros(param_shape)
# Initialize internal states useful for computing betting fraction
state["z_square_sum"] = param.new_zeros(param_shape)
# load states in memory
wealth = state["wealth"]
bet_fraction = state["bet_fraction"]
z_square_sum = state["z_square_sum"]
initial_weight = state["initial_weight"]
bet = state["bet"]
# Based on how much the Better bets on -ve gradient prediction,
# check how much the Better won (-ve if lost)
win_amount = bet * neg_grad
# Update better's wealth based on what he won / lost.
wealth = wealth + win_amount
# Better decides the bet fraction based on so-far observations
# z, A variable notations from Algo 1 in paper)
z = grad / (1 - (bet_fraction * grad))
z_square_sum = z_square_sum + (z * z)
A = 1 + z_square_sum
bet_fraction = bet_fraction - (2 / (2 - math.log(3))) * (z / A)
bet_fraction.clamp_(-0.5, 0.5)
# Better makes the bet according to decided betting fraction.
bet = bet_fraction * wealth
# Set parameter weights
param.data = initial_weight + bet
# save state back in memory
state["bet_fraction"] = bet_fraction
state["wealth"] = wealth
state["z_square_sum"] = z_square_sum
state["bet"] = bet
return loss
|
archai/archai/trainers/coin_betting_optimizer.py/0
|
{
"file_path": "archai/archai/trainers/coin_betting_optimizer.py",
"repo_id": "archai",
"token_count": 3794
}
| 313 |
__include__: "darts.yaml" # defaults are loaded from this file
# XNAS's parameters
nas:
search:
xnas:
to_evict: True
loader:
train_batch: 64
trainer:
grad_clip: 1.0
epochs: 50
optimizer:
type: "sgd"
lr: 0.025
decay: 0.0
momentum: 0.0
nesterov: False
warmup: null
# NOTE: XNAS uses 1500 epochs and lots of tricks like scheduled drop-path, label smoothing,
# AutoAugment, weight decay of 3e-4, and an SGD optimizer with Nesterov momentum of 0.9.
# We are deliberately choosing not to implement those features in the interest of keeping
# comparisons to other algorithms fair. So all other algorithms have the same final training procedure.
|
archai/confs/algos/xnas.yaml/0
|
{
"file_path": "archai/confs/algos/xnas.yaml",
"repo_id": "archai",
"token_count": 281
}
| 314 |
Azure
=====
This section contains examples of using Archai on Azure.
.. toctree::
:maxdepth: 2
Notebooks <azure/notebooks>
|
archai/docs/advanced_guide/cloud/azure.rst/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure.rst",
"repo_id": "archai",
"token_count": 45
}
| 315 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
import torch
import pytorch_lightning as pl
class MNistDataModule(pl.LightningDataModule):
def __init__(self, path):
super().__init__()
self.root = path
def prepare_data(self):
self.dataset_provider = MnistDatasetProvider(root=self.root)
self.tr_data = self.dataset_provider.get_train_dataset()
self.val_data = self.dataset_provider.get_val_dataset()
self.input_shape = self.tr_data.data[0].shape
def prepare_data_per_node(self):
self.prepare_data()
def train_dataloader(self):
return torch.utils.data.DataLoader(self.tr_data, batch_size=16, shuffle=True, num_workers=4)
def val_dataloader(self):
return torch.utils.data.DataLoader(self.val_data, batch_size=16, shuffle=False, num_workers=4)
def test_dataloader(self):
# MNIST doesn't have a test dataset, so just reuse the validation dataset.
return torch.utils.data.DataLoader(self.val_data, batch_size=16, shuffle=False, num_workers=4)
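# Example usage (a minimal sketch, not part of the original file; `MyLightningModule` is a
# hypothetical LightningModule):
#   data = MNistDataModule(path="dataroot")
#   trainer = pl.Trainer(max_epochs=1, accelerator="auto")
#   trainer.fit(MyLightningModule(), datamodule=data)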
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/mnist_data_module.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/mnist_data_module.py",
"repo_id": "archai",
"token_count": 457
}
| 316 |
<jupyter_start><jupyter_text>Task: Text GenerationIn this Notebook we run Archai's [Text Generation](https://github.com/microsoft/archai/tree/main/tasks/text_generation) task on Azure Machine Learning.We'll use the following components:1. [Search](./src/search.yaml) - Run Lightweight Transformer Search (LTS) to discover architectures that perform well with regards to non-embedding parameters, latency, and memory2. [Train](./src/train.yaml) - Train a chosen architecture3. [Generate text](./src/generate_text.yaml) - Given a trained architecture and a prompt, outputs the generated textThe components are defined via Yaml (more info [here](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-create-component-pipeline-pythondefine-component-via-yaml)) which will call the corresponding Python scripts.Note: Our goal is to show how to create and run jobs without spending too much computing resources. Therefore, our goal is not to train a good model -- for this purpose please refer to the original task. Prerequisites- Python 3.7 or later- An Azure subscription- An Azure Resource Group- An Azure Machine Learning [Workspace](https://learn.microsoft.com/en-us/azure/machine-learning/quickstart-create-resourcescreate-the-workspace)This notebook also assumes you have a python environment setup using `pip install -e .[aml]` in your Archai repository root<jupyter_code>import os
from pathlib import Path
from IPython.display import display, Image
from IPython.core.display import HTML
from azure.ai.ml import load_job
import archai.common.azureml_helper as aml_helper
import archai.common.notebook_helper as nb_helper<jupyter_output><empty_output><jupyter_text>Get a handle to the workspaceWe load the workspace from a workspace [configuration file](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-configure-environmentlocal-and-dsvm-only-create-a-workspace-configuration-file).<jupyter_code>ml_client = aml_helper.get_aml_client_from_file("../.azureml/config.json")
print(f'Using workspace: {ml_client.workspace_name} in resource group: {ml_client.resource_group_name}')<jupyter_output><empty_output><jupyter_text>Create CPU and GPU compute clustersWe provision a Linux [compute cluster](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-create-attach-compute-cluster?tabs=python) for the NAS job in this Notebook. See the [full list](https://azure.microsoft.com/en-ca/pricing/details/machine-learning/) on VM sizes and prices.We also provision a GPU compute cluster, to train the architectures and generate text.<jupyter_code>cpu_compute_name = "nas-cpu-cluster-D14-v2"
cpu_compute_cluster = aml_helper.create_compute_cluster(ml_client, cpu_compute_name, size="Standard_D14_v2")
gpu_compute_name = "nas-gpu-cluster-NC6"
gpu_compute_cluster = aml_helper.create_compute_cluster(ml_client, gpu_compute_name, size="Standard_NC6")<jupyter_output>You already have a cluster named nas-cpu-cluster-D14-v2, we'll reuse it as is.
You already have a cluster named nas-gpu-cluster-NC6, we'll reuse it as is.<jupyter_text>Create an environment based on a YAML fileAzure Machine Learning maintains a set of CPU and GPU Ubuntu Linux-based base images with common system dependencies. For the set of base images and their corresponding Dockerfiles, see the [AzureML Containers](https://github.com/Azure/AzureML-Containers) repo.<jupyter_code>archai_job_env = aml_helper.create_environment_from_file(ml_client,
image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest",
conda_file="conda.yaml",
version="0.0.1")<jupyter_output>Environment with name aml-archai is registered to workspace, the environment version is 0.0.1<jupyter_text>Job 1: NAS (Searching for Pareto-optimal Architectures) Load the search job from a YAML file and run it.<jupyter_code>search_job = load_job(source=os.path.join("src", "search.yaml"))
s_job = ml_client.create_or_update(search_job)<jupyter_output>[32mUploading src (0.01 MBs): 100%|##########| 10177/10177 [00:00<00:00, 10489.91it/s]
[39m<jupyter_text>Open the job overview on Azure ML Studio in your web browser (this works when you are running this notebook in VS code).<jupyter_code>import webbrowser
webbrowser.open(s_job.services["Studio"].endpoint)
job_name = s_job.name
print(f'Started job: {job_name}')<jupyter_output>Started job: salmon_plum_t9hynvf120<jupyter_text>Download the job's output.<jupyter_code>output_name = "output_dir"
download_path = "output"
aml_helper.download_job_output(ml_client, job_name=s_job.name, output_name=output_name, download_path=download_path)
downloaded_folder = Path(download_path) / "named-outputs" / output_name<jupyter_output><empty_output><jupyter_text>Show the Pareto Frontiers.<jupyter_code>param_vs_latency_img = Image(filename=downloaded_folder / "pareto_non_embedding_params_vs_onnx_latency.png")
display(param_vs_latency_img)
param_vs_memory_img = Image(filename=downloaded_folder / "pareto_non_embedding_params_vs_onnx_memory.png")
display(param_vs_memory_img)
latency_vs_memory_img = Image(filename=downloaded_folder / "pareto_onnx_latency_vs_onnx_memory.png")
display(latency_vs_memory_img)<jupyter_output><empty_output><jupyter_text>Show the search state of the last iteration.<jupyter_code>df = nb_helper.get_search_csv(downloaded_folder)
df = df[['archid', 'non_embedding_params', 'onnx_latency', 'onnx_memory', 'is_pareto']]
df[(df['onnx_latency'] < 0.9) & (df['is_pareto'] == True)]<jupyter_output><empty_output><jupyter_text>Job 2: Train (Train a Pareto architecture from Transformer-Flex.) Pick an architecture id (archid) from the CSV file to perform full training.<jupyter_code>archid = "<arch-id>"
print(f"Selected architecture: {archid}")
arch_path = nb_helper.get_arch_abs_path(archid=archid, downloaded_folder=downloaded_folder)<jupyter_output>Selected architecture: gpt2_df106863e1a0c9c140036b05661aa88e92f07701<jupyter_text>Load the training job from a YAML file, set its input, and run it. With the GPU cluster we created it should take around 3 hours.<jupyter_code>train_job = load_job(source=os.path.join("src", "train.yaml"))
train_job.inputs.arch_config_path.path = arch_path
t_job = ml_client.create_or_update(train_job)<jupyter_output><empty_output><jupyter_text>Open the job overview on Azure ML Studio in your web browser (this works when you are running this notebook in VS code).<jupyter_code>import webbrowser
webbrowser.open(t_job.services["Studio"].endpoint)
job_name = t_job.name
print(f'Started Job: {job_name}')<jupyter_output>Started Job: willing_tree_3b22csbdtg<jupyter_text>Job 3: Generating text via prompt Load the generate text job from a YAML file, set the inputs, and run it.<jupyter_code>train_job = ml_client.jobs.get(t_job.name)
path = f"azureml://subscriptions/{ml_client.subscription_id}/resourcegroups/{ml_client.resource_group_name}/" \
f"workspaces/{ml_client.workspace_name}/datastores/workspaceblobstore/paths/azureml/{train_job.name}/output_dir/"
if train_job and train_job.status == "Completed":
gen_job = load_job(source=os.path.join("src", "generate_text.yaml"))
gen_job.inputs.pre_trained_model_path.path = path
gen_job.inputs.prompt = "Machine Learning"
g_job = ml_client.create_or_update(gen_job)
else:
print(f"Job {train_job.name} is not completed yet")<jupyter_output><empty_output><jupyter_text>Open the job overview on Azure ML Studio in your web browser (this works when you are running this notebook in VS code).<jupyter_code>import webbrowser
webbrowser.open(g_job.services["Studio"].endpoint)
job_name = g_job.name
print(f'Started Job: {job_name}')<jupyter_output>Started Job: orange_bee_dk3c1xm55z<jupyter_text>Download and show the generated text.<jupyter_code>output_name = "output_path"
download_path = "generated_text"
aml_helper.download_job_output(ml_client, job_name=g_job.name, output_name=output_name, download_path=download_path)
downloaded_file = Path(download_path) / "named-outputs" / output_name / output_name
with open(downloaded_file, "r") as f:
print(f.read())<jupyter_output>Machine Learning to continue to attend the main park in the first series. The team was considered to be used to be shot in the future. The series's longest @-@ hour of the main characters and the highest @-@ time, a series, to be used in the final series. It has been played by the series of the American, which was given by the United States and was replaced by the United States during the North America.
= = Plot summary =
The episode received by many occasions. It was a three years, and the series of the rest of the United States for the family of the previous episode,
|
archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/text_generation.ipynb/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/text_generation.ipynb",
"repo_id": "archai",
"token_count": 3035
}
| 317 |
Notebooks
=========
These notebooks are designed to help you understand the basics and gain hands-on experience in working with Archai.
.. toctree::
:maxdepth: 2
API <notebooks/api>
Discrete Search <notebooks/discrete_search>
Computer Vision <notebooks/cv>
Natural Language Processing <notebooks/nlp>
|
archai/docs/getting_started/notebooks.rst/0
|
{
"file_path": "archai/docs/getting_started/notebooks.rst",
"repo_id": "archai",
"token_count": 94
}
| 318 |
<jupyter_start><jupyter_text>Discrete Search Spaces<jupyter_code>from typing import List, Optional
from overrides import overrides
import numpy as np
import torch
from torch import nn<jupyter_output><empty_output><jupyter_text>The `ArchaiModel` class The `ArchaiModel` class is a base class used to wrap all model objects. `ArchaiModel` also stores an architecture ID (`ArchaiModel.archid`) and optionally a metadata dictionary (`ArchaiModel.metadata`).<jupyter_code>from archai.discrete_search.api import ArchaiModel<jupyter_output><empty_output><jupyter_text>Let's first consider a simple PyTorch model<jupyter_code>class MyModel(nn.Module):
def __init__(self, nb_layers: int = 5, kernel_size: int = 3, hidden_dim: int = 32):
super().__init__()
self.nb_layers = nb_layers
self.kernel_size = kernel_size
self.hidden_dim = hidden_dim
layer_list = []
for i in range(nb_layers):
in_ch = (1 if i == 0 else hidden_dim)
layer_list += [
nn.Conv2d(in_ch, hidden_dim, kernel_size=kernel_size, padding=(kernel_size-1)//2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(),
]
layer_list += [
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Conv2d(hidden_dim, 10, kernel_size=1)
]
self.model = nn.Sequential(*layer_list)
def forward(self, x):
return self.model(x).squeeze()
def get_archid(self):
return f'({self.nb_layers}, {self.kernel_size}, {self.hidden_dim})'
model_obj = MyModel(nb_layers=2, kernel_size=3, hidden_dim=16)<jupyter_output><empty_output><jupyter_text>We can now wrap a `MyModel` instance into an `ArchaiModel`:<jupyter_code>model = ArchaiModel(
arch=model_obj,
archid=f'L={model_obj.nb_layers}, K={model_obj.kernel_size}, H={model_obj.hidden_dim}',
metadata={'optional': {'metadata'}}
)<jupyter_output><empty_output><jupyter_text>Architecture ids (`archid`) are used to identify a unique model architecture. The contents of `archid` can be decided by the search space designer, one good approach is to hash the architecture definition into a string. However, to keep things simple, in this example we'll just use a simple string representing with the three available architecture parameters (L, K and H).<jupyter_code>model.archid
model.metadata
model.arch<jupyter_output><empty_output><jupyter_text>Building a Search Space Discrete search spaces in Archai are defined using the `DiscreteSearchSpace` abstract class:```pythonclass DiscreteSearchSpace(EnforceOverrides): @abstractmethod def random_sample(self) -> ArchaiModel: ... @abstractmethod def save_arch(self, model: ArchaiModel, path: str) -> None: ... @abstractmethod def load_arch(self, path: str) -> ArchaiModel: ... @abstractmethod def save_model_weights(self, model: ArchaiModel, path: str) -> None: ... @abstractmethod def load_model_weights(self, model: ArchaiModel, path: str) -> None: ...``` To turn `MyModel` into a search space, we need to override the `DiscreteSearchSpace` abstract base class:<jupyter_code>import json
from random import Random
from archai.discrete_search.api import DiscreteSearchSpace
class CNNSearchSpace(DiscreteSearchSpace):
def __init__(self, min_layers: int = 1, max_layers: int = 12,
kernel_list=(1, 3, 5, 7), hidden_list=(16, 32, 64, 128),
seed: int = 1):
self.min_layers = min_layers
self.max_layers = max_layers
self.kernel_list = kernel_list
self.hidden_list = hidden_list
self.rng = Random(seed)
def get_archid(self, model: MyModel) -> str:
return f'L={model.nb_layers}, K={model.kernel_size}, H={model.hidden_dim}'
@overrides
def random_sample(self) -> ArchaiModel:
# Randomly chooses architecture parameters
nb_layers = self.rng.randint(self.min_layers, self.max_layers)
kernel_size = self.rng.choice(self.kernel_list)
hidden_dim = self.rng.choice(self.hidden_list)
model = MyModel(nb_layers, kernel_size, hidden_dim)
# Wraps model into ArchaiModel
return ArchaiModel(arch=model, archid=self.get_archid(model))
@overrides
def save_arch(self, model: ArchaiModel, file: str):
with open(file, 'w') as fp:
json.dump({
'nb_layers': model.arch.nb_layers,
'kernel_size': model.arch.kernel_size,
'hidden_dim': model.arch.hidden_dim
}, fp)
@overrides
def load_arch(self, file: str):
config = json.load(open(file))
model = MyModel(**config)
return ArchaiModel(arch=model, archid=self.get_archid(model))
@overrides
def save_model_weights(self, model: ArchaiModel, file: str):
        state_dict = model.arch.state_dict()
torch.save(state_dict, file)
@overrides
def load_model_weights(self, model: ArchaiModel, file: str):
model.arch.load_state_dict(torch.load(file))
ss = CNNSearchSpace(hidden_list=[32, 64, 128])<jupyter_output><empty_output><jupyter_text>Let's try sampling an architecture<jupyter_code>m = ss.random_sample()
m<jupyter_output><empty_output><jupyter_text>Saving an architecture<jupyter_code>ss.save_arch(m, 'arch.json')
open('arch.json').read()<jupyter_output><empty_output><jupyter_text>Loading an architecture without the weights<jupyter_code>ss.load_arch('arch.json')<jupyter_output><empty_output><jupyter_text>Making the search space compatible with NAS algorithms Search spaces serve as the main interface between NAS algorithms and the application. Different classes of NAS algorithms interact with architectures from the search space using specific abstract classes: Evolutionary algorithms: - User must subclass `EvolutionarySearchSpace` and implement `EvolutionarySearchSpace.mutate` and `EvolutionarySearchSpace.crossover` Bayesian Optimization algorithms: - User must subclass `BayesOptSearchSpace` and override `BayesOptSearchSpace.encode` - Encode should take an `ArchaiModel` and produce a fixed-length vector representation of that architecture. This numerical representation will be used to train surrogate models. Example: Making `CNNSearchSpace` compatible with NAS algorithsm Let's make our search space compatible with Evolutionary and Bayesian Optimization NAS algorithms. To do that, we need to subclass `EvolutionarySearchSpace` and `BayesOptSearchSpace`, and implement `mutation`, `crossover` and `encode` method.<jupyter_code>from archai.discrete_search.api.search_space import EvolutionarySearchSpace, BayesOptSearchSpace
class CNNSearchSpaceExt(CNNSearchSpace, EvolutionarySearchSpace, BayesOptSearchSpace):
    '''We are subclassing CNNSearchSpace just to save space.'''
@overrides
def mutate(self, model_1: ArchaiModel) -> ArchaiModel:
config = {
'nb_layers': model_1.arch.nb_layers,
'kernel_size': model_1.arch.kernel_size,
'hidden_dim': model_1.arch.hidden_dim
}
if self.rng.random() < 0.2:
config['nb_layers'] = self.rng.randint(self.min_layers, self.max_layers)
if self.rng.random() < 0.2:
config['kernel_size'] = self.rng.choice(self.kernel_list)
if self.rng.random() < 0.2:
config['hidden_dim'] = self.rng.choice(self.hidden_list)
mutated_model = MyModel(**config)
return ArchaiModel(
arch=mutated_model, archid=self.get_archid(mutated_model)
)
@overrides
def crossover(self, model_list: List[ArchaiModel]) -> ArchaiModel:
new_config = {
'nb_layers': self.rng.choice([m.arch.nb_layers for m in model_list]),
'kernel_size': self.rng.choice([m.arch.kernel_size for m in model_list]),
'hidden_dim': self.rng.choice([m.arch.hidden_dim for m in model_list]),
}
crossover_model = MyModel(**new_config)
return ArchaiModel(
arch=crossover_model, archid=self.get_archid(crossover_model)
)
@overrides
def encode(self, model: ArchaiModel) -> np.ndarray:
return np.array([model.arch.nb_layers, model.arch.kernel_size, model.arch.hidden_dim])
ss = CNNSearchSpaceExt(hidden_list=[32, 64, 128])<jupyter_output><empty_output><jupyter_text>Now we can generate mutations, crossover and encodings from any architecture of this search space<jupyter_code>m = ss.random_sample()
m.archid
ss.mutate(m).archid
models = [ss.random_sample() for _ in range(4)]
[print(m.archid) for m in models]
ss.crossover(models).archid
ss.encode(m)<jupyter_output><empty_output><jupyter_text>Now we can use `CNNSearchSpaceExt` with EA and BO search algorithms Built-in Search Spaces Instead of creating a search space from scratch, Archai has a list of built-in search spaces that can be used for many Machine Learning tasks. A list of built-in search spaces can be found in `archai/discrete_search/search_spaces`. Example: Semantic Segmentation Search Space (`SegmentationDagSearchSpace`)<jupyter_code>from archai.discrete_search.search_spaces.cv import SegmentationDagSearchSpace
ss = SegmentationDagSearchSpace(nb_classes=1, img_size=(64, 64), max_layers=3)
ss.mutate(ss.random_sample())<jupyter_output><empty_output>
|
archai/docs/getting_started/notebooks/discrete_search/search_space.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/discrete_search/search_space.ipynb",
"repo_id": "archai",
"token_count": 3742
}
| 319 |
Discrete Search
===============
.. toctree::
:maxdepth: 2
archai.discrete_search.algos
archai.discrete_search.api
archai.discrete_search.evaluators
archai.discrete_search.predictors
archai.discrete_search.search_spaces
archai.discrete_search.utils
|
archai/docs/reference/api/archai.discrete_search.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.rst",
"repo_id": "archai",
"token_count": 102
}
| 320 |
DiDARTS
=======
Architecture Trainer
--------------------
.. automodule:: archai.supergraph.algos.didarts.didarts_arch_trainer
:members:
:undoc-members:
Experiment Runner
-----------------
.. automodule:: archai.supergraph.algos.didarts.didarts_exp_runner
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.algos.didarts.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.algos.didarts.rst",
"repo_id": "archai",
"token_count": 105
}
| 321 |
Computer Vision
===============
PyTorch-Lightning
-----------------
Trainer
^^^^^^^
.. automodule:: archai.trainers.cv.pl_trainer
:members:
:undoc-members:
|
archai/docs/reference/api/archai.trainers.cv.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.trainers.cv.rst",
"repo_id": "archai",
"token_count": 59
}
| 322 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import torch
from transformers import AutoModelForCausalLM
from archai.common.file_utils import calculate_torch_model_size
from archai.quantization.ptq import dynamic_quantization_torch
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Post-Training Quantization (PTQ) with a PyTorch model.")
parser.add_argument("pre_trained_model_path", type=str, help="Path to the pre-trained model file.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
# Quantized model only uses maximum of 1 thread
torch.set_num_threads(1)
# Performs Post-Training Quantization (PTQ) over pre-trained model
# Also loads the original pre-trained model for debugging
model = AutoModelForCausalLM.from_pretrained(args.pre_trained_model_path)
model_qnt = dynamic_quantization_torch(model)
print(f"Model: {calculate_torch_model_size(model)}MB")
print(f"Model-QNT: {calculate_torch_model_size(model_qnt)}MB")
inputs = {"input_ids": torch.randint(1, 10, (1, 192))}
logits = model(**inputs).logits
logits_qnt = model_qnt(**inputs).logits
print(f"Difference between logits: {logits_qnt - logits}")
|
archai/scripts/quantization/ptq_with_torch.py/0
|
{
"file_path": "archai/scripts/quantization/ptq_with_torch.py",
"repo_id": "archai",
"token_count": 450
}
| 323 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from typing import Dict, Type
from archai.common import utils
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.algos.darts.darts_exp_runner import DartsExperimentRunner
from archai.supergraph.algos.didarts.didarts_exp_runner import DiDartsExperimentRunner
from archai.supergraph.algos.divnas.divnas_exp_runner import DivnasExperimentRunner
from archai.supergraph.algos.gumbelsoftmax.gs_exp_runner import GsExperimentRunner
from archai.supergraph.algos.manual.manual_exp_runner import ManualExperimentRunner
from archai.supergraph.algos.petridish.petridish_exp_runner import (
PetridishExperimentRunner,
)
from archai.supergraph.algos.random.random_exp_runner import RandomExperimentRunner
from archai.supergraph.algos.xnas.xnas_exp_runner import XnasExperimentRunner
from archai.supergraph.nas.exp_runner import ExperimentRunner
def main():
logger = get_global_logger()
runner_types: Dict[str, Type[ExperimentRunner]] = {
"darts": DartsExperimentRunner,
"petridish": PetridishExperimentRunner,
"xnas": XnasExperimentRunner,
"random": RandomExperimentRunner,
"manual": ManualExperimentRunner,
"gs": GsExperimentRunner,
"divnas": DivnasExperimentRunner,
"didarts": DiDartsExperimentRunner,
}
parser = argparse.ArgumentParser(description="NAS E2E Runs")
parser.add_argument(
"--algos",
type=str,
default="darts,xnas,random,didarts,petridish,gs,manual,divnas",
help="NAS algos to run, seperated by comma",
)
parser.add_argument("--datasets", type=str, default="cifar10", help="datasets to use, separated by comma")
parser.add_argument(
"--full",
type=lambda x: x.lower() == "true",
nargs="?",
const=True,
default=False,
help="Run in full or toy mode just to check for compile errors",
)
parser.add_argument(
"--no-search",
type=lambda x: x.lower() == "true",
nargs="?",
const=True,
default=False,
help="Do not run search",
)
parser.add_argument(
"--no-eval", type=lambda x: x.lower() == "true", nargs="?", const=True, default=False, help="Do not run eval"
)
parser.add_argument(
"--exp-prefix", type=str, default="throwaway", help="Experiment prefix is used for directory names"
)
args, extra_args = parser.parse_known_args()
if "--common.experiment_name" in extra_args:
raise RuntimeError(
"Please use --exp-prefix instead of --common.experiment_name so that main.py can generate experiment directories with search and eval suffix"
)
for dataset in args.datasets.split(","):
for algo in args.algos.split(","):
algo = algo.strip()
print("Running (algo, dataset): ", (algo, dataset))
runner_type: Type[ExperimentRunner] = runner_types[algo]
# get the conf files for algo and dataset
algo_conf_filepath = f"confs/algos/{algo}.yaml" if args.full else f"confs/algos/{algo}_toy.yaml"
dataset_conf_filepath = f"confs/datasets/{dataset}.yaml"
conf_filepaths = ";".join((algo_conf_filepath, dataset_conf_filepath))
runner = runner_type(
conf_filepaths,
base_name=f"{algo}_{dataset}_{args.exp_prefix}",
# for toy and debug runs, clean exp dirs
clean_expdir=utils.is_debugging() or not args.full,
)
runner.run(search=not args.no_search, eval=not args.no_eval)
logger.close()
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/main.py/0
|
{
"file_path": "archai/scripts/supergraph/main.py",
"repo_id": "archai",
"token_count": 1539
}
| 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorwatch as tw
from archai.supergraph import models
model_names = ["resnet18", "resnet34", "resnet101", "densenet121"]
for model_name in model_names:
model = getattr(models, model_name)()
model_stats = tw.ModelStats(model, [1, 3, 224, 224], clone_model=False)
print(
f"{model_name}: flops={model_stats.Flops}, parameters={model_stats.parameters}, memory={model_stats.inference_memory}"
)
|
archai/scripts/supergraph/performance/model_stats.py/0
|
{
"file_path": "archai/scripts/supergraph/performance/model_stats.py",
"repo_id": "archai",
"token_count": 175
}
| 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import re
from setuptools import find_packages, setup
dependencies = [
"azure-ai-ml==1.5.0",
"azure-data-tables",
"azure-identity",
"azure-storage-blob",
"azureml-mlflow",
"datasets>=2.4.0",
"deepspeed",
"einops",
"flake8>=5.0.4",
"flash-attn",
"gorilla>=0.4.0",
"h5py",
"hyperopt",
"ipykernel",
"jupyter",
"lightning>=2.0.0",
"matplotlib",
"mldesigner",
"mlflow",
"nbimporter",
"nbsphinx",
"nbval",
"onnx>=1.10.2",
"onnxruntime>=1.10.0",
"opencv-python",
"opt_einsum",
"overrides==3.1.0",
"pandas",
"psutil",
"pydata-sphinx-theme==0.13.1",
"pytest",
"pyunpack",
"pyyaml",
"ray>=1.0.0",
"scikit-learn",
"send2trash>=1.8.0",
"sphinx",
"sphinx-book-theme",
"sphinx-git",
"sphinx-sitemap",
"sphinx_inline_tabs",
"sphinxcontrib-programoutput",
"sphinxcontrib-mermaid",
"statopt",
"tensorboard",
"tensorwatch",
"tk",
"tokenizers>=0.10.3",
"torchinfo",
"torchvision",
"tqdm",
"transformers==4.27.4",
"xformers",
]
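# Map a bare package name (e.g. "onnx") to its full version spec (e.g. "onnx>=1.10.2")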
dependencies_dict = {y: x for x, y in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in dependencies)}
def filter_dependencies(*pkgs):
for pkg in pkgs:
if pkg not in dependencies_dict:
raise ValueError(f"Package {pkg} not found in dependencies")
return [dependencies_dict[pkg] for pkg in pkgs]
extras_require = {}
extras_require["cv"] = filter_dependencies(
"gorilla",
"opencv-python",
"lightning",
"scikit-learn",
"torchvision",
)
extras_require["nlp"] = filter_dependencies(
"datasets", "einops", "opt_einsum", "tokenizers", "transformers"
)
extras_require["deepspeed"] = filter_dependencies("deepspeed", "mlflow")
extras_require["flash-attn"] = filter_dependencies("flash-attn")
extras_require["xformers"] = filter_dependencies("xformers")
extras_require["docs"] = filter_dependencies(
"nbimporter",
"nbsphinx",
"nbval",
"pandas",
"pydata-sphinx-theme",
"sphinx",
"sphinx-book-theme",
"sphinx-git",
"sphinx-sitemap",
"sphinx_inline_tabs",
"sphinxcontrib-programoutput",
"sphinxcontrib-mermaid",
)
extras_require["tests"] = filter_dependencies(
"azure-data-tables",
"azure-identity",
"azure-storage-blob",
"flake8",
"pytest",
"tk",
)
extras_require["aml"] = filter_dependencies(
"azure-ai-ml",
"azure-data-tables",
"azure-identity",
"azure-storage-blob",
"azureml-mlflow",
"ipykernel",
"jupyter",
"matplotlib",
"mldesigner",
"mlflow",
"lightning",
"torchvision",
)
extras_require["tasks"] = filter_dependencies(
"torchinfo"
)
extras_require["dev"] = (
extras_require["cv"]
+ extras_require["nlp"]
+ extras_require["docs"]
+ extras_require["tests"]
+ extras_require["aml"]
+ extras_require["tasks"]
+ extras_require["xformers"]
)
if os.name != "nt":
# Support for DeepSpeed is not available on native Windows
extras_require["dev"] += extras_require["deepspeed"]
install_requires = filter_dependencies(
"h5py",
"hyperopt",
"matplotlib",
"onnx",
"onnxruntime",
"overrides",
"psutil",
"pyyaml",
"ray",
"send2trash",
"statopt",
"tensorboard",
"tensorwatch",
"tqdm",
)
with open("README.md", "r", encoding="utf_8") as f:
long_description = f.read()
setup(
name="archai",
version="1.0.0",
description="Platform for Neural Architecture Search",
long_description=long_description,
long_description_content_type="text/markdown",
author="Microsoft",
url="https://github.com/microsoft/archai",
license="MIT",
install_requires=install_requires,
extras_require=extras_require,
packages=find_packages(),
include_package_data=True,
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
archai/setup.py/0
|
{
"file_path": "archai/setup.py",
"repo_id": "archai",
"token_count": 1971
}
| 326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
import dateutil.parser
import datetime
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
def parse_date(date):
s = f"{date}".strip()
date = dateutil.parser.isoparse(s)
date = date.replace(tzinfo=datetime.timezone.utc)
return date
def get_usage_by_device(store: ArchaiStore, report_start, report_end):
devices = {}
first = None
last = None
for e in store.get_all_status_entities():
device = e['name']
start = parse_date(e['start'])
end = parse_date(e['end'])
if report_start is not None and report_start > start:
continue
if report_end is not None and report_end < start:
continue
if report_end is not None and end > report_end:
end = report_end
if device not in devices:
devices[device] = []
devices[device] += [(start, end)]
if first is None or start < first:
first = start
if last is None or end > last:
last = end
return (devices, first, last)
def report(store: ArchaiStore, report_start, report_end):
devices, first, last = get_usage_by_device(store, report_start, report_end)
if first is None:
print("No data found")
return
# column headings
print("date,{}".format(",".join([k for k in devices])))
start = datetime.datetime(first.year, first.month, first.day, 0, 0, 0, 0, first.tzinfo)
last = datetime.datetime(last.year, last.month, last.day, 23, 59, 59, 999999, first.tzinfo)
while start < last:
du = []
end = start + datetime.timedelta(days=1)
total = (end - start).total_seconds()
for k in devices:
s = devices[k]
used = 0
for d in s:
ds = d[0]
de = d[1]
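                # skip intervals outside the current day, then clip partial overlaps to the day window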
if ds > end or de < start:
continue
if ds < start:
ds = start
if de > end:
de = end
u = (de - ds).total_seconds()
if u < 0:
print("?")
used += u
x = int((used * 100) / total)
du += [x]
st = start.strftime("%x")
print("{},{}".format(st, ",".join([str(x) for x in du])))
start = end
total_seconds = (last - first).total_seconds()
total_used = []
for k in devices:
s = devices[k]
used = 0
for d in s:
u = (d[1] - d[0]).total_seconds()
used += u
x = int((used * 100) / total_seconds)
total_used += [x]
print("total,{}".format(",".join([str(x) for x in total_used])))
if __name__ == '__main__':
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
parser = argparse.ArgumentParser(
description='Report on Qualcomm device utilization in an optional date range. ' +
'Reports percentage utilization per day.')
parser.add_argument('--start', help='Set the "start" date to start the search. (default None).')
parser.add_argument('--end', help='Set the "end" date to end the search. (default None).')
args = parser.parse_args()
start = None
end = None
if args.start:
start = dateutil.parser.parse(args.start)
start = start.replace(tzinfo=datetime.timezone.utc)
if args.end:
end = dateutil.parser.parse(args.end)
end = end.replace(tzinfo=datetime.timezone.utc)
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name='usage')
report(store, start, end)
|
archai/tasks/face_segmentation/aml/azure/report_device_usage.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/report_device_usage.py",
"repo_id": "archai",
"token_count": 1774
}
| 327 |
#!/bin/bash
MODEL_NAME="model"
if [ "$1" == "--help" ] ; then
echo "### Usage: convert_tf.sh [model_name]"
echo "Converts the given tensorflow model to .dlc then quantizes it."
echo "Default model path is 'model/model.pb'."
exit 1
fi
if [ "$1" != "" ]; then
MODEL_NAME=$1
fi
if [ ! -f "model/${MODEL_NAME}.pb" ]; then
echo "### Model does not exist: model/${MODEL_NAME}.pb"
exit 1
fi
mkdir -p ./snpe_models
# pb 2 dlc
snpe-tensorflow-to-dlc \
-i "model/${MODEL_NAME}.pb" \
-d input_rgb "1,256,256,3" \
--out_node "logits_cls" \
-o "snpe_models/${MODEL_NAME}.dlc" \
--show_unconsumed_nodes
#--debug
# quantize
snpe-dlc-quantize \
--input_dlc "snpe_models/${MODEL_NAME}.dlc" \
--input_list "data/quant/input_list.txt" \
--output_dlc "snpe_models/${MODEL_NAME}.quant.dlc" \
--use_enhanced_quantizer
|
archai/tasks/face_segmentation/aml/snpe/convert_tf.sh/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/snpe/convert_tf.sh",
"repo_id": "archai",
"token_count": 400
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import cv2
import numpy as np
import os
import tqdm
import pandas as pd
import sys
import matplotlib.pyplot as plt
from sklearn.metrics import PrecisionRecallDisplay
from PIL import Image
# Check the outputs of the Mask R-CNN model inference
def _get_dataset_image(filename, image_shape, dataset):
inp_f = os.path.splitext(filename)[0] + ".png"
img_file = os.path.join(dataset, inp_f)
if not os.path.isfile(img_file):
print(f"### dataset {img_file} not found")
sys.exit(1)
img = cv2.imread(img_file)[..., ::-1] # BGR to RGB
img = cv2.resize(img, image_shape, interpolation=cv2.INTER_LINEAR)
    img = img[..., ::-1]  # back to BGR for OpenCV display
return img
def _get_dataset_gt(img_name, dataset, img_shape, use_pillow=False):
seg_name = img_name + '_seg.png'
gt_f = os.path.join(dataset, seg_name)
if not os.path.isfile(gt_f):
print(f"### ground truth {gt_f} not found")
sys.exit(1)
gt_seg = cv2.imread(gt_f, cv2.IMREAD_GRAYSCALE)
if gt_seg.shape[:2] != img_shape:
if use_pillow:
img = Image.fromarray(gt_seg, 'L')
img = img.resize(img_shape[:2], Image.NEAREST)
gt_seg = np.array(img)
else:
# cv2 resize is (newHeight, newWidth)
newsize = [img_shape[1], img_shape[0]]
gt_seg = cv2.resize(gt_seg, newsize, interpolation=cv2.INTER_NEAREST)
return gt_seg
def show_output(input_shape, transpose, dataset, outputs):
_, w, h, c = input_shape
img_shape = (w, h)
output_list = [x for x in os.listdir(outputs) if x.endswith('.raw')]
output_list.sort()
for out_f in output_list:
img = _get_dataset_image(out_f, img_shape, dataset)
logits = np.fromfile(os.path.join(outputs, out_f), dtype=np.float32)
if (transpose):
logits = logits.reshape((-1, img_shape[0], img_shape[1])).transpose(transpose)
else:
logits = logits.reshape((img_shape[0], img_shape[1], -1))
cls_seg = np.argmax(logits, axis=-1)
# debug visualize
norm = cv2.normalize(cls_seg, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
cls_seg_color = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
# concatenate on x-axis so result is 512 wide and 256 high.
canvas = np.concatenate([img, cls_seg_color], axis=1)
cv2.imshow('img', canvas)
key = cv2.waitKey() & 0xFF
if key == 27:
break
def softmax(x, axis):
return np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)
def normalize(x, axis):
return x / np.expand_dims(np.linalg.norm(x, axis=axis), axis=axis)
def get_confusion_matrix(gt_label, pred_label, valid_mask, num_classes):
assert gt_label.dtype in [np.int32, np.int64]
assert pred_label.dtype in [np.int32, np.int64]
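    # Encode each (gt, pred) pair as a single flat index so one bincount call fills the whole confusion matrix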
index = (gt_label * num_classes + pred_label).astype('int32')
label_count = np.bincount(index[valid_mask].flat, minlength=num_classes * num_classes)
confusion_matrix = np.zeros((num_classes, num_classes))
yy, xx = np.meshgrid(np.arange(num_classes), np.arange(num_classes), indexing='ij')
ii = yy * num_classes + xx
confusion_matrix[yy, xx] = label_count[ii]
return confusion_matrix
def get_metrics(input_shape, transpose, dataset, outputs, num_classes=19, use_pillow=False):
output_list = [x for x in os.listdir(outputs) if x.endswith('.raw')]
output_list.sort()
if len(output_list) == 0:
print("No output files matching 'outputs/*.raw' found")
return
print(f"Collecting metrics on {len(output_list)} output .raw files...")
width, height, c = input_shape
img_shape = (width, height)
confusion_matx = None
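    # Per-class score histograms of positive and negative pixels, used later to build precision-recall curves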
bins = int(1e6)
pos_by_score = np.zeros((num_classes, bins + 1))
neg_by_score = np.zeros((num_classes, bins + 1))
with tqdm.tqdm(total=len(output_list)) as pbar:
for out_f in output_list:
img_name = os.path.splitext(os.path.basename(out_f))[0].split('.')[0]
gt_seg = _get_dataset_gt(img_name, dataset, img_shape, use_pillow)
ignore_mask = (gt_seg == 255)
gt_seg[ignore_mask] = 0
gt_seg = gt_seg.astype(np.int32)
valid_mask = np.logical_not(ignore_mask)
full_path = os.path.join(outputs, out_f)
logits = np.fromfile(full_path, dtype=np.float32)
size = np.product(logits.shape)
found_classes = int(size / (img_shape[0] * img_shape[1]))
if found_classes < num_classes:
raise Exception(f"Result {out_f} has unexpected number of predictions {found_classes}, " +
"expecting {num_classes}")
if transpose:
logits = logits.reshape((found_classes, img_shape[0], img_shape[1])).transpose(transpose)
else:
logits = logits.reshape((img_shape[0], img_shape[1], found_classes))
probs = softmax(logits.astype(np.float64), axis=-1)
pd_seg = np.argmax(probs, axis=-1)
# debug visualize
# gt_seg_color = cv2.applyColorMap((255 * gt_seg / 19).astype(np.uint8), cv2.COLORMAP_JET)
# pd_seg_color = cv2.applyColorMap((255 * pd_seg / 19).astype(np.uint8), cv2.COLORMAP_JET)
# canvas = np.concatenate([gt_seg_color, pd_seg_color], axis=1)
# cv2.imshow('img', canvas)
# cv2.waitKey(0)
matrix = get_confusion_matrix(gt_seg, pd_seg, valid_mask, num_classes)
if confusion_matx is None:
confusion_matx = matrix
else:
confusion_matx += matrix
            scores = (probs * bins).round().astype(np.int32)  # (h, w, num_classes)
            for c in range(num_classes):
                cls_mask = np.logical_and(gt_seg == c, valid_mask)  # (h, w)
                cls_score = scores[..., c]  # (h, w)
pos_by_score[c] += np.bincount(cls_score[cls_mask], minlength=bins + 1)
neg_by_score[c] += np.bincount(cls_score[np.logical_not(cls_mask)], minlength=bins + 1)
pbar.update(1)
class_names = ['background', 'skin', 'nose', 'right_eye', 'left_eye', 'right_brow', 'left_brow', 'right_ear',
'left_ear', 'mouth_interior', 'top_lip', 'bottom_lip', 'neck', 'hair', 'beard', 'clothing',
'glasses', 'headwear', 'facewear']
assert len(class_names) == 19
# compute iou and f1
gt_pos = confusion_matx.sum(1) # (num_classes,)
pd_pos = confusion_matx.sum(0) # (num_classes,)
tp = np.diag(confusion_matx) # (num_classes,)
iou = tp / np.maximum(1, gt_pos + pd_pos - tp) # (num_classes,)
f1 = 2 * tp / np.maximum(1, gt_pos + pd_pos) # (num_classes,)
# compute weighted iou/f1 (excluding background and facewear class)
weight = 1 / np.sqrt(gt_pos[1:18])
if len(iou) > 1:
overall_iou = np.sum(iou[1:18] * weight) / np.sum(weight)
overall_f1 = np.sum(f1[1:18] * weight) / np.sum(weight)
else:
overall_iou = iou[0]
overall_f1 = f1[0]
# compute precision recall curve
_, ax = plt.subplots(figsize=(6, 7))
AP = []
for c in range(num_classes):
# get per class total count and total positives, sorted in score descending order
cls_neg = neg_by_score[c][::-1]
cls_pos = pos_by_score[c][::-1]
tps = np.cumsum(cls_pos)
fps = np.cumsum(cls_neg)
precision = tps / np.maximum(1, tps + fps)
# assert np.all(np.logical_not(np.isnan(precision)))
# precision[np.isnan(precision)] = 0
if tps[-1] == 0:
recall = tps / 0.0000001
else:
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
precision = np.r_[precision[sl], 1]
recall = np.r_[recall[sl], 0]
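        # Average precision as a rectangle-rule area under the PR curve: since recall is now
        # decreasing, -diff(recall) gives the recall increment captured at each threshold.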
average_precision = -np.sum(np.diff(recall) * np.array(precision)[:-1])
AP.append(average_precision)
# draw figure
display = PrecisionRecallDisplay(
recall=recall,
precision=precision,
average_precision=average_precision,
)
display.plot(ax=ax, name=class_names[c])
handles, labels = display.ax_.get_legend_handles_labels()
# set the legend and the axes
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.legend(handles=handles, labels=labels, loc="best")
ax.set_title("Precision recall curve")
chart = os.path.join(outputs, 'pr_curve.png')
plt.savefig(chart)
plt.close() # fixes a huge memory leak
# save metrics
csv_file = os.path.join(outputs, 'test_results.csv')
with open(csv_file, 'w', encoding='utf-8') as f:
df = pd.DataFrame(np.stack([iou, f1, AP], axis=0), columns=class_names[:num_classes], index=['iou', 'f1', 'AP'])
df.loc[:, 'overall'] = pd.Series([overall_iou, overall_f1], index=['iou', 'f1'])
df.to_csv(f)
print(df)
return (csv_file, chart, float(overall_f1))
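# Example (hypothetical paths, illustration only): the metrics can also be collected
# programmatically rather than via the CLI below, e.g.:
#   csv_file, chart, f1 = get_metrics((256, 256, 3), None, '/data/face_synthetics',
#                                     'snpe_output', num_classes=19)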
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Check the outputs of the Mask C-RNN model inference and produce ' +
'a .csv file named test_results.csv and a .png plot named pr_curve.png')
parser.add_argument('--input', help='Location of the original input images ' +
'(defaults to INPUT_DATASET environment variable)')
parser.add_argument('--show', '-s', help='Show the outputs on screen, press space to advance to the next image ' +
'and escape to cancel', action="store_true")
parser.add_argument('--output', '-o', help='Location of the outputs to analyze (default "snpe_output")',
default='snpe_output')
parser.add_argument('--transpose', '-t', help='Transpose channels by (1,2,0)', action="store_true")
parser.add_argument('--num_classes', type=int, help="Number of classes predicted (default 19)", default=19)
parser.add_argument('--pillow', help="Resize images using Pillow instead of numpy", action="store_true")
    parser.add_argument('--input_shape', help="Resize images to this size; must match the shape of the model output " +
                        "(default '256,256,3')")
args = parser.parse_args()
use_pillow = args.pillow
dataset = args.input
if not dataset:
dataset = os.getenv("INPUT_DATASET")
if not dataset:
print("please provide --input or set your INPUT_DATASET environment vairable")
sys.exit(1)
transpose = args.transpose
if transpose:
transpose = (1, 2, 0)
if not os.path.isdir(dataset):
print("input dataset not found: " + dataset)
sys.exit(1)
output_dir = args.output
if not os.path.isdir(output_dir):
print("Experiment 'output' dir not found: " + output_dir)
sys.exit(1)
input_shape = (256, 256, 3)
if args.input_shape:
        input_shape = tuple(eval(args.input_shape))
if args.show:
show_output(input_shape, transpose, dataset, output_dir)
else:
get_metrics(input_shape, transpose, dataset, output_dir, args.num_classes, use_pillow)
|
archai/tasks/face_segmentation/aml/vision/collect_metrics.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/vision/collect_metrics.py",
"repo_id": "archai",
"token_count": 5384
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import sys
from archai.discrete_search.api import ArchaiModel
from archai.common.config import Config
from archai.discrete_search.evaluators.remote_azure_benchmark import RemoteAzureBenchmarkEvaluator
from aml.util.setup import configure_store
def reset_dlc(store, experiment_name, entity):
""" Reset the qualcomm dlc files and associated metrics for the given entity."""
changed = False
name = entity['name']
prefix = f'{experiment_name}/{name}'
print(f"Resetting .dlc files for model {name}")
store.delete_blobs(prefix, 'model.dlc')
store.delete_blobs(prefix, 'model.quant.dlc')
for k in ['mean', 'macs', 'params', 'stdev', 'total_inference_avg', 'error', 'f1_1k', 'f1_10k', 'f1_1k_f', 'f1_onnx', 'pipeline_id']:
if k in entity:
del entity[k]
changed = True
if changed:
store.update_status_entity(entity)
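# Example (illustrative only; the experiment name and entity values are hypothetical):
#   reset_dlc(store, 'facesynthetics', {'name': 'id_abc123', 'mean': 1.2})
# would delete the cached model.dlc/model.quant.dlc blobs under 'facesynthetics/id_abc123' and
# clear the stale benchmark metrics ('mean' here) from the status entity before re-quantizing.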
def main():
# input and output arguments
parser = argparse.ArgumentParser(description="Runs Snapdragon F1 scoring on the final fully trained models produced by train_pareto.py.")
parser.add_argument("--config", type=str, help="location of the aml_search.yaml file", required=True)
args = parser.parse_args()
config = Config(args.config, resolve_env_vars=True)
aml_config = config['aml']
experiment_name = aml_config['experiment_name']
metric_key = 'final_val_iou'
search_config = config['search']
ss_config = search_config['search_space']
ss_params = ss_config['params']
in_channels = ss_params['in_channels']
img_size = ss_params['img_size']
target_config = search_config.get('target', {})
# change the metric key to the one used for Snapdragon F1 scoring
target_config['metric_key'] = 'f1_1k'
target_name = target_config.pop('name', 'cpu')
device_evaluator = None
if target_name != 'snp':
print(f"Snapdragon target is not configured in {args.config}")
sys.exit(1)
store = configure_store(aml_config)
fully_trained = [
e for e in store.get_all_status_entities(status='complete')
if metric_key in e
]
if len(fully_trained) == 0:
print(f"No 'complete' models found with required metric '{metric_key}'")
sys.exit(1)
# the RemoteAzureBenchmarkEvaluator only needs the archid actually, doesn't need the nn.Module.
models = []
for e in fully_trained:
name = e['name']
# if this has not been F1 scored yet then add it to our list.
if 'benchmark_only' in e:
models += [ArchaiModel(None, archid=name[3:])]
# make sure we re-quantize the new fully trained model.
reset_dlc(store, experiment_name, e)
# kick off remote device training without the benchmark_only flag so we get the
# F1 scores for these fully trained models. Note the above results_path ensures the trained
# models are uploaded back to our models blob store.
input_shape = (1, in_channels, *img_size[::-1])
device_evaluator = RemoteAzureBenchmarkEvaluator(
input_shape=input_shape,
store=store,
experiment_name=experiment_name,
onnx_export_kwargs={'opset_version': 11},
benchmark_only=0, # do full F1 scoring this time.
**target_config
)
for model in models:
device_evaluator.send(model)
device_evaluator.fetch_all()
if __name__ == "__main__":
main()
|
archai/tasks/face_segmentation/snp_test.py/0
|
{
"file_path": "archai/tasks/face_segmentation/snp_test.py",
"repo_id": "archai",
"token_count": 1342
}
| 330 |
#
# Config file for NAS search - Debug run
#
# job args
seed: 0
num_jobs_per_gpu: 2
num_latency_measurements: 15
num_input_per_latency_measurement: 15
# search args
num_iters: 7
init_num_models: 32
num_random_mix: 32
num_crossovers: 8
mutations_per_parent: 4
max_unseen_population: 32
# Search space args
r_range: [1, 2, 3, 4]
e_range: [2, 3, 4, 5, 6]
k_range:
- 3
- 5
- 7
channel_mult_range: [0.25, 0.5, 0.75, 1.0, 1.25]
depth_mult_range: [0.25, 0.5, 0.75, 1.0, 1.25]
# model trainer args
data_path: face_synthetics/dataset_100000
output_dir: ./output
max_num_images: 20000
train_crop_size: 128
epochs: 30
batch_size: 128
lr: 0.001
opt: adamw
lr_scheduler: steplr
lr_step_size: 100
lr_gamma: 0.5
wd: 0.00001
|
archai/tasks/facial_landmark_detection/search_config.yaml/0
|
{
"file_path": "archai/tasks/facial_landmark_detection/search_config.yaml",
"repo_id": "archai",
"token_count": 324
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import shutil
from archai.datasets.nlp.fast_hf_dataset_provider import FastHfDatasetProvider
TEST_CACHE_DIR='test_fast_hf_dataset_cache'
def test_fast_hf_dataset_provider_from_hub():
dataset_provider = FastHfDatasetProvider.from_hub(
"glue",
dataset_config_name="sst2",
tokenizer_name="Salesforce/codegen-350M-mono",
mapping_column_name=["sentence"],
use_shared_memory=False,
cache_dir=TEST_CACHE_DIR
)
# Assert that we can individually load training, validation and test datasets
with dataset_provider.get_train_dataset(seq_len=256) as train_dataset:
assert len(train_dataset) == 3514
with dataset_provider.get_val_dataset(seq_len=256) as val_dataset:
assert len(val_dataset) == 85
with dataset_provider.get_test_dataset(seq_len=256) as test_dataset:
assert len(test_dataset) == 169
def test_fast_hf_dataset_provider_from_cache():
dataset_provider = FastHfDatasetProvider.from_cache(TEST_CACHE_DIR)
# Assert that we can individually load training, validation and test datasets
with dataset_provider.get_train_dataset(seq_len=256) as train_dataset:
assert len(train_dataset) == 3514
with dataset_provider.get_val_dataset(seq_len=256) as val_dataset:
assert len(val_dataset) == 85
with dataset_provider.get_test_dataset(seq_len=256) as test_dataset:
assert len(test_dataset) == 169
shutil.rmtree(TEST_CACHE_DIR)
|
archai/tests/datasets/nlp/test_fast_hf_dataset_provider.py/0
|
{
"file_path": "archai/tests/datasets/nlp/test_fast_hf_dataset_provider.py",
"repo_id": "archai",
"token_count": 642
}
| 332 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
from unittest.mock import MagicMock
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import (
AsyncModelEvaluator,
ModelEvaluator,
)
class MyModelEvaluator(ModelEvaluator):
def __init__(self, dataset) -> None:
super().__init__()
@overrides
def evaluate(self, arch: ArchaiModel, budget: Optional[float] = None) -> float:
return 0.0
class MyAsyncModelEvaluator(AsyncModelEvaluator):
def __init__(self, dataset) -> None:
super().__init__()
@overrides
def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
return MagicMock()
@overrides
def fetch_all(self) -> List[Optional[float]]:
return list()
def test_model_evaluator():
arch = ArchaiModel(arch=MagicMock(), archid="test_archid", metadata={})
dataset = MagicMock()
# Assert that mocked value is returned
model_evaluator = MyModelEvaluator(dataset)
value = model_evaluator.evaluate(arch, budget=None)
assert value == 0.0
def test_async_model_evaluator():
arch = ArchaiModel(arch=MagicMock(), archid="test_archid", metadata={})
dataset = MagicMock()
# Assert that mocked method runs
async_model_evaluator = MyAsyncModelEvaluator(dataset)
assert async_model_evaluator.send(arch, budget=None)
# Assert that mocked value is returned
values = async_model_evaluator.fetch_all()
assert isinstance(values, list)
|
archai/tests/discrete_search/api/test_model_evaluator.py/0
|
{
"file_path": "archai/tests/discrete_search/api/test_model_evaluator.py",
"repo_id": "archai",
"token_count": 615
}
| 333 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
import torch
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.configuration_gpt2_flex import (
GPT2FlexConfig,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.modeling_gpt2_flex import (
GPT2FlexLMHeadModel,
GPT2FlexModel,
)
@pytest.fixture
def config():
return GPT2FlexConfig(vocab_size=128, n_embd=768, n_layer=2)
def test_gpt2_flex_lm_head_model_init(config):
model = GPT2FlexLMHeadModel(config)
    # Assert that the model's transformer and lm_head attributes are configured correctly
assert isinstance(model.transformer, GPT2FlexModel)
assert isinstance(model.lm_head, torch.nn.Linear)
assert model.lm_head.in_features == config.n_embd
assert model.lm_head.out_features == config.vocab_size
def test_gpt2_flex_lm_head_model_forward_pass(config):
model = GPT2FlexLMHeadModel(config)
# Assert that the model is able to forward pass
input_tensor = torch.randint(0, config.vocab_size, (1, 32))
output = model(input_tensor)
assert output.logits.shape == (1, 32, config.vocab_size)
|
archai/tests/discrete_search/search_spaces/nlp/transformer_flex/models/test_modeling_gpt2_flex.py/0
|
{
"file_path": "archai/tests/discrete_search/search_spaces/nlp/transformer_flex/models/test_modeling_gpt2_flex.py",
"repo_id": "archai",
"token_count": 442
}
| 334 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
import torch
from archai.quantization.modules import (
FakeDynamicQuant,
FakeDynamicQuantConv1d,
FakeDynamicQuantLinear,
FakeQuantEmbedding,
)
@pytest.fixture
def fake_quant_embedding():
return FakeQuantEmbedding(num_embeddings=5, embedding_dim=3)
@pytest.fixture
def fake_dynamic_quant_linear():
return FakeDynamicQuantLinear(in_features=3, out_features=2)
@pytest.fixture
def fake_dynamic_quant_conv1d():
return FakeDynamicQuantConv1d(in_channels=3, out_channels=2, kernel_size=3)
def test_fake_quant_embedding_init(fake_quant_embedding):
# Assert that the `fake_quant_embedding` is initialized correctly
assert fake_quant_embedding.num_embeddings == 5
assert fake_quant_embedding.embedding_dim == 3
assert isinstance(fake_quant_embedding.weight_fake_quant, FakeDynamicQuant)
def test_fake_quant_embedding_fake_quant_weight(fake_quant_embedding):
# Assert that the `fake_quant_weight` has correct shape and type
fake_quant_weight = fake_quant_embedding.fake_quant_weight
assert fake_quant_weight.shape == (5, 3)
assert isinstance(fake_quant_weight, torch.Tensor)
def test_fake_quant_embedding_forward(fake_quant_embedding):
x = torch.tensor([0, 1, 2, 3, 4])
# Assert that the `output` has correct shape and type
output = fake_quant_embedding(x)
assert output.shape == (5, 3)
assert isinstance(output, torch.Tensor)
def test_fake_quant_embedding_from_float():
mod = torch.nn.Embedding(num_embeddings=5, embedding_dim=3)
qconfig = {}
# Assert that the `quantized_mod` has correct attributes, values and types
quantized_mod = FakeQuantEmbedding.from_float(mod, qconfig)
assert quantized_mod.num_embeddings == mod.num_embeddings
assert quantized_mod.embedding_dim == mod.embedding_dim
assert quantized_mod.weight.model_parallel is False
def test_fake_quant_embedding_to_float(fake_quant_embedding):
# Assert that the `float_mod` has correct attributes, values and types
float_mod = fake_quant_embedding.to_float()
assert float_mod.num_embeddings == fake_quant_embedding.num_embeddings
assert float_mod.embedding_dim == fake_quant_embedding.embedding_dim
assert float_mod.weight.model_parallel is True
def test_fake_dynamic_quant_linear_init(fake_dynamic_quant_linear):
# Assert that the `fake_dynamic_quant_linear` is initialized correctly
assert fake_dynamic_quant_linear.in_features == 3
assert fake_dynamic_quant_linear.out_features == 2
assert isinstance(fake_dynamic_quant_linear.weight_fake_quant, FakeDynamicQuant)
assert isinstance(fake_dynamic_quant_linear.input_pre_process, FakeDynamicQuant)
def test_fake_dynamic_quant_linear_fake_quant_weight(fake_dynamic_quant_linear):
# Assert that the `fake_quant_weight` has correct shape and type
fake_quant_weight = fake_dynamic_quant_linear.fake_quant_weight
assert fake_quant_weight.shape == (2, 3)
assert isinstance(fake_quant_weight, torch.Tensor)
def test_fake_dynamic_quant_linear_forward(fake_dynamic_quant_linear):
x = torch.randn(4, 3)
# Assert that the `output` has correct shape and type
output = fake_dynamic_quant_linear(x)
assert output.shape == (4, 2)
assert isinstance(output, torch.Tensor)
def test_fake_dynamic_quant_linear_from_float():
mod = torch.nn.Linear(in_features=3, out_features=2)
qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
# Assert that the `quantized_mod` has correct attributes, values and types
quantized_mod = FakeDynamicQuantLinear.from_float(mod, qconfig)
assert quantized_mod.in_features == mod.in_features
assert quantized_mod.out_features == mod.out_features
assert torch.equal(quantized_mod.weight, mod.weight)
assert torch.equal(quantized_mod.bias, mod.bias)
assert isinstance(quantized_mod.weight_fake_quant, FakeDynamicQuant)
assert isinstance(quantized_mod.input_pre_process, FakeDynamicQuant)
def test_fake_dynamic_quant_linear_to_float(fake_dynamic_quant_linear):
# Assert that the `float_mod` has correct attributes, values and types
float_mod = fake_dynamic_quant_linear.to_float()
assert float_mod.in_features == fake_dynamic_quant_linear.in_features
assert float_mod.out_features == fake_dynamic_quant_linear.out_features
assert torch.equal(float_mod.weight, fake_dynamic_quant_linear.weight_fake_quant(fake_dynamic_quant_linear.weight))
assert torch.equal(float_mod.bias, fake_dynamic_quant_linear.bias)
def test_fake_dynamic_quant_conv1d_init(fake_dynamic_quant_conv1d):
# Assert that the `fake_dynamic_quant_conv1d` is initialized correctly
assert fake_dynamic_quant_conv1d.in_channels == 3
assert fake_dynamic_quant_conv1d.out_channels == 2
assert fake_dynamic_quant_conv1d.kernel_size == (3,)
assert isinstance(fake_dynamic_quant_conv1d.weight_fake_quant, FakeDynamicQuant)
assert isinstance(fake_dynamic_quant_conv1d.input_pre_process, FakeDynamicQuant)
def test_fake_dynamic_quant_conv1d_fake_quant_weight(fake_dynamic_quant_conv1d):
# Assert that the `fake_quant_weight` has correct shape and type
fake_quant_weight = fake_dynamic_quant_conv1d.fake_quant_weight
assert fake_quant_weight.shape == (2, 3, 3)
assert isinstance(fake_quant_weight, torch.Tensor)
def test_fake_dynamic_quant_conv1d_forward(fake_dynamic_quant_conv1d):
x = torch.randn(3, 3)
# Assert that the `output` has correct shape and type
output = fake_dynamic_quant_conv1d(x)
assert output.shape == (2, 1)
assert isinstance(output, torch.Tensor)
def test_fake_dynamic_quant_conv1d_from_float():
mod = torch.nn.Conv1d(in_channels=3, out_channels=2, kernel_size=3)
qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
# Assert that the `quantized_mod` has correct attributes, values and types
quantized_mod = FakeDynamicQuantConv1d.from_float(mod, qconfig)
assert quantized_mod.in_channels == mod.in_channels
assert quantized_mod.out_channels == mod.out_channels
assert quantized_mod.kernel_size == mod.kernel_size
assert torch.equal(quantized_mod.weight, mod.weight)
assert torch.equal(quantized_mod.bias, mod.bias)
assert isinstance(quantized_mod.weight_fake_quant, FakeDynamicQuant)
assert isinstance(quantized_mod.input_pre_process, FakeDynamicQuant)
def test_fake_dynamic_quant_conv1d_to_float(fake_dynamic_quant_conv1d):
# Assert that the `float_mod` has correct attributes, values and types
float_mod = fake_dynamic_quant_conv1d.to_float()
assert float_mod.in_channels == fake_dynamic_quant_conv1d.in_channels
assert float_mod.out_channels == fake_dynamic_quant_conv1d.out_channels
assert float_mod.kernel_size == fake_dynamic_quant_conv1d.kernel_size
assert torch.equal(float_mod.weight, fake_dynamic_quant_conv1d.weight_fake_quant(fake_dynamic_quant_conv1d.weight))
assert torch.equal(float_mod.bias, fake_dynamic_quant_conv1d.bias)
|
archai/tests/quantization/test_modules.py/0
|
{
"file_path": "archai/tests/quantization/test_modules.py",
"repo_id": "archai",
"token_count": 2523
}
| 335 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import tempfile
import torch
from transformers import GPT2Config, GPT2LMHeadModel
from archai.trainers.nlp.nvidia_trainer import save_checkpoint
def test_save_checkpoint():
output_dir = tempfile.mkdtemp()
model = GPT2LMHeadModel(config=GPT2Config(vocab_size=1, n_layer=1))
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
scaler = torch.cuda.amp.GradScaler()
trainer_state = {"step": 0}
# Assert that the checkpoint file exists
save_checkpoint(
output_dir=output_dir,
model=model,
optimizer=optimizer,
scheduler=scheduler,
scaler=scaler,
trainer_state=trainer_state,
fp16=False,
save_all_checkpoints=False,
is_best_model=False,
)
checkpoint_path = os.path.join(output_dir, "checkpoint-last.pt")
assert os.path.exists(checkpoint_path)
# Assert that the checkpoint file contains the expected data
checkpoint = torch.load(checkpoint_path)
assert checkpoint["model_config"] == model.config
for key in checkpoint["model_state"]:
assert torch.equal(checkpoint["model_state"][key], model.state_dict()[key])
for key in checkpoint["optimizer_state"]:
assert checkpoint["optimizer_state"][key] == optimizer.state_dict()[key]
for key in checkpoint["scheduler_state"]:
assert checkpoint["scheduler_state"][key] == scheduler.state_dict()[key]
assert checkpoint["scaler_state"] is None
assert checkpoint["trainer_state"] == trainer_state
# Assert that the best model checkpoint file exists
save_checkpoint(
output_dir=output_dir,
model=model,
optimizer=optimizer,
scheduler=scheduler,
scaler=scaler,
trainer_state=trainer_state,
fp16=False,
save_all_checkpoints=False,
is_best_model=True,
)
checkpoint_path = os.path.join(output_dir, "checkpoint-best.pt")
assert os.path.exists(checkpoint_path)
# Assert that the best model checkpoint file contains the expected data
checkpoint = torch.load(checkpoint_path)
assert checkpoint["model_config"] == model.config
for key in checkpoint["model_state"]:
assert torch.equal(checkpoint["model_state"][key], model.state_dict()[key])
for key in checkpoint["optimizer_state"]:
assert checkpoint["optimizer_state"][key] == optimizer.state_dict()[key]
for key in checkpoint["scheduler_state"]:
assert checkpoint["scheduler_state"][key] == scheduler.state_dict()[key]
assert checkpoint["scaler_state"] is None
assert checkpoint["trainer_state"] == trainer_state
|
archai/tests/trainers/nlp/test_nvidia_trainer.py/0
|
{
"file_path": "archai/tests/trainers/nlp/test_nvidia_trainer.py",
"repo_id": "archai",
"token_count": 1049
}
| 336 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.exceptions import (
ClientException,
ClientRequestError,
AuthenticationError,
)
class AzureDevOpsClientError(ClientException):
pass
class AzureDevOpsAuthenticationError(AuthenticationError):
pass
class AzureDevOpsClientRequestError(ClientRequestError):
pass
class AzureDevOpsServiceError(AzureDevOpsClientRequestError):
"""AzureDevOpsServiceError.
"""
def __init__(self, wrapped_exception):
self.inner_exception = None
if wrapped_exception.inner_exception is not None:
self.inner_exception = AzureDevOpsServiceError(wrapped_exception.inner_exception)
super(AzureDevOpsServiceError, self).__init__(message=wrapped_exception.message,
inner_exception=self.inner_exception)
self.message = wrapped_exception.message
self.exception_id = wrapped_exception.exception_id
self.type_name = wrapped_exception.type_name
self.type_key = wrapped_exception.type_key
self.error_code = wrapped_exception.error_code
self.event_id = wrapped_exception.event_id
self.custom_properties = wrapped_exception.custom_properties
|
azure-devops-python-api/azure-devops/azure/devops/exceptions.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/exceptions.py",
"repo_id": "azure-devops-python-api",
"token_count": 507
}
| 337 |