| text (string, length 5–22M) | id (string, length 12–177) | metadata (dict) | __index_level_0__ (int64, 0–1.37k) |
|---|---|---|---|
import subprocess
import argparse
import os, sys, time
import torch
from parameters16g_es_corpusb import *
## Xinyu: Copied from run_hf.sh, remember to change CUDA_VISIBLE_DEVICES & nproc_per_node
# --model_name_or_path $1 \
# --output_dir $2 \
# --data_dir $3 \
# --train_file $4 \
# --validation_file $5 \
# --pad_to_max_length\
# --per_device_train_batch_size $6 \
# --gradient_accumulation_steps $7 \
# --learning_rate $8 \
# --num_train_epochs $9 \
# --seed ${10} \
# --remove_unused_columns False \
# --num_beams 4 \
# --save_strategy epoch\
# --evaluation_strategy no \
# --logging_steps 200 \
# --max_train_samples ${11} \
# --max_predict_samples ${12} \
# --predict_with_generate \
# --do_predict ${13} \
# --test_file ${14} \
# --do_eval False \
# --do_train ${15} \
# --prediction_mode ${16} \
# --overwrite_cache\
# --overwrite_output_dir
sh_parameters_gen = {
1: None,
2: None,
3: corpus_dir,
4: gen_train_iter_file,
5: gen_val_src_file,
6: gen_per_device_train_batch_size,
7: gen_gradient_accumulation_steps,
8: gen_learning_rate,
9: 1, # num of epoch by default to 1
10: None,
11: gen_train_samples_per_iter,
12: None,
13: None,
14: None,
15: None,
16: None,
    17: None,
    18: None,
    19: gen_per_device_eval_batch_size
}
def print_cmd_launch(cmd, iter, info):
print(f"\n\n\n================= iter {iter} ============================")
print(f"{info}\n")
print(f"Launching cmd:\n{cmd}\n")
print(f"========================================================\n\n\n")
def run_command(bash_command):
    try:
        process = subprocess.Popen(bash_command.split())
        process.wait()
        # Without stdout/stderr pipes, communicate() returns (None, None);
        # the child's output goes straight to the console.
        output, error = process.communicate()
        print(error)
        print(output)
    except Exception as e:
        print(f"Failed to run command '{bash_command}': {e}")
        sys.exit(1)
def build_step0_cmd(gen_model_path,alpha):
sh_parameters_gen[1] = gen_model_path
sh_parameters_gen[2] = gen_output_dir # inference only
sh_parameters_gen[4] = gen_train_src_toy_file
sh_parameters_gen[10] = int(time.time())
sh_parameters_gen[12] = gen_train_samples_per_iter
sh_parameters_gen[13] = True
sh_parameters_gen[14] = gen_train_src_file
sh_parameters_gen[15] = False
sh_parameters_gen[16] = "gen"
sh_parameters_gen[17] = gen_num_beams
sh_parameters_gen[18] = alpha
return f"sh run_hf.sh {' '.join([str(v) for v in sh_parameters_gen.values()])}"
def build_step1_cmd(gen_model_path,alpha):
sh_parameters_gen[1] = gen_model_path
sh_parameters_gen[2] = gen_output_dir # inference only
sh_parameters_gen[4] = gen_train_src_toy_file
sh_parameters_gen[10] = int(time.time())
sh_parameters_gen[12] = ver_train_samples_per_iter
sh_parameters_gen[13] = True
sh_parameters_gen[14] = ver_train_src_file
sh_parameters_gen[15] = False
sh_parameters_gen[16] = "ver"
sh_parameters_gen[18] = alpha
return f"sh run_hf.sh {' '.join([str(v) for v in sh_parameters_gen.values()])}"
def build_step4_cmd(gen_model_path, gen_save_prefix,alpha):
sh_parameters_gen[1] = gen_model_path
sh_parameters_gen[2] = os.path.join(gen_output_dir, gen_save_prefix)
sh_parameters_gen[4] = gen_train_iter_file
sh_parameters_gen[10] = int(time.time())
sh_parameters_gen[12] = gen_train_samples_per_iter
sh_parameters_gen[13] = False
sh_parameters_gen[14] = gen_train_src_file
sh_parameters_gen[15] = True
sh_parameters_gen[16] = "gen"
sh_parameters_gen[18] = alpha
return f"sh run_hf.sh {' '.join([str(v) for v in sh_parameters_gen.values()])}"
gen_save_prefix, gen_model_path, ver_save_prefix, ver_model_path = None, None, None, None
if __name__ == "__main__":
for i in range(1, max_iter+1):
gen_load_prefix, gen_save_prefix = f"gen_iter_{i - 1}", f"gen_iter_{i}"
gen_model_path = initial_gen_path if i == 1 else os.path.join(gen_output_dir, gen_load_prefix)
        ## 0. Generator does self-sampling (SS-corpus) --> "gen_train_iter_unlabeled.jsonl"
step0_cmd = build_step0_cmd(gen_model_path,gan_alpha)
# print_cmd_launch(step0_cmd, i, "STEP 0"); run_command(step0_cmd)
        ## 1. Gen creates GAN-VER-corpus --> "ver_train_iter_unlabled.jsonl"
step1_cmd = build_step1_cmd(gen_model_path,gan_alpha)
print_cmd_launch(step1_cmd, i, "STEP 1"); run_command(step1_cmd)
## 2. NLI label GAN Ver-Train-corpus --> "ver_train_iter.jsonl"
cmd_nli = f"sh run_nli_es.sh" ### Xinyu: Adjust its inference bsz
#print_cmd_launch(cmd_nli, i, "STEP 2"); run_command(cmd_nli)
        ## 3. Train Ver on GAN Ver-train-corpus & Label SS-corpus --> "gen_train_iter.jsonl"
ver_load_prefix, ver_save_prefix = f"ver_iter_{i - 1}", f"ver_iter_{i}"
ver_model_path = initial_ver_path if i == 1 else os.path.join(ver_output_dir, ver_load_prefix)
ver_save_path = os.path.join(ver_output_dir, ver_save_prefix)
cmd_ver = f"sh run_ver_es.sh {ver_model_path} {ver_save_path}"
print_cmd_launch(cmd_ver, i, "STEP 3"); run_command(cmd_ver)
        ## 4. Train Gen on labeled SS-corpus & create SS-corpus for next iteration --> "gen_train_iter_unlabeled.jsonl"
        step4_cmd = build_step4_cmd(gen_model_path, gen_save_prefix, gan_alpha)  # note: we do not run prediction here because the file formats misalign
print_cmd_launch(step4_cmd, i, "STEP 4"); run_command(step4_cmd)
|
ContextualSP/logigan/pre-training/launcher_es.py/0
|
{
"file_path": "ContextualSP/logigan/pre-training/launcher_es.py",
"repo_id": "ContextualSP",
"token_count": 2366
}
| 243 |
(('Who', '?x#a#ns:people.person'), 51406)
(('star', '?x#ns:film.actor.film/ns:film.performance.film#M'), 48417)
(('writer', '?x#ns:film.writer.film#M'), 47417)
(('editor', '?x#ns:film.editor.film#M'), 46570)
(('cinematographer', '?x#ns:film.cinematographer.film#M'), 46418)
(('produced', '?x#ns:film.film.produced_by|ns:film.film.production_companies#?x'), 43642)
(('directed', '?x#ns:film.film.directed_by#?x'), 42819)
(('written', '?x#ns:film.film.written_by#?x'), 41922)
(('edited', '?x#ns:film.film.edited_by#?x'), 41921)
(('producer', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 40972)
(('director', '?x#ns:film.director.film#M'), 37057)
(('film', '?x#a#ns:film.film'), 34118)
(('Was M', '?x#is#M'), 32809)
(('person', '?x#a#ns:people.person'), 32496)
(('female', '?x#ns:people.person.gender#ns:m.02zsn'), 32330)
(('male', '?x#ns:people.person.gender#ns:m.05zppz'), 31206)
(('influenced', '?x#ns:influence.influence_node.influenced_by#M'), 27499)
(('screenwriter', '?x#a#ns:film.writer'), 24674)
(('spouse', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#?x'), 24555)
(('sibling', '?x#ns:people.person.sibling_s/ns:people.sibling_relationship.sibling|ns:fictional_universe.fictional_character.siblings/ns:fictional_universe.sibling_relationship_of_fictional_characters.siblings#?x'), 23428)
(('actor', '?x#a#ns:film.actor'), 22955)
(('produced', '?x#ns:film.film.executive_produced_by#?x'), 22780)
(('cinematographer', '?x#a#ns:film.cinematographer'), 22511)
(('executive', '?x#ns:film.producer.films_executive_produced#M'), 20925)
(('influenced', '?x#ns:influence.influence_node.influenced_by#?x'), 20887)
(('art', '?x#ns:film.film_art_director.films_art_directed#M'), 20259)
(('parent', '?x#ns:people.person.children|ns:fictional_universe.fictional_character.children|ns:organization.organization.child/ns:organization.organization_relationship.child#?x'), 20191)
(('art director', '?x#ns:film.film_art_director.films_art_directed#M'), 20068)
(('Were M', '?x#is#M'), 19401)
(('executive produced', '?x#ns:film.film.executive_produced_by#?x'), 19198)
(('child', '?x#ns:people.person.parents|ns:fictional_universe.fictional_character.parents|ns:organization.organization.parent/ns:organization.organization_relationship.parent#?x'), 19088)
(('employed', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#M'), 18797)
(('sibling', '?x#ns:people.person.sibling_s/ns:people.sibling_relationship.sibling|ns:fictional_universe.fictional_character.siblings/ns:fictional_universe.sibling_relationship_of_fictional_characters.siblings#M'), 17879)
(('spouse', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#M'), 17857)
(('founder', '?x#ns:organization.organization_founder.organizations_founded#M'), 17771)
(('marry', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#M'), 17691)
(('executive producer', '?x#ns:film.producer.films_executive_produced#M'), 17685)
(('employee', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#M'), 16601)
(('Did M', '?x#is#M'), 16421)
(('costume designer', '?x#ns:film.film_costumer_designer.costume_design_for_film#M'), 16040)
(('French', '?x#ns:people.person.nationality#ns:m.0f8l9c'), 15216)
(('marry', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#?x'), 15041)
(('film producer', '?x#a#ns:film.producer'), 14742)
(('edited', '?x#ns:film.film.edited_by#M'), 14224)
(('Canadian', '?x#ns:people.person.nationality#ns:m.0d060g'), 14202)
(('written', '?x#ns:film.film.written_by#M'), 13856)
(('directed', '?x#ns:film.film.directed_by#M'), 13619)
(('Italian', '?x#ns:people.person.nationality#ns:m.03rjj'), 13458)
(('married', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#M'), 13342)
(('write', '?x#ns:film.writer.film#M'), 12890)
(('actor', '?x#ns:film.actor.film/ns:film.performance.character#M'), 12864)
(('British', '?x#ns:people.person.nationality#ns:m.07ssc'), 12582)
(('Spanish', '?x#ns:people.person.nationality#ns:m.06mkj'), 12174)
(('Japanese', '?x#ns:people.person.nationality#ns:m.03_3d'), 12024)
(('film editor', '?x#a#ns:film.editor'), 11782)
(('parent', '?x#ns:people.person.children|ns:fictional_universe.fictional_character.children|ns:organization.organization.child/ns:organization.organization_relationship.child#M'), 11670)
(('film director', '?x#a#ns:film.director'), 11669)
(('founder', '?x#ns:organization.organization_founder.organizations_founded#?x'), 11610)
(('child', '?x#ns:people.person.parents|ns:fictional_universe.fictional_character.parents|ns:organization.organization.parent/ns:organization.organization_relationship.parent#M'), 11413)
(('Chinese', '?x#ns:people.person.nationality#ns:m.0d05w3'), 11400)
(('founded', '?x#ns:organization.organization.founders#M'), 11340)
(('founded', '?x#ns:organization.organization.founders#?x'), 11234)
(('prequel', '?x#ns:film.film.sequel#M'), 10983)
(('Mexican', '?x#ns:people.person.nationality#ns:m.0b90_r'), 10964)
(('edited', '?x#ns:film.editor.film#M'), 10959)
(('employee', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#?x'), 10918)
(('character', '?x#a#ns:fictional_universe.fictional_character'), 10806)
(('Swedish', '?x#ns:people.person.nationality#ns:m.0d0vqn'), 10620)
(('German', '?x#ns:people.person.nationality#ns:m.0345h'), 10586)
(('company', '?x#a#ns:business.employer'), 10367)
(('costume', '?x#ns:film.film_costumer_designer.costume_design_for_film#M'), 10309)
(('edit', '?x#ns:film.editor.film#M'), 10139)
(('American', '?x#ns:people.person.nationality#ns:m.09c7w0'), 9988)
(('influenced', '?x#ns:influence.influence_node.influenced#M'), 9940)
(('sequel', '?x#ns:film.film.prequel#M'), 9878)
(('influence', '?x#ns:influence.influence_node.influenced#M'), 9772)
(('produce', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 9471)
(('art', '?x#a#ns:film.film_art_director'), 9069)
(('art director', '?x#a#ns:film.film_art_director'), 9058)
(('Dutch', '?x#ns:people.person.nationality#ns:m.059j2'), 8942)
(('costume designer', '?x#a#ns:film.film_costumer_designer'), 8890)
(('influence', '?x#ns:influence.influence_node.influenced#?x'), 8794)
(('produced', '?x#ns:film.film.produced_by|ns:film.film.production_companies#M'), 8739)
(('founded', '?x#ns:organization.organization_founder.organizations_founded#M'), 8675)
(('star', '?x#ns:film.film.starring/ns:film.performance.actor#?x'), 8507)
(('married', '?x#ns:people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses#?x'), 8431)
(('executive produced', '?x#ns:film.film.executive_produced_by#M'), 8394)
(('producer', '?x#a#ns:film.producer'), 8055)
(('directed', '?x#ns:film.director.film#M'), 8032)
(('direct', '?x#ns:film.director.film#M'), 7955)
(('wrote', '?x#ns:film.writer.film#M'), 7944)
(('editor', '?x#a#ns:film.editor'), 7763)
(('employer', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#?x'), 7666)
(('producer', '?x#ns:film.producer.films_executive_produced#M'), 7591)
(('designer', '?x#ns:film.film_costumer_designer.costume_design_for_film#M'), 6901)
(('star', '?x#ns:film.actor.film/ns:film.performance.film#?x'), 6900)
(('film', '?x#a#ns:film.producer'), 6743)
(('influenced', '?x#ns:influence.influence_node.influenced#?x'), 6605)
(('director', '?x#a#ns:film.director'), 6509)
(('cinematographer', '?x#ns:film.cinematographer.film#?x'), 6406)
(('producer', '?x#ns:film.producer.film|ns:film.production_company.films#?x'), 6350)
(('editor', '?x#ns:film.editor.film#?x'), 6185)
(('executive produce', '?x#ns:film.producer.films_executive_produced#M'), 6182)
(('distributed', '?x#ns:film.film.distributors/ns:film.film_film_distributor_relationship.distributor#M'), 5837)
(('executive produced', '?x#ns:film.producer.films_executive_produced#M'), 5800)
(('employed', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#?x'), 5711)
(('produced', '?x#ns:film.film.executive_produced_by#M'), 5620)
(('actor', '?x#ns:film.actor.film/ns:film.performance.character#?x'), 5594)
(('employ', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#M'), 5566)
(('writer', '?x#ns:film.writer.film#?x'), 5528)
(('employ', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#?x'), 5464)
(('influence', '?x#ns:influence.influence_node.influenced_by#M'), 5284)
(('film', '?x#a#ns:film.director'), 5189)
(('acquired', '?x#ns:organization.organization.acquired_by/ns:business.acquisition.acquiring_company#M'), 5104)
(('designer', '?x#a#ns:film.film_costumer_designer'), 5018)
(('distributor', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 5017)
(('produced', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 4999)
(('played', '?x#ns:film.actor.film/ns:film.performance.character#M'), 4986)
(('star', '?x#ns:film.film.starring/ns:film.performance.actor#M'), 4772)
(('starred', '?x#ns:film.actor.film/ns:film.performance.film#M'), 4719)
(('distributor', '?x#a#ns:film.film_distributor'), 4647)
(('production', '?x#a#ns:film.production_company'), 4645)
(('film distributor', '?x#a#ns:film.film_distributor'), 4638)
(('production company', '?x#a#ns:film.production_company'), 4629)
(('employer', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#M'), 4563)
(('director', '?x#ns:film.director.film#?x'), 4288)
(('film', '?x#a#ns:film.editor'), 4063)
(('found', '?x#ns:organization.organization_founder.organizations_founded#M'), 4063)
(('costume', '?x#a#ns:film.film_costumer_designer'), 3873)
(('distributed', '?x#ns:film.film.distributors/ns:film.film_film_distributor_relationship.distributor#?x'), 3542)
(('director', '?x#ns:film.film_art_director.films_art_directed#M'), 3258)
(('play', '?x#ns:film.actor.film/ns:film.performance.character#M'), 3226)
(('write', '?x#ns:film.film.written_by#?x'), 3077)
(('written', '?x#ns:film.writer.film#M'), 2986)
(('art', '?x#ns:film.film_art_director.films_art_directed#?x'), 2985)
(('employ', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#M'), 2978)
(('art director', '?x#ns:film.film_art_director.films_art_directed#?x'), 2918)
(('produce', '?x#ns:film.film.produced_by|ns:film.film.production_companies#?x'), 2907)
(('acquire', '?x#ns:organization.organization.companies_acquired/ns:business.acquisition.company_acquired#M'), 2882)
(('direct', '?x#ns:film.film.directed_by#?x'), 2825)
(('executive produce', '?x#ns:film.film.executive_produced_by#?x'), 2820)
(('executive producer', '?x#ns:film.producer.films_executive_produced#?x'), 2777)
(('edit', '?x#ns:film.film.edited_by#?x'), 2698)
(('influence', '?x#ns:influence.influence_node.influenced_by#?x'), 2598)
(('executive', '?x#ns:film.producer.films_executive_produced#?x'), 2572)
(('found', '?x#ns:organization.organization.founders#M'), 2518)
(('founded', '?x#ns:organization.organization_founder.organizations_founded#?x'), 2517)
(('executive', '?x#ns:film.film.executive_produced_by#M'), 2479)
(('write', '?x#ns:film.writer.film#?x'), 2455)
(('costume designer', '?x#ns:film.film_costumer_designer.costume_design_for_film#?x'), 2334)
(('nationality', '?x#^ns:people.person.nationality#?x'), 2291)
(('costume', '?x#ns:film.film_costumer_designer.costume_design_for_film#?x'), 2251)
(('write', '?x#ns:film.film.written_by#M'), 2185)
(('produce', '?x#ns:film.producer.film|ns:film.production_company.films#?x'), 2116)
(('gender', '?x#^ns:people.person.gender#?x'), 2001)
(('prequel', '?x#ns:film.film.sequel#?x'), 1956)
(('found', '?x#ns:organization.organization_founder.organizations_founded#?x'), 1939)
(('acquired', '?x#ns:organization.organization.acquired_by/ns:business.acquisition.acquiring_company#?x'), 1918)
(('edit', '?x#ns:film.editor.film#?x'), 1884)
(('executive produce', '?x#ns:film.film.executive_produced_by#M'), 1837)
(('direct', '?x#ns:film.director.film#?x'), 1790)
(('executive', '?x#ns:film.film.executive_produced_by#?x'), 1758)
(('sequel', '?x#ns:film.film.prequel#?x'), 1702)
(('produce', '?x#ns:film.producer.films_executive_produced#M'), 1663)
(('acquire', '?x#ns:organization.organization.companies_acquired/ns:business.acquisition.company_acquired#?x'), 1620)
(('employed', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#M'), 1620)
(('distribute', '?x#ns:film.film.distributors/ns:film.film_film_distributor_relationship.distributor#M'), 1592)
(('executive produce', '?x#ns:film.producer.films_executive_produced#?x'), 1555)
(('edit', '?x#ns:film.film.edited_by#M'), 1542)
(('starred', '?x#ns:film.film.starring/ns:film.performance.actor#M'), 1287)
(('direct', '?x#ns:film.film.directed_by#M'), 1195)
(('acquire', '?x#ns:organization.organization.acquired_by/ns:business.acquisition.acquiring_company#M'), 1186)
(('acquired', '?x#ns:organization.organization.companies_acquired/ns:business.acquisition.company_acquired#M'), 1180)
(('distributed', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 1145)
(('producer', '?x#ns:film.producer.films_executive_produced#?x'), 1131)
(('found', '?x#ns:organization.organization.founders#?x'), 1122)
(('employed', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#?x'), 1104)
(('film director', '?x#ns:film.director.film#M'), 1096)
(('produce', '?x#ns:film.film.produced_by|ns:film.film.production_companies#M'), 1056)
(('nationality', '?x#^ns:people.person.nationality#M'), 1011)
(('wrote', '?x#ns:film.writer.film#?x'), 902)
(('distribute', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 899)
(('distributor', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#?x'), 813)
(('country', '?x#^ns:people.person.nationality#?x'), 779)
(('produced', '?x#ns:film.producer.films_executive_produced#M'), 770)
(('country of nationality', '?x#^ns:people.person.nationality#?x'), 748)
(('directed', '?x#ns:film.director.film#?x'), 643)
(('starred', '?x#ns:film.film.starring/ns:film.performance.actor#?x'), 643)
(('founder', '?x#ns:organization.organization.founders#?x'), 634)
(('edited', '?x#ns:film.editor.film#?x'), 631)
(('employee', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#?x'), 622)
(('employ', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#?x'), 601)
(('produced', '?x#ns:film.producer.film|ns:film.production_company.films#?x'), 596)
(('executive produced', '?x#ns:film.producer.films_executive_produced#?x'), 520)
(('country', '?x#^ns:people.person.nationality#M'), 514)
(('distribute', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#?x'), 500)
(('employer', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#?x'), 476)
(('child', '?x#ns:people.person.children|ns:fictional_universe.fictional_character.children|ns:organization.organization.child/ns:organization.organization_relationship.child#?x'), 420)
(('acquired', '?x#ns:organization.organization.companies_acquired/ns:business.acquisition.company_acquired#?x'), 412)
(('parent', '?x#ns:people.person.parents|ns:fictional_universe.fictional_character.parents|ns:organization.organization.parent/ns:organization.organization_relationship.parent#?x'), 401)
(('acquire', '?x#ns:organization.organization.acquired_by/ns:business.acquisition.acquiring_company#?x'), 372)
(('distribute', '?x#ns:film.film.distributors/ns:film.film_film_distributor_relationship.distributor#?x'), 354)
(('distributed', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#?x'), 237)
(('cinematographer', '?x#ns:film.film.cinematography#?x'), 187)
(('gender', '?x#^ns:people.person.gender#M'), 163)
(('writer', '?x#ns:film.film.written_by#?x'), 138)
(('starred', '?x#ns:film.actor.film/ns:film.performance.film#?x'), 134)
(('film', '?x#ns:film.director.film#M'), 132)
(('sequel', '?x#ns:film.film.sequel#?x'), 126)
(('country', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#M'), 125)
(('film editor', '?x#ns:film.editor.film#M'), 125)
(('country of nationality', '?x#^ns:people.person.nationality#M'), 118)
(('wrote', '?x#ns:film.film.written_by#M'), 102)
(('art director', '?x#ns:film.film.film_art_direction_by#?x'), 96)
(('art', '?x#ns:film.film.film_art_direction_by#?x'), 95)
(('prequel', '?x#ns:film.film.prequel#?x'), 86)
(('country', '?x#ns:people.person.nationality#?x'), 85)
(('director', '?x#ns:film.film.directed_by#?x'), 84)
(('film producer', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 80)
(('editor', '?x#ns:film.film.edited_by#?x'), 75)
(('employer employ', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#M'), 74)
(('film director direct', '?x#ns:film.director.film#M'), 70)
(('executive producer executive produce', '?x#ns:film.producer.films_executive_produced#M'), 68)
(('producer produce', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 66)
(('director', '?x#ns:film.film_art_director.films_art_directed#?x'), 64)
(('writer write', '?x#ns:film.writer.film#M'), 62)
(('executive producer', '?x#ns:film.film.executive_produced_by#?x'), 57)
(('costume designer', '?x#ns:film.film.costume_design_by#?x'), 55)
(('film director', '?x#ns:film.director.film#?x'), 52)
(('film director directed', '?x#ns:film.director.film#M'), 52)
(('played', '?x#ns:film.actor.film/ns:film.performance.character#?x'), 47)
(('nationality', '?x#ns:people.person.nationality#?x'), 47)
(('designer', '?x#ns:film.film_costumer_designer.costume_design_for_film#?x'), 45)
(('written', '?x#ns:film.film.directed_by#M'), 44)
(('director direct', '?x#ns:film.director.film#M'), 43)
(('written', '?x#ns:film.writer.film#?x'), 40)
(('produced', '?x#ns:film.producer.films_executive_produced#?x'), 39)
(('producer', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 37)
(('nationality employ', '?x#^ns:people.person.nationality#?x'), 32)
(('founder found', '?x#ns:organization.organization_founder.organizations_founded#M'), 31)
(('editor edit', '?x#ns:film.editor.film#M'), 31)
(('nationality employed', '?x#ns:people.person.nationality#?x'), 30)
(('distributor', '?x#ns:film.film.distributors/ns:film.film_film_distributor_relationship.distributor#?x'), 29)
(('film distributor', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 28)
(('wrote', '?x#ns:film.film.written_by#?x'), 24)
(('actor played', '?x#ns:film.actor.film/ns:film.performance.character#M'), 24)
(('film editor', '?x#ns:film.editor.film#?x'), 21)
(('producer executive produce', '?x#ns:film.producer.films_executive_produced#M'), 21)
(('employed', '?x#ns:organization.organization.founders#M'), 21)
(('producer direct', '?x#ns:film.producer.film|ns:film.production_company.films#M'), 20)
(('art director , director', '?x#ns:film.film_art_director.films_art_directed#M'), 20)
(('distributor employ', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 20)
(('executive producer produce', '?x#ns:film.producer.films_executive_produced#M'), 20)
(('founder', '?x#ns:people.person.employment_history/ns:business.employment_tenure.company#M'), 19)
(('employer', '?x#ns:organization.organization.founders#M'), 19)
(('film', '?x#ns:film.actor.film/ns:film.performance.film#M'), 18)
(('executive producer executive', '?x#ns:film.producer.films_executive_produced#M'), 17)
(('director', '?x#a#ns:film.film_art_director'), 15)
(('employ', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 15)
(('employer employ', '?x#ns:business.employer.employees/ns:business.employment_tenure.person#?x'), 14)
(('employer distribute', '?x#ns:film.film_distributor.films_distributed/ns:film.film_film_distributor_relationship.film#M'), 13)
(('founder founded', '?x#ns:organization.organization.founders#?x'), 13)
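Each line above is a printed Python tuple of the form `(('<natural-language phrase>', '?x#<predicate path>#<argument>'), <count>)`. As a minimal sketch (not part of the original repo; the file path is taken from the record metadata below), such a phrase table can be read back with `ast.literal_eval`:

```python
import ast
from collections import Counter

def load_phrase_table(path: str) -> Counter:
    """Parse lines like (('writer', '?x#ns:film.writer.film#M'), 47417)."""
    counts = Counter()
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            (phrase, pattern), freq = ast.literal_eval(line)
            counts[(phrase, pattern)] = freq
    return counts

# counts = load_phrase_table("ContextualSP/poset_decoding/data/phrase_table")
# print(counts.most_common(3))
```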
|
ContextualSP/poset_decoding/data/phrase_table/0
|
{
"file_path": "ContextualSP/poset_decoding/data/phrase_table",
"repo_id": "ContextualSP",
"token_count": 8291
}
| 244 |
# .coveragerc to control coverage.py
[report]
# regexes for lines to exclude from consideration
exclude_lines =
if __name__ == .__main__.:
ValueError
TypeError
NotImplementedError
omit =
matchzoo/__init__.py
matchzoo/version.py
matchzoo/*/__init__.py
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.coveragerc/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.coveragerc",
"repo_id": "ContextualSP",
"token_count": 97
}
| 245 |
.. MatchZoo documentation master file, created by
sphinx-quickstart on Mon May 28 16:40:41 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to MatchZoo's documentation!
====================================
.. image:: https://travis-ci.org/NTMC-Community/MatchZoo-py.svg?branch=master
:alt: ci
:target: https://travis-ci.org/NTMC-Community/MatchZoo-py/
.. image:: ../../artworks/matchzoo-logo.png
:alt: logo
:align: center
MatchZoo is a toolkit for text matching. It was developed with a focus on facilitating the design, comparison, and sharing of deep text matching models. A number of deep matching methods, such as DRMM, MatchPyramid, MV-LSTM, aNMM, DUET, ARC-I, ARC-II, DSSM, and CDSSM, are provided with a unified interface. Potential tasks related to MatchZoo include document retrieval, question answering, conversational response ranking, paraphrase identification, etc. We are always happy to receive code contributions, suggestions, and comments from all our MatchZoo users.
.. toctree::
:maxdepth: 2
:caption: Contents:
modules
model_reference
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/index.rst/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/index.rst",
"repo_id": "ContextualSP",
"token_count": 377
}
| 246 |
from .lambda_callback import LambdaCallback
from .histogram import Histogram
from .ngram import Ngram
from .padding import BasicPadding
from .padding import DRMMPadding
from .padding import BertPadding
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/__init__.py",
"repo_id": "ContextualSP",
"token_count": 50
}
| 247 |
"""Matchzoo toolkit for token embedding."""
import csv
import typing
import numpy as np
import pandas as pd
import matchzoo as mz
class Embedding(object):
"""
Embedding class.
Examples::
>>> import matchzoo as mz
>>> train_raw = mz.datasets.toy.load_data()
>>> pp = mz.preprocessors.NaivePreprocessor()
>>> train = pp.fit_transform(train_raw, verbose=0)
>>> vocab_unit = mz.build_vocab_unit(train, verbose=0)
>>> term_index = vocab_unit.state['term_index']
>>> embed_path = mz.datasets.embeddings.EMBED_RANK
To load from a file:
>>> embedding = mz.embedding.load_from_file(embed_path)
>>> matrix = embedding.build_matrix(term_index)
>>> matrix.shape[0] == len(term_index)
True
To build your own:
>>> data = {'A':[0, 1], 'B':[2, 3]}
>>> embedding = mz.Embedding(data, 2)
>>> matrix = embedding.build_matrix({'A': 2, 'B': 1, '_PAD': 0})
>>> matrix.shape == (3, 2)
True
"""
def __init__(self, data: dict, output_dim: int):
"""
Embedding.
:param data: Dictionary to use as term to vector mapping.
:param output_dim: The dimension of embedding.
"""
self._data = data
self._output_dim = output_dim
def build_matrix(
self,
term_index: typing.Union[
dict, mz.preprocessors.units.Vocabulary.TermIndex]
) -> np.ndarray:
"""
Build a matrix using `term_index`.
        :param term_index: A `dict` or `TermIndex` to build with. Terms missing
            from `data` are initialized from a random uniform distribution over
            `(-0.2, 0.2)`.
:return: A matrix.
"""
input_dim = len(term_index)
matrix = np.empty((input_dim, self._output_dim))
valid_keys = self._data.keys()
for term, index in term_index.items():
if term in valid_keys:
matrix[index] = self._data[term]
else:
matrix[index] = np.random.uniform(-0.2, 0.2, size=self._output_dim)
return matrix
def load_from_file(file_path: str, mode: str = 'word2vec') -> Embedding:
"""
Load embedding from `file_path`.
:param file_path: Path to file.
:param mode: Embedding file format mode, one of 'word2vec', 'fasttext'
or 'glove'.(default: 'word2vec')
:return: An :class:`matchzoo.embedding.Embedding` instance.
"""
embedding_data = {}
output_dim = 0
if mode == 'word2vec' or mode == 'fasttext':
with open(file_path, 'r') as f:
output_dim = int(f.readline().strip().split(' ')[-1])
for line in f:
current_line = line.rstrip().split(' ')
embedding_data[current_line[0]] = current_line[1:]
elif mode == 'glove':
with open(file_path, 'r') as f:
output_dim = len(f.readline().rstrip().split(' ')) - 1
f.seek(0)
for line in f:
current_line = line.rstrip().split(' ')
embedding_data[current_line[0]] = current_line[1:]
else:
raise TypeError(f"{mode} is not a supported embedding type."
f"`word2vec`, `fasttext` or `glove` expected.")
return Embedding(embedding_data, output_dim)
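A small usage sketch for the module above (the file name and toy vectors are made up; only the `Embedding` class and `load_from_file` defined here are used). It writes a two-word GloVe-style file, loads it, and builds a matrix for a four-term vocabulary:

```python
# Two 3-dimensional vectors in GloVe format: "<token> v1 v2 ... vn" per line.
with open("toy_glove.txt", "w") as f:
    f.write("hello 0.1 0.2 0.3\n")
    f.write("world 0.4 0.5 0.6\n")

embedding = load_from_file("toy_glove.txt", mode="glove")
term_index = {"_PAD": 0, "hello": 1, "world": 2, "unknown": 3}
matrix = embedding.build_matrix(term_index)

# Rows for "hello" and "world" come from the file; "_PAD" and "unknown" fall
# back to the uniform (-0.2, 0.2) initialization inside `build_matrix`.
print(matrix.shape)  # (4, 3)
```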
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/embedding/embedding.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/embedding/embedding.py",
"repo_id": "ContextualSP",
"token_count": 1574
}
| 248 |
"""CrossEntropy metric for Classification."""
import numpy as np
from matchzoo.engine.base_metric import ClassificationMetric
from matchzoo.utils import one_hot
class CrossEntropy(ClassificationMetric):
"""Cross entropy metric."""
ALIAS = ['cross_entropy', 'ce']
def __init__(self):
""":class:`CrossEntropy` constructor."""
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS[0]}"
def __call__(
self,
y_true: np.array,
y_pred: np.array,
eps: float = 1e-12
) -> float:
"""
Calculate cross entropy.
Example:
>>> y_true = [0, 1]
>>> y_pred = [[0.25, 0.25], [0.01, 0.90]]
>>> CrossEntropy()(y_true, y_pred)
0.7458274358333028
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:param eps: The Log loss is undefined for p=0 or p=1,
so probabilities are clipped to max(eps, min(1 - eps, p)).
        :return: Cross entropy.
"""
y_pred = np.clip(y_pred, eps, 1. - eps)
y_true = [
one_hot(y, num_classes=y_pred.shape[1]) for y in y_true
]
return -np.sum(y_true * np.log(y_pred + 1e-9)) / y_pred.shape[0]
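To make the doctest value above concrete, here is a standalone NumPy re-computation (not the library API) that mirrors the clipping and one-hot steps of `CrossEntropy.__call__`:

```python
import numpy as np

y_true = [0, 1]
y_pred = np.array([[0.25, 0.25], [0.01, 0.90]])

eps = 1e-12
y_pred = np.clip(y_pred, eps, 1.0 - eps)
one_hot_true = np.eye(y_pred.shape[1])[y_true]  # [[1, 0], [0, 1]]

ce = -np.sum(one_hot_true * np.log(y_pred + 1e-9)) / y_pred.shape[0]
print(ce)  # ~0.74583, i.e. -(log(0.25) + log(0.90)) / 2
```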
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/cross_entropy.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/cross_entropy.py",
"repo_id": "ContextualSP",
"token_count": 631
}
| 249 |
"""An implementation of DIIN Model."""
import typing
import torch
import torch.nn as nn
from matchzoo import preprocessors
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.dataloader import callbacks
from matchzoo.modules import CharacterEmbedding, SemanticComposite, Matching, DenseNet
class DIIN(BaseModel):
"""
DIIN model.
Examples:
>>> model = DIIN()
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 300
>>> model.params['mask_value'] = 0
>>> model.params['char_embedding_input_dim'] = 100
>>> model.params['char_embedding_output_dim'] = 8
>>> model.params['char_conv_filters'] = 100
>>> model.params['char_conv_kernel_size'] = 5
>>> model.params['first_scale_down_ratio'] = 0.3
>>> model.params['nb_dense_blocks'] = 3
>>> model.params['layers_per_dense_block'] = 8
>>> model.params['growth_rate'] = 20
>>> model.params['transition_scale_down_ratio'] = 0.5
>>> model.params['conv_kernel_size'] = (3, 3)
>>> model.params['pool_kernel_size'] = (2, 2)
>>> model.params['dropout_rate'] = 0.2
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True
)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='char_embedding_input_dim', value=100,
desc="The input dimension of character embedding layer."))
params.add(Param(name='char_embedding_output_dim', value=8,
desc="The output dimension of character embedding layer."))
params.add(Param(name='char_conv_filters', value=100,
desc="The filter size of character convolution layer."))
params.add(Param(name='char_conv_kernel_size', value=5,
desc="The kernel size of character convolution layer."))
params.add(Param(name='first_scale_down_ratio', value=0.3,
desc="The channel scale down ratio of the convolution layer "
"before densenet."))
params.add(Param(name='nb_dense_blocks', value=3,
desc="The number of blocks in densenet."))
params.add(Param(name='layers_per_dense_block', value=8,
desc="The number of convolution layers in dense block."))
params.add(Param(name='growth_rate', value=20,
desc="The filter size of each convolution layer in dense "
"block."))
params.add(Param(name='transition_scale_down_ratio', value=0.5,
desc="The channel scale down ratio of the convolution layer "
"in transition block."))
params.add(Param(name='conv_kernel_size', value=(3, 3),
desc="The kernel size of convolution layer in dense block."))
params.add(Param(name='pool_kernel_size', value=(2, 2),
desc="The kernel size of pooling layer in transition block."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
@classmethod
def get_default_preprocessor(
cls,
truncated_mode: str = 'pre',
truncated_length_left: typing.Optional[int] = None,
truncated_length_right: typing.Optional[int] = None,
filter_mode: str = 'df',
filter_low_freq: float = 1,
filter_high_freq: float = float('inf'),
remove_stop_words: bool = False,
ngram_size: typing.Optional[int] = 1,
) -> BasePreprocessor:
"""
Model default preprocessor.
The preprocessor's transform should produce a correctly shaped data
pack that can be used for training.
:return: Default preprocessor.
"""
return preprocessors.BasicPreprocessor(
truncated_mode=truncated_mode,
truncated_length_left=truncated_length_left,
truncated_length_right=truncated_length_right,
filter_mode=filter_mode,
filter_low_freq=filter_low_freq,
filter_high_freq=filter_high_freq,
remove_stop_words=remove_stop_words,
ngram_size=ngram_size
)
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = 10,
fixed_length_right: int = 30,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = True,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
) -> BaseCallback:
"""
Model default padding callback.
The padding callback's on_batch_unpacked would pad a batch of data to
a fixed length.
:return: Default padding callback.
"""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_word_value,
pad_word_mode=pad_word_mode,
with_ngram=with_ngram,
fixed_ngram_length=fixed_ngram_length,
pad_ngram_value=pad_ngram_value,
pad_ngram_mode=pad_ngram_mode
)
def build(self):
"""Build model structure."""
# Embedding
self.embedding = self._make_default_embedding_layer()
self.char_embedding = CharacterEmbedding(
char_embedding_input_dim=self._params['char_embedding_input_dim'],
char_embedding_output_dim=self._params['char_embedding_output_dim'],
char_conv_filters=self._params['char_conv_filters'],
char_conv_kernel_size=self._params['char_conv_kernel_size']
)
self.exact_maching = Matching(matching_type='exact')
all_embed_dim = self._params['embedding_output_dim'] \
+ self._params['char_conv_filters'] + 1
# Encoding
self.left_encoder = SemanticComposite(
all_embed_dim, self._params['dropout_rate'])
self.right_encoder = SemanticComposite(
all_embed_dim, self._params['dropout_rate'])
# Interaction
self.matching = Matching(matching_type='mul')
# Feature Extraction
self.conv = nn.Conv2d(
in_channels=all_embed_dim,
out_channels=int(all_embed_dim * self._params['first_scale_down_ratio']),
kernel_size=1
)
self.dense_net = DenseNet(
in_channels=int(all_embed_dim * self._params['first_scale_down_ratio']),
nb_dense_blocks=self._params['nb_dense_blocks'],
layers_per_dense_block=self._params['layers_per_dense_block'],
growth_rate=self._params['growth_rate'],
transition_scale_down_ratio=self._params['transition_scale_down_ratio'],
conv_kernel_size=self._params['conv_kernel_size'],
pool_kernel_size=self._params['pool_kernel_size']
)
self.max_pooling = nn.AdaptiveMaxPool2d((1, 1))
# Output
self.out_layer = self._make_output_layer(self.dense_net.out_channels)
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# L = `input_word_left` sequence length
# R = `input_word_right` sequence length
# C = word length
# D1 = word embedding size
# D2 = character embedding size
# shape = [B, L]
# shape = [B, R]
input_word_left, input_word_right = inputs['text_left'], inputs['text_right']
mask_word_left = (input_word_left == self._params['mask_value'])
mask_word_right = (input_word_right == self._params['mask_value'])
# shape = [B, L, C]
# shape = [B, R, C]
input_char_left, input_char_right = inputs['ngram_left'], inputs['ngram_right']
# shape = [B, L, D1]
# shape = [B, R, D1]
embed_word_left = self.dropout(self.embedding(input_word_left.long()))
embed_word_right = self.dropout(self.embedding(input_word_right.long()))
# shape = [B, L, D2]
# shape = [B, R, D2]
embed_char_left = self.dropout(self.char_embedding(input_char_left.long()))
embed_char_right = self.dropout(self.char_embedding(input_char_right.long()))
# shape = [B, L, 1]
# shape = [B, R, 1]
exact_match_left, exact_match_right = self.exact_maching(
input_word_left, input_word_right)
exact_match_left = exact_match_left.masked_fill(mask_word_left, 0)
exact_match_right = exact_match_right.masked_fill(mask_word_right, 0)
exact_match_left = torch.unsqueeze(exact_match_left, dim=-1)
exact_match_right = torch.unsqueeze(exact_match_right, dim=-1)
# shape = [B, L, D]
# shape = [B, R, D]
embed_left = torch.cat(
[embed_word_left, embed_char_left, exact_match_left], dim=-1)
embed_right = torch.cat(
[embed_word_right, embed_char_right, exact_match_right], dim=-1)
encode_left = self.left_encoder(embed_left)
encode_right = self.right_encoder(embed_right)
# shape = [B, L, R, D]
interaction = self.matching(encode_left, encode_right)
interaction = self.conv(self.dropout(interaction.permute(0, 3, 1, 2)))
interaction = self.dense_net(interaction)
interaction = self.max_pooling(interaction).squeeze(dim=-1).squeeze(dim=-1)
output = self.out_layer(interaction)
return output
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/diin.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/diin.py",
"repo_id": "ContextualSP",
"token_count": 4758
}
| 250 |
"""Bert module."""
import typing
import torch
import torch.nn as nn
from pytorch_transformers import BertModel
class BertModule(nn.Module):
"""
Bert module.
BERT (from Google) released with the paper BERT: Pre-training of Deep
Bidirectional Transformers for Language Understanding by Jacob Devlin,
Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
:param mode: String, supported mode can be referred
https://huggingface.co/pytorch-transformers/pretrained_models.html.
"""
def __init__(self, mode: str = 'bert-base-uncased'):
""":class:`BertModule` constructor."""
super().__init__()
self.bert = BertModel.from_pretrained(mode)
def forward(self, x, y):
"""Forward."""
input_ids = torch.cat((x, y), dim=-1)
token_type_ids = torch.cat((
torch.zeros_like(x),
torch.ones_like(y)), dim=-1).long()
attention_mask = (input_ids != 0)
return self.bert(input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
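A shape-only sketch of how `forward` above assembles the BERT inputs from a left/right pair (dummy token ids, no pretrained weights loaded; the ids are purely illustrative):

```python
import torch

x = torch.tensor([[101, 2054, 102]])  # "left" text, shape [1, 3]
y = torch.tensor([[2129, 102, 0]])    # "right" text with one pad token, shape [1, 3]

input_ids = torch.cat((x, y), dim=-1)                            # shape [1, 6]
token_type_ids = torch.cat((torch.zeros_like(x),
                            torch.ones_like(y)), dim=-1).long()  # 0s for x, 1s for y
attention_mask = (input_ids != 0)                                # pad id 0 is masked out

print(input_ids.shape)          # torch.Size([1, 6])
print(token_type_ids.tolist())  # [[0, 0, 0, 1, 1, 1]]
print(attention_mask.tolist())  # [[True, True, True, True, True, False]]
```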
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/bert_module.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/bert_module.py",
"repo_id": "ContextualSP",
"token_count": 444
}
| 251 |
"""Wrapper function organizes a number of transform functions."""
import typing
import functools
from .units.unit import Unit
def chain_transform(units: typing.List[Unit]) -> typing.Callable:
"""
Compose unit transformations into a single function.
    :param units: List of :class:`Unit`.
"""
@functools.wraps(chain_transform)
def wrapper(arg):
"""Wrapper function of transformations composition."""
for unit in units:
arg = unit.transform(arg)
return arg
unit_names = ' => '.join(unit.__class__.__name__ for unit in units)
wrapper.__name__ += ' of ' + unit_names
return wrapper
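A usage sketch for `chain_transform` with two toy units (hypothetical stand-ins, not MatchZoo's shipped preprocessing units); each only needs a `transform` method, which is all the composed wrapper calls:

```python
class Lowercase:
    def transform(self, input_):
        return input_.lower()

class Tokenize:
    def transform(self, input_):
        return input_.split()

pipeline = chain_transform([Lowercase(), Tokenize()])
print(pipeline.__name__)        # "chain_transform of Lowercase => Tokenize"
print(pipeline("Hello World"))  # ['hello', 'world']
```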
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/chain_transform.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/chain_transform.py",
"repo_id": "ContextualSP",
"token_count": 232
}
| 252 |
import abc
import typing
class Unit(metaclass=abc.ABCMeta):
"""Process unit do not persive state (i.e. do not need fit)."""
@abc.abstractmethod
def transform(self, input_: typing.Any):
"""Abstract base method, need to be implemented in subclass."""
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/unit.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/unit.py",
"repo_id": "ContextualSP",
"token_count": 91
}
| 253 |
"""Define Keras tensor type."""
import typing
TensorType = typing.Any
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/tensor_type.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/tensor_type.py",
"repo_id": "ContextualSP",
"token_count": 23
}
| 254 |
import torch
import pytest
from matchzoo.modules import Matching
def test_matching():
x = torch.randn(2, 3, 2)
y = torch.randn(2, 4, 2)
z = torch.randn(2, 3, 3)
for matching_type in ['dot', 'mul', 'plus', 'minus', 'concat']:
Matching(matching_type=matching_type)(x, y)
with pytest.raises(ValueError):
Matching(matching_type='error')
with pytest.raises(RuntimeError):
Matching()(x, z)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/modules/test_modules.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/modules/test_modules.py",
"repo_id": "ContextualSP",
"token_count": 191
}
| 255 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.models.CDSSM.get_default_preprocessor(
ngram_size = 3
)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
valid_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
triletter_callback = mz.dataloader.callbacks.Ngram(preprocessor, mode='sum')
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1,
callbacks=[triletter_callback]
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
callbacks=[triletter_callback]
)
padding_callback = mz.models.CDSSM.get_default_padding_callback(
with_ngram=True,
fixed_ngram_length=preprocessor.context['ngram_vocab_size']
)
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
sort=False,
resample=True,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
sort=False,
callback=padding_callback
)
model = mz.models.CDSSM()
model.params['task'] = ranking_task
model.params['vocab_size'] = preprocessor.context['ngram_vocab_size']
model.params['filters'] = 64
model.params['kernel_size'] = 3
model.params['conv_activation_func'] = 'tanh'
model.params['mlp_num_layers'] = 1
model.params['mlp_num_units'] = 64
model.params['mlp_num_fan_out'] = 64
model.params['mlp_activation_func'] = 'tanh'
model.params['dropout_rate'] = 0.8
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/cdssm.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/cdssm.ipynb",
"repo_id": "ContextualSP",
"token_count": 768
}
| 256 |
set seed=1
set config_file=train_configs_bert/concat.none.jsonnet
set model_file=checkpoints_sparc/sparc_bert_concat_none_model
set tables_file=dataset_sparc/tables.json
set database_path=dataset_sparc/database
set dataset_path=dataset_sparc
set train_data_path=dataset_sparc/train.json
set validation_data_path=dataset_sparc/dev.json
allennlp train -s %model_file% %config_file% ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
-o {"""model.serialization_dir""":"""%model_file%""","""random_seed""":"""%seed%""","""numpy_seed""":"""%seed%""","""pytorch_seed""":"""%seed%""","""dataset_reader.tables_file""":"""%tables_file%""","""dataset_reader.database_path""":"""%database_path%""","""train_data_path""":"""%train_data_path%""","""validation_data_path""":"""%validation_data_path%""","""model.dataset_path""":"""%dataset_path%"""}
|
ContextualSP/semantic_parsing_in_context/bash_files/windows/train_sparc_bert.bat/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/windows/train_sparc_bert.bat",
"repo_id": "ContextualSP",
"token_count": 339
}
| 257 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
from overrides import overrides
@Predictor.register("sparc")
class SparcPredictor(Predictor):
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
@overrides
def predict_json(self, inputs: JsonDict) -> JsonDict:
instance = self._json_to_instance(inputs)
# Now get result
results = self.predict_instance(instance)
return results
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
        Expects JSON that looks like ``{"question": "...", "database_id": "..."}``
        or ``{"interaction": [...], "database_id": "..."}``.
"""
utter_list = []
if "interaction" in json_dict:
"""
predict mode
"""
for ins in json_dict["interaction"]:
utter_list.append(ins["utterance"])
else:
"""
demo mode
"""
utter_list = json_dict["question"].split(";")
db_id = json_dict["database_id"]
instance = self._dataset_reader.text_to_instance(utter_list, # type: ignore
db_id)
return instance
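Example payloads for the two branches of `_json_to_instance` above (the field names come from the code; the utterances are made up, and the database id reuses `concert_singer` from elsewhere in this repo):

```python
# Demo mode: a single "question" string, turns separated by ";".
demo_input = {
    "question": "Show all singers; order them by age",
    "database_id": "concert_singer",
}

# Predict mode: an explicit "interaction" list, one utterance per turn.
predict_input = {
    "interaction": [
        {"utterance": "Show all singers"},
        {"utterance": "order them by age"},
    ],
    "database_id": "concert_singer",
}

# Either payload is turned into a single multi-turn instance via
# self._dataset_reader.text_to_instance(utter_list, db_id).
```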
|
ContextualSP/semantic_parsing_in_context/predictor/sparc_predictor.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/predictor/sparc_predictor.py",
"repo_id": "ContextualSP",
"token_count": 650
}
| 258 |
## Introduction
This paper introduces [UniSAr](https://arxiv.org/pdf/2203.07781.pdf), which extends existing autoregressive language models to incorporate three non-invasive extensions to make them structure-aware:
(1) adding structure mark to encode database schema, conversation context, and their relationships;
(2) constrained decoding to decode well structured SQL for a given database schema; and
(3) SQL completion to complete potential missing JOIN relationships in SQL based on database schema.
[//]: # (On seven well-known text-to-SQL datasets covering multi-domain, multi-table and multi-turn, UniSAr demonstrates highly comparable or better performance to the most advanced specifically-designed text-to-SQL models.)
## Dataset and Model
[Spider](https://github.com/taoyds/spider) -> `./data/spider`
[Fine-tuned BART model](https://huggingface.co/dreamerdeo/mark-bart/tree/main) -> `./models/spider_sl`
(Please download this model with `git-lfs` to avoid the [issue](https://github.com/DreamerDeo/UniSAr_text2sql/issues/1).)
```angular2html
sudo apt-get install git-lfs
git lfs install
git clone https://huggingface.co/dreamerdeo/mark-bart
```
## Main dependencies
* Python version >= 3.6
* PyTorch version >= 1.5.0
* `pip install -r requirements.txt`
* fairseq is going through changes without backward compatibility. Install `fairseq` from source and use [this](https://github.com/nicola-decao/fairseq/tree/fixing_prefix_allowed_tokens_fn) commit for reproducibility. See [here](https://github.com/pytorch/fairseq/pull/3276) for the current PR that should fix `fairseq/master`.
## Evaluation Pipeline
Step 1: Preprocess via adding schema-linking and value-linking tag.
`python step1_schema_linking.py`
Step 2: Building the input and output for BART.
`python step2_serialization.py`
Step 3: Evaluation Script with/without constrained decoding.
`python step3_evaluate.py --constrain`
## Results
Prediction: `69.34`
Prediction with Constrained Decoding: `70.02`
## Interactive
`python interactive.py --logdir ./models/spider-sl --db_id student_1 --db-path ./data/spider/database --schema-path ./data/spider/tables.json`
## Reference Code
`https://github.com/ryanzhumich/editsql`
`https://github.com/benbogin/spider-schema-gnn-global`
`https://github.com/ElementAI/duorat`
`https://github.com/facebookresearch/GENRE`
|
ContextualSP/unified_parser_text_to_sql/README.md/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/README.md",
"repo_id": "ContextualSP",
"token_count": 718
}
| 259 |
import os
import traceback
import re
import sys
import json
import sqlite3
import random
from os import listdir, makedirs
from collections import OrderedDict
from nltk import word_tokenize, tokenize
from os.path import isfile, isdir, join, split, exists, splitext
from ..process_sql import get_sql
from .schema import Schema, get_schemas_from_json
if __name__ == "__main__":
sql = "SELECT name , country , age FROM singer ORDER BY age DESC"
db_id = "concert_singer"
table_file = "tables.json"
schemas, db_names, tables = get_schemas_from_json(table_file)
schema = schemas[db_id]
table = tables[db_id]
schema = Schema(schema, table)
sql_label = get_sql(schema, sql)
|
ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/parse_sql_one.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/parse_sql_one.py",
"repo_id": "ContextualSP",
"token_count": 251
}
| 260 |
# Searching the Search Space of Vision Transformer
**This is an official implementation of S3.**
In this work, instead of searching for architectures within a predefined search space, we propose to first search the search space itself, with the help of AutoFormer, to automatically find a better space. After that, we search for architectures within the searched space.
In addition, we provide insightful observations and guidelines for general vision transformer design.
<div align="center">
<img width="80%" alt="S3 overview" src=".figure/overview.jpg"/>
</div>
## Environment Setup
To set up the environment, you can run the following commands:
```buildoutcfg
conda create -n SSS python=3.6
conda activate SSS
pip install -r requirements.txt
```
## Data Preparation
You first need to download [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you could use the following script: <https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh>
The directory structure is the standard layout, as follows.
```
/path/to/imagenet/
train/
class1/
img1.jpeg
class2/
img2.jpeg
val/
class1/
img3.jpeg
    class2/
img4.jpeg
```
## Model Zoo
For evaluation, we provide the checkpoints and configs of our models.
After downloading the models, you can run the evaluation following the description in *Evaluation*.
Model download links:
Model | Params. | Top-1 Acc. % | Top-5 Acc. % | Model
--- |:---:|:---:|:---:|:---:
AutoFormerV2-T | 28M | 82.1 | 95.8 | [link](https://github.com/silent-chen/AutoFormerV2-model-zoo/releases/download/v1.0.0/S3-T.pth)/[config](./configs/S3-T.yaml)
AutoFormerV2-S | 50M | 83.7 | 96.4 | [link](https://github.com/silent-chen/AutoFormerV2-model-zoo/releases/download/v1.0.0/S3-S.pth)/[config](./configs/S3-S.yaml)
AutoFormerV2-B | 71M | 84.0 | 96.6 | [link](https://github.com/silent-chen/AutoFormerV2-model-zoo/releases/download/v1.0.0/S3-B.pth)/[config](./configs/S3-B.yaml)
### Evaluation
To evaluate our trained models, you need to put the downloaded model in `/PATH/TO/CHECKPOINT`. After that, you can use the following command to test the model (please change the config file and model checkpoint according to the model you are evaluating; here we use AutoFormerV2-B as an example).
```buildoutcfg
python -m torch.distributed.launch --nproc_per_node=8 --use_env evaluation.py --data-path /PATH/TO/IMAGENT \
--dist-eval --cfg ./config/S3-B.yaml --resume /PATH/TO/CHECKPOINT --eval
```
## Performance
We give the performance comparison between S3 and other state-of-the-art methods under different resource constraints in terms of Top-1 accuracy on ImageNet. Our method achieves very competitive performance, being superior to the recent DeiT, ViT, and Swin.
<div align="center">
<img width="80%" alt="Performance" src=".figure/performance.jpg"/>
</div>
## Bibtex
If this repo is helpful for you, please consider citing it. Thank you! :)
```bibtex
@article{S3,
title={Searching the Search Space of Vision Transformer},
author={Minghao, Chen and Kan, Wu and Bolin, Ni and Houwen, Peng and Bei, Liu and Jianlong, Fu and Hongyang, Chao and Haibin, Ling},
booktitle={Conference and Workshop on Neural Information Processing Systems (NeurIPS)},
year={2021}
}
```
## Acknowledgements
The codes are inspired by [HAT](https://github.com/mit-han-lab/hardware-aware-transformers), [timm](https://github.com/rwightman/pytorch-image-models), [DeiT](https://github.com/facebookresearch/deit), [SPOS](https://github.com/megvii-model/SinglePathOneShot), [AutoFormer](https://github.com/microsoft/Cream/tree/main/AutoFormer), [Swin](https://github.com/microsoft/Swin-Transformer) .
|
Cream/AutoFormerV2/README.md/0
|
{
"file_path": "Cream/AutoFormerV2/README.md",
"repo_id": "Cream",
"token_count": 1230
}
| 261 |
import logging
import torch.nn as nn
from ..runner import load_checkpoint
from .weight_init import constant_init, kaiming_init, normal_init
def conv3x3(in_planes, out_planes, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
padding=dilation,
dilation=dilation)
def make_vgg_layer(inplanes,
planes,
num_blocks,
dilation=1,
with_bn=False,
ceil_mode=False):
layers = []
for _ in range(num_blocks):
layers.append(conv3x3(inplanes, planes, dilation))
if with_bn:
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.ReLU(inplace=True))
inplanes = planes
layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
return layers
class VGG(nn.Module):
"""VGG backbone.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
with_bn (bool): Use BatchNorm or not.
num_classes (int): number of classes for classification.
num_stages (int): VGG stages, normally 5.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
"""
arch_settings = {
11: (1, 1, 2, 2, 2),
13: (2, 2, 2, 2, 2),
16: (2, 2, 3, 3, 3),
19: (2, 2, 4, 4, 4)
}
def __init__(self,
depth,
with_bn=False,
num_classes=-1,
num_stages=5,
dilations=(1, 1, 1, 1, 1),
out_indices=(0, 1, 2, 3, 4),
frozen_stages=-1,
bn_eval=True,
bn_frozen=False,
ceil_mode=False,
with_last_pool=True):
super(VGG, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for vgg'.format(depth))
assert num_stages >= 1 and num_stages <= 5
stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
assert len(dilations) == num_stages
assert max(out_indices) <= num_stages
self.num_classes = num_classes
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.inplanes = 3
start_idx = 0
vgg_layers = []
self.range_sub_modules = []
for i, num_blocks in enumerate(self.stage_blocks):
num_modules = num_blocks * (2 + with_bn) + 1
end_idx = start_idx + num_modules
dilation = dilations[i]
planes = 64 * 2**i if i < 4 else 512
vgg_layer = make_vgg_layer(
self.inplanes,
planes,
num_blocks,
dilation=dilation,
with_bn=with_bn,
ceil_mode=ceil_mode)
vgg_layers.extend(vgg_layer)
self.inplanes = planes
self.range_sub_modules.append([start_idx, end_idx])
start_idx = end_idx
if not with_last_pool:
vgg_layers.pop(-1)
self.range_sub_modules[-1][1] -= 1
self.module_name = 'features'
self.add_module(self.module_name, nn.Sequential(*vgg_layers))
if self.num_classes > 0:
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
vgg_layers = getattr(self, self.module_name)
for i, num_blocks in enumerate(self.stage_blocks):
for j in range(*self.range_sub_modules[i]):
vgg_layer = vgg_layers[j]
x = vgg_layer(x)
if i in self.out_indices:
outs.append(x)
if self.num_classes > 0:
x = x.view(x.size(0), -1)
x = self.classifier(x)
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(VGG, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
vgg_layers = getattr(self, self.module_name)
if mode and self.frozen_stages >= 0:
for i in range(self.frozen_stages):
for j in range(*self.range_sub_modules[i]):
mod = vgg_layers[j]
mod.eval()
for param in mod.parameters():
param.requires_grad = False
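# --- Hedged usage sketch (appended for illustration; not part of the original mmcv file). ---
# It assumes the relative imports above resolve, i.e. this module is used inside mmcv
# (e.g. `python -m mmcv.cnn.vgg`); the shape comment follows from five 2x2 max-pool stages.
if __name__ == '__main__':
    import torch
    backbone = VGG(depth=16, out_indices=(4,))   # VGG-16 backbone, return last stage only
    backbone.init_weights()                      # random init (no pretrained checkpoint)
    feat = backbone(torch.randn(1, 3, 224, 224))
    print(feat.shape)                            # torch.Size([1, 512, 7, 7])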
|
Cream/CDARTS/CDARTS_detection/mmcv/cnn/vgg.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/cnn/vgg.py",
"repo_id": "Cream",
"token_count": 3264
}
| 262 |
from __future__ import division
import cv2
def _scale_size(size, scale):
"""Rescale a size by a ratio.
Args:
size (tuple): w, h.
scale (float): Scaling factor.
Returns:
tuple[int]: scaled size.
"""
w, h = size
return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
interp_codes = {
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC,
'area': cv2.INTER_AREA,
'lanczos': cv2.INTER_LANCZOS4
}
def imresize(img, size, return_scale=False, interpolation='bilinear'):
"""Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
"""
h, w = img.shape[:2]
resized_img = cv2.resize(
img, size, interpolation=interp_codes[interpolation])
if not return_scale:
return resized_img
else:
w_scale = size[0] / w
h_scale = size[1] / h
return resized_img, w_scale, h_scale
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
"""Resize image to the same size of a given image.
Args:
img (ndarray): The input image.
dst_img (ndarray): The target image.
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Same as :func:`resize`.
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
"""
h, w = dst_img.shape[:2]
return imresize(img, (w, h), return_scale, interpolation)
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
"""Resize image while keeping the aspect ratio.
Args:
img (ndarray): The input image.
scale (float or tuple[int]): The scaling factor or maximum size.
If it is a float number, then the image will be rescaled by this
factor, else if it is a tuple of 2 integers, then the image will
be rescaled as large as possible within the scale.
return_scale (bool): Whether to return the scaling factor besides the
rescaled image.
interpolation (str): Same as :func:`resize`.
Returns:
ndarray: The rescaled image.
"""
h, w = img.shape[:2]
if isinstance(scale, (float, int)):
if scale <= 0:
raise ValueError(
'Invalid scale {}, must be positive.'.format(scale))
scale_factor = scale
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
scale_factor = min(max_long_edge / max(h, w),
max_short_edge / min(h, w))
else:
raise TypeError(
'Scale must be a number or tuple of int, but got {}'.format(
type(scale)))
new_size = _scale_size((w, h), scale_factor)
rescaled_img = imresize(img, new_size, interpolation=interpolation)
if return_scale:
return rescaled_img, scale_factor
else:
return rescaled_img
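# Hedged usage sketch (appended for illustration; not part of the original file).
# Only numpy and cv2 are required; the target sizes below are arbitrary examples.
if __name__ == '__main__':
    import numpy as np
    img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    small = imresize(img, (320, 240))                                # exact (w, h) target
    scaled, factor = imrescale(img, (1333, 800), return_scale=True)  # keep aspect ratio
    print(small.shape, scaled.shape, factor)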
|
Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/resize.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/resize.py",
"repo_id": "Cream",
"token_count": 1486
}
| 263 |
import time
from .hook import Hook
class IterTimerHook(Hook):
def before_epoch(self, runner):
self.t = time.time()
def before_iter(self, runner):
runner.log_buffer.update({'data_time': time.time() - self.t})
def after_iter(self, runner):
runner.log_buffer.update({'time': time.time() - self.t})
self.t = time.time()
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/iter_timer.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/iter_timer.py",
"repo_id": "Cream",
"token_count": 151
}
| 264 |
import os.path as osp
import sys
from argparse import ArgumentParser
from importlib import import_module
from addict import Dict
from .misc import collections_abc
from .path import check_file_exist
class ConfigDict(Dict):
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError("'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
except Exception as e:
ex = e
else:
return value
raise ex
def add_args(parser, cfg, prefix=''):
for k, v in cfg.items():
if isinstance(v, str):
parser.add_argument('--' + prefix + k)
elif isinstance(v, int):
parser.add_argument('--' + prefix + k, type=int)
elif isinstance(v, float):
parser.add_argument('--' + prefix + k, type=float)
elif isinstance(v, bool):
parser.add_argument('--' + prefix + k, action='store_true')
elif isinstance(v, dict):
add_args(parser, v, k + '.')
elif isinstance(v, collections_abc.Iterable):
parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')
else:
            print('cannot parse key {} of type {}'.format(prefix + k, type(v)))
return parser
class Config(object):
"""A facility for config and config files.
It supports common file formats as configs: python/json/yaml. The interface
is the same as a dict object and also allows access config values as
attributes.
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{'b1': [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile('tests/data/config/a.py')
>>> cfg.filename
"/home/kchen/projects/mmcv/tests/data/config/a.py"
>>> cfg.item4
'test'
>>> cfg
"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
"""
@staticmethod
def fromfile(filename):
filename = osp.abspath(osp.expanduser(filename))
check_file_exist(filename)
if filename.endswith('.py'):
module_name = osp.basename(filename)[:-3]
if '.' in module_name:
raise ValueError('Dots are not allowed in config file path.')
config_dir = osp.dirname(filename)
sys.path.insert(0, config_dir)
mod = import_module(module_name)
sys.path.pop(0)
cfg_dict = {
name: value
for name, value in mod.__dict__.items()
if not name.startswith('__')
}
elif filename.endswith(('.yml', '.yaml', '.json')):
import mmcv
cfg_dict = mmcv.load(filename)
else:
raise IOError('Only py/yml/yaml/json type are supported now!')
return Config(cfg_dict, filename=filename)
@staticmethod
def auto_argparser(description=None):
"""Generate argparser from config file automatically (experimental)
"""
partial_parser = ArgumentParser(description=description)
partial_parser.add_argument('config', help='config file path')
cfg_file = partial_parser.parse_known_args()[0].config
cfg = Config.fromfile(cfg_file)
parser = ArgumentParser(description=description)
parser.add_argument('config', help='config file path')
add_args(parser, cfg)
return parser, cfg
def __init__(self, cfg_dict=None, filename=None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError('cfg_dict must be a dict, but got {}'.format(
type(cfg_dict)))
super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
super(Config, self).__setattr__('_filename', filename)
if filename:
with open(filename, 'r') as f:
super(Config, self).__setattr__('_text', f.read())
else:
super(Config, self).__setattr__('_text', '')
@property
def filename(self):
return self._filename
@property
def text(self):
return self._text
def __repr__(self):
return 'Config (path: {}): {}'.format(self.filename,
self._cfg_dict.__repr__())
def __len__(self):
return len(self._cfg_dict)
def __getattr__(self, name):
return getattr(self._cfg_dict, name)
def __getitem__(self, name):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
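# Hedged usage sketch (appended for illustration; not part of the original file).
# Assumes the relative imports above resolve, i.e. this module is used inside mmcv.
if __name__ == '__main__':
    cfg = Config(dict(optimizer=dict(type='SGD', lr=0.02), total_epochs=12))
    cfg.total_epochs = 24                                   # attribute-style writes go through ConfigDict
    print(cfg.optimizer.lr, cfg['total_epochs'], len(cfg))  # 0.02 24 2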
|
Cream/CDARTS/CDARTS_detection/mmcv/utils/config.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/utils/config.py",
"repo_id": "Cream",
"token_count": 2414
}
| 265 |
from enum import Enum
import numpy as np
from mmcv.utils import is_str
class Color(Enum):
"""An enum that defines common colors.
Contains red, green, blue, cyan, yellow, magenta, white and black.
"""
red = (0, 0, 255)
green = (0, 255, 0)
blue = (255, 0, 0)
cyan = (255, 255, 0)
yellow = (0, 255, 255)
magenta = (255, 0, 255)
white = (255, 255, 255)
black = (0, 0, 0)
def color_val(color):
"""Convert various input to color tuples.
Args:
color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[int]: A tuple of 3 integers indicating BGR channels.
"""
if is_str(color):
return Color[color].value
elif isinstance(color, Color):
return color.value
elif isinstance(color, tuple):
assert len(color) == 3
for channel in color:
assert channel >= 0 and channel <= 255
return color
elif isinstance(color, int):
assert color >= 0 and color <= 255
return color, color, color
elif isinstance(color, np.ndarray):
assert color.ndim == 1 and color.size == 3
assert np.all((color >= 0) & (color <= 255))
color = color.astype(np.uint8)
return tuple(color)
else:
raise TypeError('Invalid type for color: {}'.format(type(color)))
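# Hedged usage sketch (appended for illustration; not part of the original file).
# All returned tuples are in BGR channel order, matching the Color enum above.
if __name__ == '__main__':
    print(color_val('green'))      # (0, 255, 0)
    print(color_val(Color.red))    # (0, 0, 255)
    print(color_val(128))          # (128, 128, 128)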
|
Cream/CDARTS/CDARTS_detection/mmcv/visualization/color.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/visualization/color.py",
"repo_id": "Cream",
"token_count": 556
}
| 266 |
import torch
class AnchorGenerator(object):
def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
self.base_size = base_size
self.scales = torch.Tensor(scales)
self.ratios = torch.Tensor(ratios)
self.scale_major = scale_major
self.ctr = ctr
self.base_anchors = self.gen_base_anchors()
@property
def num_base_anchors(self):
return self.base_anchors.size(0)
def gen_base_anchors(self):
w = self.base_size
h = self.base_size
if self.ctr is None:
x_ctr = 0.5 * (w - 1)
y_ctr = 0.5 * (h - 1)
else:
x_ctr, y_ctr = self.ctr
h_ratios = torch.sqrt(self.ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1)
else:
ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1)
base_anchors = torch.stack(
[
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
],
dim=-1).round()
return base_anchors
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_anchors(self, featmap_size, stride=16, device='cuda'):
base_anchors = self.base_anchors.to(device)
feat_h, feat_w = featmap_size
shift_x = torch.arange(0, feat_w, device=device) * stride
shift_y = torch.arange(0, feat_h, device=device) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
shifts = shifts.type_as(base_anchors)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def valid_flags(self, featmap_size, valid_size, device='cuda'):
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(
valid.size(0), self.num_base_anchors).contiguous().view(-1)
return valid
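# Hedged usage sketch (appended for illustration; not part of the original file).
# Generates anchors for a single feature level on CPU; only torch is required.
if __name__ == '__main__':
    gen = AnchorGenerator(base_size=8, scales=[8], ratios=[0.5, 1.0, 2.0])
    anchors = gen.grid_anchors((4, 6), stride=8, device='cpu')
    print(gen.num_base_anchors, anchors.shape)   # 3 torch.Size([72, 4])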
|
Cream/CDARTS/CDARTS_detection/mmdet/core/anchor/anchor_generator.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/anchor/anchor_generator.py",
"repo_id": "Cream",
"token_count": 1566
}
| 267 |
import numpy as np
import torch
from .random_sampler import RandomSampler
class IoUBalancedNegSampler(RandomSampler):
"""IoU Balanced Sampling
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
    Sampling proposals according to their IoU. `floor_fraction` of the required
    RoIs are randomly sampled from proposals whose IoU is lower than `floor_thr`.
    The rest are sampled from proposals whose IoU is higher than `floor_thr`;
    they are drawn evenly from `num_bins` IoU bins of equal width.
Args:
num (int): number of proposals.
pos_fraction (float): fraction of positive proposals.
floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
set to -1 if all using IoU balanced sampling.
floor_fraction (float): sampling fraction of proposals under floor_thr.
num_bins (int): number of bins in IoU balanced sampling.
"""
def __init__(self,
num,
pos_fraction,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
**kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
**kwargs)
assert floor_thr >= 0 or floor_thr == -1
assert 0 <= floor_fraction <= 1
assert num_bins >= 1
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
def sample_via_interval(self, max_overlaps, full_set, num_expected):
max_iou = max_overlaps.max()
iou_interval = (max_iou - self.floor_thr) / self.num_bins
per_num_expected = int(num_expected / self.num_bins)
sampled_inds = []
for i in range(self.num_bins):
start_iou = self.floor_thr + i * iou_interval
end_iou = self.floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou,
max_overlaps < end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
                tmp_sampled_set = np.array(tmp_inds, dtype=int)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(self, assign_result, num_expected, **kwargs):
neg_inds = torch.nonzero(assign_result.gt_inds == 0)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
# balance sampling for negative samples
neg_set = set(neg_inds.cpu().numpy())
if self.floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0,
max_overlaps < self.floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= self.floor_thr)[0])
elif self.floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - self.floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if self.num_bins >= 2:
iou_sampled_inds = self.sample_via_interval(
max_overlaps, set(iou_sampling_neg_inds),
num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=int)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(
assign_result.gt_inds.device)
return sampled_inds
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py",
"repo_id": "Cream",
"token_count": 3156
}
| 268 |
from collections import abc
import numpy as np
import torch
def cast_tensor_type(inputs, src_type, dst_type):
if isinstance(inputs, torch.Tensor):
return inputs.to(dst_type)
elif isinstance(inputs, str):
return inputs
elif isinstance(inputs, np.ndarray):
return inputs
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type, dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type, dst_type) for item in inputs)
else:
return inputs
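# Hedged usage sketch (appended for illustration; not part of the original file).
# Recursively casts tensors inside nested containers while leaving strings and
# numpy arrays untouched, as implemented above.
if __name__ == '__main__':
    batch = {'img': torch.ones(2, 3),
             'meta': ['keep', 'as', 'is'],
             'targets': (torch.zeros(2), np.array([1, 2]))}
    half = cast_tensor_type(batch, torch.float32, torch.half)
    print(half['img'].dtype, half['targets'][0].dtype)   # torch.float16 torch.float16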
|
Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/fp16/utils.py",
"repo_id": "Cream",
"token_count": 298
}
| 269 |
from .build_loader import build_dataloader, build_dataloader_arch
from .sampler import DistributedGroupSampler, GroupSampler
__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader', 'build_dataloader_arch']
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/loader/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/loader/__init__.py",
"repo_id": "Cream",
"token_count": 73
}
| 270 |
from .anchor_head import AnchorHead
from .guided_anchor_head import GuidedAnchorHead, FeatureAdaption
from .fcos_head import FCOSHead
from .rpn_head import RPNHead
from .ga_rpn_head import GARPNHead
from .retina_head import RetinaHead
from .ga_retina_head import GARetinaHead
from .ssd_head import SSDHead
__all__ = [
'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead',
'GARPNHead', 'RetinaHead', 'GARetinaHead', 'SSDHead', 'FCOSHead'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/__init__.py",
"repo_id": "Cream",
"token_count": 174
}
| 271 |
predefine_archs = {
'fbnet_b': {
'genotypes' : [
'conv3', 'ir_k3_e1',
'ir_k3_e6', 'ir_k5_e6', 'ir_k3_e1', 'ir_k3_e1',
'ir_k5_e6', 'ir_k5_e3', 'ir_k3_e6', 'ir_k5_e6',
'ir_k5_e6', 'ir_k5_e1', 'skip' , 'ir_k5_e3',
'ir_k5_e6', 'ir_k3_e1', 'ir_k5_e1', 'ir_k5_e3',
'ir_k5_e6', 'ir_k5_e1', 'ir_k5_e6', 'ir_k5_e6',
'ir_k3_e6', 'conv1', 'avgpool'],
'strides' : [
2, 1,
2, 1, 1, 1,
2, 1, 1, 1,
2, 1, 1, 1,
1, 1, 1, 1,
2, 1, 1, 1,
1, 1, 7],
'out_channels' : [
16, 16,
24, 24, 24, 24,
32, 32, 32, 32,
64, 64, 64, 64,
112, 112, 112, 112,
184, 184, 184, 184,
352, 1984, 1984,
],
'dropout_ratio' : 0.2,
'search_space': 'fbsb',
},
'fbnet_hit': {
'genotypes' : [
'conv3',
'ir_k3_e3', 'ir_k3_e3', 'ir_k3_e3_r2', 'ir_k3_e3',
'ir_k5_e6', 'ir_k5_e6', 'ir_k3_e3', 'ir_k3_e3',
'ir_k7_e6', 'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e3',
'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e6', 'ir_k5_e6_r2',
'ir_k7_e6', 'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e6',
'ir_k3_e3', 'conv1'],
'strides' : [
2,
2, 1, 1, 1,
2, 1, 1, 1,
2, 1, 1, 1,
1, 1, 1, 1,
2, 1, 1, 1,
1, 1],
'out_channels' : [
16,
48, 48, 48, 48,
96, 96, 96, 96,
184, 184, 184, 184,
256, 256, 256, 256,
352, 352, 352, 352,
1024, 2048
],
'dropout_ratio' : 0.2,
'search_space': 'fbsb',
},
}
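# Hedged usage sketch (appended for illustration; not part of the original file).
# Each predefined architecture keeps its genotypes, strides and output channels
# aligned index by index.
if __name__ == '__main__':
    arch = predefine_archs['fbnet_b']
    assert len(arch['genotypes']) == len(arch['strides']) == len(arch['out_channels'])
    print(len(arch['genotypes']), arch['search_space'])   # 25 fbsb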
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/fbnet_arch.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/fbnet_arch.py",
"repo_id": "Cream",
"token_count": 1049
}
| 272 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32,
multiclass_nms)
from ..builder import build_loss
from ..losses import accuracy
from ..registry import HEADS
@HEADS.register_module
class BBoxHead(nn.Module):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively"""
def __init__(self,
with_avg_pool=False,
with_cls=True,
with_reg=True,
roi_feat_size=7,
in_channels=256,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
super(BBoxHead, self).__init__()
assert with_cls or with_reg
self.with_avg_pool = with_avg_pool
self.with_cls = with_cls
self.with_reg = with_reg
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.num_classes = num_classes
self.target_means = target_means
self.target_stds = target_stds
self.reg_class_agnostic = reg_class_agnostic
self.fp16_enabled = False
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
if self.with_cls:
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.with_reg:
out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
self.fc_reg = nn.Linear(in_channels, out_dim_reg)
self.debug_imgs = None
def init_weights(self):
if self.with_cls:
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
if self.with_reg:
nn.init.normal_(self.fc_reg.weight, 0, 0.001)
nn.init.constant_(self.fc_reg.bias, 0)
@auto_fp16()
def forward(self, x):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x) if self.with_cls else None
bbox_pred = self.fc_reg(x) if self.with_reg else None
return cls_score, bbox_pred
def get_target(self, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
reg_classes = 1 if self.reg_class_agnostic else self.num_classes
cls_reg_targets = bbox_target(
pos_proposals,
neg_proposals,
pos_gt_bboxes,
pos_gt_labels,
rcnn_train_cfg,
reg_classes,
target_means=self.target_means,
target_stds=self.target_stds)
return cls_reg_targets
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def loss(self,
cls_score,
bbox_pred,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
pos_inds = labels > 0
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds]
else:
pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,
4)[pos_inds, labels[pos_inds]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds],
bbox_weights[pos_inds],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_det_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
self.target_stds, img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = torch.from_numpy(scale_factor).to(bboxes.device)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
and bs is the sampled RoIs per image.
labels (Tensor): Shape (n*bs, ).
bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() == len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(rois[:, 0] == i).squeeze()
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
bbox_pred_ = bbox_preds[inds]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4)
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5
if not self.reg_class_agnostic:
label = label * 4
inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
assert bbox_pred.size(1) == 4
if rois.size(1) == 4:
new_rois = delta2bbox(rois, bbox_pred, self.target_means,
self.target_stds, img_meta['img_shape'])
else:
bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
self.target_stds, img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
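# Hedged usage sketch (appended for illustration; not part of the original file).
# Assumes the relative imports above resolve, i.e. mmdet is importable so that
# build_loss can look up 'CrossEntropyLoss' and 'SmoothL1Loss' in the registry.
if __name__ == '__main__':
    head = BBoxHead(with_avg_pool=True, roi_feat_size=7, in_channels=256, num_classes=81)
    head.init_weights()
    cls_score, bbox_pred = head(torch.randn(4, 256, 7, 7))   # 4 pooled RoI features
    print(cls_score.shape, bbox_pred.shape)                  # (4, 81) and (4, 4*81)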
|
Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py",
"repo_id": "Cream",
"token_count": 5149
}
| 273 |
import mmcv
from mmdet.core import tensor2imgs, bbox_mapping
from .base import BaseDetector
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
@DETECTORS.register_module
class RPN(BaseDetector, RPNTestMixin):
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None):
super(RPN, self).__init__()
self.backbone = builder.build_backbone(backbone)
self.neck = builder.build_neck(neck) if neck is not None else None
self.rpn_head = builder.build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
super(RPN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
self.neck.init_weights()
self.rpn_head.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes=None,
gt_bboxes_ignore=None):
if self.train_cfg.rpn.get('debug', False):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn)
losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
def simple_test(self, img, img_meta, rescale=False):
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(x, img_meta, self.test_cfg.rpn)
if rescale:
for proposals, meta in zip(proposal_list, img_meta):
proposals[:, :4] /= meta['scale_factor']
# TODO: remove this restriction
return proposal_list[0].cpu().numpy()
def aug_test(self, imgs, img_metas, rescale=False):
proposal_list = self.aug_test_rpn(
self.extract_feats(imgs), img_metas, self.test_cfg.rpn)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip)
# TODO: remove this restriction
return proposal_list[0].cpu().numpy()
def show_result(self, data, result, img_norm_cfg, dataset=None, top_k=20):
"""Show RPN proposals on the image.
Although we assume batch size is 1, this method supports arbitrary
batch size.
"""
img_tensor = data['img'][0]
img_metas = data['img_meta'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_norm_cfg)
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
mmcv.imshow_bboxes(img_show, result, top_k=top_k)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/rpn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/rpn.py",
"repo_id": "Cream",
"token_count": 1702
}
| 274 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import kaiming_init
from mmdet.core import auto_fp16, force_fp32
from ..registry import HEADS
from ..utils import ConvModule
@HEADS.register_module
class FusedSemanticHead(nn.Module):
"""Multi-level fused semantic segmentation head.
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2,
conv_cfg=None,
norm_cfg=None):
super(FusedSemanticHead, self).__init__()
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
def init_weights(self):
kaiming_init(self.conv_logits)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred',))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
loss_semantic_seg *= self.loss_weight
return loss_semantic_seg
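# Hedged usage sketch (appended for illustration; not part of the original file).
# Assumes the relative imports above resolve (ConvModule comes from mmdet); the
# feature pyramid sizes below are arbitrary examples.
if __name__ == '__main__':
    head = FusedSemanticHead(num_ins=5, fusion_level=1, in_channels=256,
                             conv_out_channels=256, num_classes=183)
    feats = [torch.randn(1, 256, 64 // 2 ** i, 64 // 2 ** i) for i in range(5)]
    mask_pred, sem_feat = head(feats)
    print(mask_pred.shape, sem_feat.shape)   # (1, 183, 32, 32) and (1, 256, 32, 32)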
|
Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py",
"repo_id": "Cream",
"token_count": 1957
}
| 275 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from mmcv.cnn import kaiming_init
class GeneralizedAttention(nn.Module):
"""GeneralizedAttention module.
See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
(https://arxiv.org/abs/1711.07971) for details.
Args:
in_dim (int): Channels of the input feature map.
spatial_range (int): The spatial range.
-1 indicates no spatial range constraint.
num_heads (int): The head number of empirical_attention module.
position_embedding_dim (int): The position embedding dimension.
position_magnitude (int): A multiplier acting on coord difference.
kv_stride (int): The feature stride acting on key/value feature map.
q_stride (int): The feature stride acting on query feature map.
attention_type (str): A binary indicator string for indicating which
items in generalized empirical_attention module are used.
'1000' indicates 'query and key content' (appr - appr) item,
'0100' indicates 'query content and relative position'
(appr - position) item,
'0010' indicates 'key content only' (bias - appr) item,
'0001' indicates 'relative position only' (bias - position) item.
"""
def __init__(self,
in_dim,
spatial_range=-1,
num_heads=9,
position_embedding_dim=-1,
position_magnitude=1,
kv_stride=2,
q_stride=1,
attention_type='1111'):
super(GeneralizedAttention, self).__init__()
# hard range means local range for non-local operation
self.position_embedding_dim = (
position_embedding_dim if position_embedding_dim > 0 else in_dim)
self.position_magnitude = position_magnitude
self.num_heads = num_heads
self.channel_in = in_dim
self.spatial_range = spatial_range
self.kv_stride = kv_stride
self.q_stride = q_stride
self.attention_type = [bool(int(_)) for _ in attention_type]
self.qk_embed_dim = in_dim // num_heads
out_c = self.qk_embed_dim * num_heads
if self.attention_type[0] or self.attention_type[1]:
self.query_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=out_c,
kernel_size=1,
bias=False)
self.query_conv.kaiming_init = True
if self.attention_type[0] or self.attention_type[2]:
self.key_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=out_c,
kernel_size=1,
bias=False)
self.key_conv.kaiming_init = True
self.v_dim = in_dim // num_heads
self.value_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=self.v_dim * num_heads,
kernel_size=1,
bias=False)
self.value_conv.kaiming_init = True
if self.attention_type[1] or self.attention_type[3]:
self.appr_geom_fc_x = nn.Linear(
self.position_embedding_dim // 2, out_c, bias=False)
self.appr_geom_fc_x.kaiming_init = True
self.appr_geom_fc_y = nn.Linear(
self.position_embedding_dim // 2, out_c, bias=False)
self.appr_geom_fc_y.kaiming_init = True
if self.attention_type[2]:
stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
self.appr_bias = nn.Parameter(appr_bias_value)
if self.attention_type[3]:
stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
self.geom_bias = nn.Parameter(geom_bias_value)
self.proj_conv = nn.Conv2d(
in_channels=self.v_dim * num_heads,
out_channels=in_dim,
kernel_size=1,
bias=True)
self.proj_conv.kaiming_init = True
self.gamma = nn.Parameter(torch.zeros(1))
if self.spatial_range >= 0:
# only works when non local is after 3*3 conv
if in_dim == 256:
max_len = 84
elif in_dim == 512:
max_len = 42
max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
local_constraint_map = np.ones(
                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
for iy in range(max_len):
for ix in range(max_len):
local_constraint_map[iy, ix,
max((iy - self.spatial_range) //
self.kv_stride, 0):min(
(iy + self.spatial_range +
1) // self.kv_stride +
1, max_len),
max((ix - self.spatial_range) //
self.kv_stride, 0):min(
(ix + self.spatial_range +
1) // self.kv_stride +
1, max_len)] = 0
self.local_constraint_map = nn.Parameter(
torch.from_numpy(local_constraint_map).byte(),
requires_grad=False)
if self.q_stride > 1:
self.q_downsample = nn.AvgPool2d(
kernel_size=1, stride=self.q_stride)
else:
self.q_downsample = None
if self.kv_stride > 1:
self.kv_downsample = nn.AvgPool2d(
kernel_size=1, stride=self.kv_stride)
else:
self.kv_downsample = None
self.init_weights()
def get_position_embedding(self,
h,
w,
h_kv,
w_kv,
q_stride,
kv_stride,
device,
feat_dim,
wave_length=1000):
h_idxs = torch.linspace(0, h - 1, h).cuda(device)
h_idxs = h_idxs.view((h, 1)) * q_stride
w_idxs = torch.linspace(0, w - 1, w).cuda(device)
w_idxs = w_idxs.view((w, 1)) * q_stride
h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).cuda(device)
h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride
w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).cuda(device)
w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride
# (h, h_kv, 1)
h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
h_diff *= self.position_magnitude
# (w, w_kv, 1)
w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
w_diff *= self.position_magnitude
feat_range = torch.arange(0, feat_dim / 4).cuda(device)
dim_mat = torch.Tensor([wave_length]).cuda(device)
dim_mat = dim_mat**((4. / feat_dim) * feat_range)
dim_mat = dim_mat.view((1, 1, -1))
embedding_x = torch.cat(
((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
embedding_y = torch.cat(
((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
return embedding_x, embedding_y
def forward(self, x_input):
num_heads = self.num_heads
# use empirical_attention
if self.q_downsample is not None:
x_q = self.q_downsample(x_input)
else:
x_q = x_input
n, _, h, w = x_q.shape
if self.kv_downsample is not None:
x_kv = self.kv_downsample(x_input)
else:
x_kv = x_input
_, _, h_kv, w_kv = x_kv.shape
if self.attention_type[0] or self.attention_type[1]:
proj_query = self.query_conv(x_q).view(
(n, num_heads, self.qk_embed_dim, h * w))
proj_query = proj_query.permute(0, 1, 3, 2)
if self.attention_type[0] or self.attention_type[2]:
proj_key = self.key_conv(x_kv).view(
(n, num_heads, self.qk_embed_dim, h_kv * w_kv))
if self.attention_type[1] or self.attention_type[3]:
position_embed_x, position_embed_y = self.get_position_embedding(
h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
x_input.device, self.position_embedding_dim)
# (n, num_heads, w, w_kv, dim)
position_feat_x = self.appr_geom_fc_x(position_embed_x).\
view(1, w, w_kv, num_heads, self.qk_embed_dim).\
permute(0, 3, 1, 2, 4).\
repeat(n, 1, 1, 1, 1)
# (n, num_heads, h, h_kv, dim)
position_feat_y = self.appr_geom_fc_y(position_embed_y).\
view(1, h, h_kv, num_heads, self.qk_embed_dim).\
permute(0, 3, 1, 2, 4).\
repeat(n, 1, 1, 1, 1)
position_feat_x /= math.sqrt(2)
position_feat_y /= math.sqrt(2)
# accelerate for saliency only
if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim).\
repeat(n, 1, 1, 1)
energy = torch.matmul(appr_bias, proj_key).\
view(n, num_heads, 1, h_kv * w_kv)
h = 1
w = 1
else:
# (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
if not self.attention_type[0]:
energy = torch.zeros(
n,
num_heads,
h,
w,
h_kv,
w_kv,
dtype=x_input.dtype,
device=x_input.device)
# attention_type[0]: appr - appr
# attention_type[1]: appr - position
# attention_type[2]: bias - appr
# attention_type[3]: bias - position
if self.attention_type[0] or self.attention_type[2]:
if self.attention_type[0] and self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
energy = torch.matmul(proj_query + appr_bias, proj_key).\
view(n, num_heads, h, w, h_kv, w_kv)
elif self.attention_type[0]:
energy = torch.matmul(proj_query, proj_key).\
view(n, num_heads, h, w, h_kv, w_kv)
elif self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim).\
repeat(n, 1, 1, 1)
energy += torch.matmul(appr_bias, proj_key).\
view(n, num_heads, 1, 1, h_kv, w_kv)
if self.attention_type[1] or self.attention_type[3]:
if self.attention_type[1] and self.attention_type[3]:
geom_bias = self.geom_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
proj_query_reshape = (proj_query + geom_bias).\
view(n, num_heads, h, w, self.qk_embed_dim)
energy_x = torch.matmul(
proj_query_reshape.permute(0, 1, 3, 2, 4),
position_feat_x.permute(0, 1, 2, 4, 3))
energy_x = energy_x.\
permute(0, 1, 3, 2, 4).unsqueeze(4)
energy_y = torch.matmul(
proj_query_reshape,
position_feat_y.permute(0, 1, 2, 4, 3))
energy_y = energy_y.unsqueeze(5)
energy += energy_x + energy_y
elif self.attention_type[1]:
proj_query_reshape = proj_query.\
view(n, num_heads, h, w, self.qk_embed_dim)
proj_query_reshape = proj_query_reshape.\
permute(0, 1, 3, 2, 4)
position_feat_x_reshape = position_feat_x.\
permute(0, 1, 2, 4, 3)
position_feat_y_reshape = position_feat_y.\
permute(0, 1, 2, 4, 3)
energy_x = torch.matmul(proj_query_reshape,
position_feat_x_reshape)
energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
energy_y = torch.matmul(proj_query_reshape,
position_feat_y_reshape)
energy_y = energy_y.unsqueeze(5)
energy += energy_x + energy_y
elif self.attention_type[3]:
geom_bias = self.geom_bias.\
view(1, num_heads, self.qk_embed_dim, 1).\
repeat(n, 1, 1, 1)
position_feat_x_reshape = position_feat_x.\
view(n, num_heads, w*w_kv, self.qk_embed_dim)
position_feat_y_reshape = position_feat_y.\
view(n, num_heads, h * h_kv, self.qk_embed_dim)
energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
energy += energy_x + energy_y
energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
if self.spatial_range >= 0:
cur_local_constraint_map = \
self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
contiguous().\
view(1, 1, h*w, h_kv*w_kv)
energy = energy.masked_fill_(cur_local_constraint_map,
float('-inf'))
attention = F.softmax(energy, 3)
proj_value = self.value_conv(x_kv)
proj_value_reshape = proj_value.\
view((n, num_heads, self.v_dim, h_kv * w_kv)).\
permute(0, 1, 3, 2)
out = torch.matmul(attention, proj_value_reshape).\
permute(0, 1, 3, 2).\
contiguous().\
view(n, self.v_dim * self.num_heads, h, w)
out = self.proj_conv(out)
out = self.gamma * out + x_input
return out
def init_weights(self):
for m in self.modules():
if hasattr(m, 'kaiming_init') and m.kaiming_init:
kaiming_init(
m,
mode='fan_in',
nonlinearity='leaky_relu',
bias=0,
distribution='uniform',
a=1)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/plugins/generalized_attention.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/plugins/generalized_attention.py",
"repo_id": "Cream",
"token_count": 8961
}
| 276 |
import os.path as osp
import sys
import numpy as np
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_align import RoIAlign # noqa: E402, isort:skip
feat_size = 15
spatial_scale = 1.0 / 8
img_size = feat_size / spatial_scale
num_imgs = 2
num_rois = 20
batch_ind = np.random.randint(num_imgs, size=(num_rois, 1))
rois = np.random.rand(num_rois, 4) * img_size * 0.5
rois[:, 2:] += img_size * 0.5
rois = np.hstack((batch_ind, rois))
feat = torch.randn(
num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0')
rois = torch.from_numpy(rois).float().cuda()
inputs = (feat, rois)
print('Gradcheck for roi align...')
test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3)
print(test)
test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3)
print(test)
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/gradcheck.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/gradcheck.py",
"repo_id": "Cream",
"token_count": 371
}
| 277 |
from .modules.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
__all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss']
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py",
"repo_id": "Cream",
"token_count": 55
}
| 278 |
# GENERATED VERSION FILE
# TIME: Fri Oct 15 17:01:16 2021
__version__ = '0.6.0+0889383'
short_version = '0.6.0'
|
Cream/CDARTS/CDARTS_detection/mmdet/version.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/version.py",
"repo_id": "Cream",
"token_count": 50
}
| 279 |
data_path: "../DATASET/cityscapes/"
det2_cfg: "configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml"
seed: 12345
random_sample: False
opt: "sgd"
opt_eps: 0.001
sched: "new" #"raw for original"
epochs: 4000
drop_path_prob: 0.2
image_height: 512
image_width: 1024
eval_height: 1024
eval_width: 2048
batch_size: 4
mode: "poly"
base_lr: 0.05
workers: 4
Fch: 6
warmup_start_lr: 5e-6
warmup_iters: 1000
weight_decay: 1e-4
model_ema: True
model_ema_decay: 0.9998
stem_head_width: 1.0
|
Cream/CDARTS/CDARTS_segmentation/configs/cityscapes/cydas.yaml/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/configs/cityscapes/cydas.yaml",
"repo_id": "Cream",
"token_count": 240
}
| 280 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/datasets/data_generator.py
# ------------------------------------------------------------------------------
import collections
# Named tuple to describe the dataset properties.
DatasetDescriptor = collections.namedtuple(
'DatasetDescriptor',
[
'splits_to_sizes', # Splits of the dataset into training, val and test.
'num_classes', # Number of semantic classes, including the
# background class (if exists). For example, there
# are 20 foreground classes + 1 background class in
# the PASCAL VOC 2012 dataset. Thus, we set
# num_classes=21.
'ignore_label', # Ignore label value.
])
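# Hedged usage sketch (appended for illustration; not part of the original file):
# a descriptor filled in with the standard Cityscapes semantic-segmentation setup.
if __name__ == '__main__':
    cityscapes_info = DatasetDescriptor(
        splits_to_sizes={'train': 2975, 'val': 500},
        num_classes=19,
        ignore_label=255,
    )
    print(cityscapes_info.num_classes, cityscapes_info.ignore_label)   # 19 255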
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/utils.py",
"repo_id": "Cream",
"token_count": 307
}
| 281 |
from .resnet import *
from .mobilenet import *
from .mnasnet import *
from .hrnet import *
from .xception import *
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/__init__.py",
"repo_id": "Cream",
"token_count": 38
}
| 282 |
# ------------------------------------------------------------------------------
# Base model for segmentation.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
from torch import nn
from torch.nn import functional as F
class BaseSegmentationModel(nn.Module):
"""
Base class for segmentation models.
Arguments:
backbone: A nn.Module of backbone model.
decoder: A nn.Module of decoder.
"""
def __init__(self, backbone, decoder):
super(BaseSegmentationModel, self).__init__()
self.backbone = backbone
self.decoder = decoder
def _init_params(self):
# Backbone is already initialized (either from pre-trained checkpoint or random init).
for m in self.decoder.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def set_image_pooling(self, pool_size):
self.decoder.set_image_pooling(pool_size)
def _upsample_predictions(self, pred, input_shape):
"""Upsamples final prediction.
Args:
pred (dict): stores all output of the segmentation model.
input_shape (tuple): spatial resolution of the desired shape.
Returns:
result (OrderedDict): upsampled dictionary.
"""
result = OrderedDict()
for key in pred.keys():
out = F.interpolate(pred[key], size=input_shape, mode='bilinear', align_corners=True)
result[key] = out
return result
def forward(self, x, targets=None):
input_shape = x.shape[-2:]
# contract: features is a dict of tensors
features = self.backbone(x)
pred = self.decoder(features)
results = self._upsample_predictions(pred, input_shape)
if targets is None:
return results
else:
return self.loss(results, targets)
def loss(self, results, targets=None):
raise NotImplementedError
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/base.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/base.py",
"repo_id": "Cream",
"token_count": 893
}
| 283 |
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-08-03
# Reference: https://github.com/tomrunia/OpticalFlow_Visualization
import numpy as np
def make_colorwheel():
'''
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
'''
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_compute_color(u, v, convert_to_bgr=False):
'''
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param u: np.ndarray, input horizontal flow
:param v: np.ndarray, input vertical flow
:param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
:return:
'''
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range?
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
'''
Expects a two dimensional flow image of shape [H,W,2]
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param flow_uv: np.ndarray of shape [H,W,2]
:param clip_flow: float, maximum clipping value for flow
:return:
'''
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
rad_max = np.max(rad)
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_compute_color(u, v, convert_to_bgr)
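# Hedged usage sketch (appended for illustration; not part of the original file).
# Visualizes a random flow field; only numpy is required.
if __name__ == '__main__':
    flow = np.random.randn(64, 64, 2).astype(np.float32)
    rgb = flow_to_color(flow)
    print(rgb.shape, rgb.dtype)   # (64, 64, 3) uint8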
|
Cream/CDARTS/CDARTS_segmentation/segmentation/utils/flow_vis.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/utils/flow_vis.py",
"repo_id": "Cream",
"token_count": 1736
}
| 284 |
import numpy as np
import json
import torch
import torch.nn as nn
def __init_weight(feature, conv_init, norm_layer, bn_eps, bn_momentum,
**kwargs):
for name, m in feature.named_modules():
if isinstance(m, (nn.Conv2d, nn.Conv3d)):
conv_init(m.weight, **kwargs)
elif isinstance(m, norm_layer):
m.eps = bn_eps
m.momentum = bn_momentum
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_weights(m):
if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum,
**kwargs):
if isinstance(module_list, list):
for feature in module_list:
__init_weight(feature, conv_init, norm_layer, bn_eps, bn_momentum,
**kwargs)
else:
__init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum,
**kwargs)
def group_weight(weight_group, module, norm_layer, lr):
group_decay = []
group_no_decay = []
for m in module.modules():
if isinstance(m, nn.Linear):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, norm_layer) or isinstance(m, nn.GroupNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
assert len(list(module.parameters())) == len(group_decay) + len(
group_no_decay)
weight_group.append(dict(params=group_decay, lr=lr))
weight_group.append(dict(params=group_no_decay, weight_decay=.0, lr=lr))
return weight_group
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
|
Cream/CDARTS/CDARTS_segmentation/tools/utils/init_func.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/utils/init_func.py",
"repo_id": "Cream",
"token_count": 1209
}
| 285 |
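`group_weight` above splits a module's parameters into weight-decay and no-decay groups (norm and bias terms), and `NpEncoder` lets NumPy scalars and arrays pass through `json.dumps`. A hedged usage sketch; the toy model, module name, and hyper-parameters are illustrative assumptions:

```python
import json
import numpy as np
import torch
import torch.nn as nn
from init_func import group_weight, NpEncoder  # assumed module name for the file above

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.Linear(8, 4))
param_groups = group_weight([], model, nn.BatchNorm2d, lr=1e-3)
optimizer = torch.optim.SGD(param_groups, momentum=0.9, weight_decay=1e-4)

# NumPy values serialize cleanly thanks to NpEncoder.
print(json.dumps({"miou": np.float32(0.73), "hist": np.arange(3)}, cls=NpEncoder))
```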
MODEL:
META_ARCHITECTURE: "PanopticDeepLab"
BACKBONE:
FREEZE_AT: 0
RESNETS:
OUT_FEATURES: ["res2", "res3", "res5"]
RES5_DILATION: 2
SEM_SEG_HEAD:
NAME: "PanopticDeepLabSemSegHead"
IN_FEATURES: ["res2", "res3", "res5"]
PROJECT_FEATURES: ["res2", "res3"]
PROJECT_CHANNELS: [32, 64]
ASPP_CHANNELS: 256
ASPP_DILATIONS: [6, 12, 18]
ASPP_DROPOUT: 0.1
HEAD_CHANNELS: 256
CONVS_DIM: 256
COMMON_STRIDE: 4
NUM_CLASSES: 19
LOSS_TYPE: "hard_pixel_mining"
NORM: "SyncBN"
INS_EMBED_HEAD:
NAME: "PanopticDeepLabInsEmbedHead"
IN_FEATURES: ["res2", "res3", "res5"]
PROJECT_FEATURES: ["res2", "res3"]
PROJECT_CHANNELS: [32, 64]
ASPP_CHANNELS: 256
ASPP_DILATIONS: [6, 12, 18]
ASPP_DROPOUT: 0.1
HEAD_CHANNELS: 32
CONVS_DIM: 128
COMMON_STRIDE: 4
NORM: "SyncBN"
CENTER_LOSS_WEIGHT: 200.0
OFFSET_LOSS_WEIGHT: 0.01
PANOPTIC_DEEPLAB:
STUFF_AREA: 2048
CENTER_THRESHOLD: 0.1
NMS_KERNEL: 7
TOP_K_INSTANCE: 200
DATASETS:
TRAIN: ("cityscapes_fine_panoptic_train",)
TEST: ("cityscapes_fine_panoptic_val",)
SOLVER:
OPTIMIZER: "ADAM"
BASE_LR: 0.001
WEIGHT_DECAY: 0.0
WEIGHT_DECAY_NORM: 0.0
WEIGHT_DECAY_BIAS: 0.0
MAX_ITER: 60000
LR_SCHEDULER_NAME: "WarmupPolyLR"
IMS_PER_BATCH: 4
INPUT:
MIN_SIZE_TRAIN: (512, 640, 704, 832, 896, 1024, 1152, 1216, 1344, 1408, 1536, 1664, 1728, 1856, 1920, 2048)
MIN_SIZE_TRAIN_SAMPLING: "choice"
MIN_SIZE_TEST: 1024
MAX_SIZE_TRAIN: 4096
MAX_SIZE_TEST: 2048
CROP:
ENABLED: True
TYPE: "absolute"
SIZE: (1024, 2048)
DATALOADER:
NUM_WORKERS: 4
VERSION: 2
|
Cream/CDARTS/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml",
"repo_id": "Cream",
"token_count": 841
}
| 286 |
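The YAML above is a Detectron2-style base config for Panoptic-DeepLab on Cityscapes (19 semantic classes, Adam with WarmupPolyLR, SyncBN heads). A hedged way to sanity-check a few fields outside the training code, assuming the file is saved locally under the name shown; the training scripts themselves consume it through Detectron2's config system rather than raw PyYAML:

```python
import yaml

with open("Base-PanopticDeepLab-OS16.yaml") as f:  # assumed local filename
    cfg = yaml.safe_load(f)

print(cfg["MODEL"]["SEM_SEG_HEAD"]["NUM_CLASSES"])          # 19
print(cfg["SOLVER"]["BASE_LR"], cfg["SOLVER"]["MAX_ITER"])  # 0.001 60000
```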
#!/usr/bin/env python3
# encoding: utf-8
import os
import time
import cv2
cv2.setNumThreads(0)
import torchvision
from PIL import Image
import argparse
import numpy as np
import torch
import torch.multiprocessing as mp
from utils.pyt_utils import ensure_dir, link_file, load_model, parse_devices
from utils.visualize import print_iou, show_prediction
from engine.tester import Tester
from engine.logger import get_logger
from seg_opr.metric import hist_info, compute_score
from datasets.cityscapes import Cityscapes
logger = get_logger()
cityscapes_trainID2id = {
0: 7,
1: 8,
2: 11,
3: 12,
4: 13,
5: 17,
6: 19,
7: 20,
8: 21,
9: 22,
10: 23,
11: 24,
12: 25,
13: 26,
14: 27,
15: 28,
16: 31,
17: 32,
18: 33,
19: 0
}
class SegTester(Tester):
def func_per_iteration(self, data, device, iter=None):
if self.config is not None: config = self.config
img = data['data']
label = data['label']
name = data['fn']
if len(config.eval_scale_array) == 1:
pred = self.whole_eval(img, None, device)
else:
pred = self.sliding_eval(img, config.eval_crop_size, config.eval_stride_rate, device)
if self.show_prediction:
colors = self.dataset.get_class_colors()
image = img
comp_img = show_prediction(colors, config.background, image, pred)
cv2.imwrite(os.path.join(os.path.realpath('.'), self.config.save, "test", name+".viz.png"), comp_img[:,:,::-1])
for x in range(pred.shape[0]):
for y in range(pred.shape[1]):
pred[x, y] = cityscapes_trainID2id[pred[x, y]]
cv2.imwrite(os.path.join(os.path.realpath('.'), self.config.save, "test", name+".png"), pred)
def compute_metric(self, results):
hist = np.zeros((self.config.num_classes, self.config.num_classes))
correct = 0
labeled = 0
count = 0
for d in results:
hist += d['hist']
correct += d['correct']
labeled += d['labeled']
count += 1
iu, mean_IU, mean_IU_no_back, mean_pixel_acc = compute_score(hist, correct, labeled)
result_line = print_iou(iu, mean_pixel_acc, self.dataset.get_class_names(), True)
return result_line, mean_IU
|
Cream/CDARTS/CDARTS_segmentation/train/test.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/test.py",
"repo_id": "Cream",
"token_count": 1040
}
| 287 |
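`SegTester.func_per_iteration` above remaps predictions from train IDs to Cityscapes label IDs with a per-pixel double loop. A vectorized lookup-table variant is sketched below as an assumption-level alternative, not code from the repo:

```python
import numpy as np

cityscapes_trainID2id = {
    0: 7, 1: 8, 2: 11, 3: 12, 4: 13, 5: 17, 6: 19, 7: 20, 8: 21, 9: 22,
    10: 23, 11: 24, 12: 25, 13: 26, 14: 27, 15: 28, 16: 31, 17: 32, 18: 33, 19: 0,
}

# Build a lookup table indexed by train ID, then remap the whole map in one shot.
lut = np.zeros(max(cityscapes_trainID2id) + 1, dtype=np.uint8)
for train_id, label_id in cityscapes_trainID2id.items():
    lut[train_id] = label_id

pred = np.random.randint(0, 20, size=(1024, 2048))  # toy prediction in train-ID space
pred_ids = lut[pred]                                # equivalent to the nested loops
```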
""" CNN cell for network augmentation """
import torch.nn as nn
from copy import deepcopy
from models.ops import OPS
# Cell for NAS-Bench-201
class InferCell(nn.Module):
def __init__(self, genotype, C_in, C_out, stride):
super(InferCell, self).__init__()
self.layers = nn.ModuleList()
self.node_IN = []
self.node_IX = []
self.genotype = deepcopy(genotype)
for i in range(1, len(genotype)):
node_info = genotype[i-1]
cur_index = []
cur_innod = []
for (op_name, op_in) in node_info:
if op_in == 0:
layer = OPS[op_name](C_in , C_out, stride, True, True)
else:
layer = OPS[op_name](C_out, C_out, 1, True, True)
cur_index.append( len(self.layers) )
cur_innod.append( op_in )
self.layers.append( layer )
self.node_IX.append( cur_index )
self.node_IN.append( cur_innod )
self.nodes = len(genotype)
self.in_dim = C_in
self.out_dim = C_out
def extra_repr(self):
string = 'info :: nodes={nodes}, inC={in_dim}, outC={out_dim}'.format(**self.__dict__)
laystr = []
for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)):
y = ['I{:}-L{:}'.format(_ii, _il) for _il, _ii in zip(node_layers, node_innods)]
x = '{:}<-({:})'.format(i+1, ','.join(y))
laystr.append( x )
return string + ', [{:}]'.format( ' | '.join(laystr) ) + ', {:}'.format(self.genotype.tostr())
def forward(self, inputs):
nodes = [inputs]
for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)):
node_feature = sum( self.layers[_il](nodes[_ii]) for _il, _ii in zip(node_layers, node_innods) )
nodes.append( node_feature )
return nodes[-1]
|
Cream/CDARTS/benchmark201/models/augment_cells.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/models/augment_cells.py",
"repo_id": "Cream",
"token_count": 798
}
| 288 |
""" Network architecture visualizer using graphviz """
import sys
from graphviz import Digraph
import utils.genotypes as gt
def plot(genotype, file_path, caption=None):
""" make DAG plot and save to file_path as .png """
edge_attr = {
'fontsize': '20',
'fontname': 'times'
}
node_attr = {
'style': 'filled',
'shape': 'rect',
'align': 'center',
'fontsize': '20',
'height': '0.5',
'width': '0.5',
'penwidth': '2',
'fontname': 'times'
}
g = Digraph(
format='png',
edge_attr=edge_attr,
node_attr=node_attr,
engine='dot')
g.body.extend(['rankdir=LR'])
# input nodes
g.node("c_{k-2}", fillcolor='darkseagreen2')
g.node("c_{k-1}", fillcolor='darkseagreen2')
# intermediate nodes
n_nodes = len(genotype)
for i in range(n_nodes):
g.node(str(i), fillcolor='lightblue')
for i, edges in enumerate(genotype):
for op, j in edges:
if j == 0:
u = "c_{k-2}"
elif j == 1:
u = "c_{k-1}"
else:
u = str(j-2)
v = str(i)
g.edge(u, v, label=op, fillcolor="gray")
# output node
g.node("c_{k}", fillcolor='palegoldenrod')
for i in range(n_nodes):
g.edge(str(i), "c_{k}", fillcolor="gray")
# add image caption
if caption:
g.attr(label=caption, overlap='false', fontsize='20', fontname='times')
g.render(file_path, view=False)
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError("usage:\n python {} GENOTYPE".format(sys.argv[0]))
genotype_str = sys.argv[1]
try:
genotype = gt.from_str(genotype_str)
except AttributeError:
raise ValueError("Cannot parse {}".format(genotype_str))
plot(genotype.normal, "normal")
plot(genotype.reduce, "reduction")
|
Cream/CDARTS/benchmark201/utils/visualize.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/utils/visualize.py",
"repo_id": "Cream",
"token_count": 938
}
| 289 |
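`plot` above only iterates over a genotype as a list of per-node edge lists, so it can be exercised with a hand-written structure. The toy genotype and operation names below are assumptions, the import path depends on the repo layout, and rendering requires the Graphviz binaries to be installed:

```python
from utils.visualize import plot  # assumed import path (run from the benchmark201 directory)

# One entry per intermediate node; each edge is (op_name, input_index),
# where inputs 0 and 1 are the two cell inputs c_{k-2} and c_{k-1}.
toy_genotype = [
    [("sep_conv_3x3", 0), ("sep_conv_3x3", 1)],
    [("skip_connect", 0), ("sep_conv_3x3", 2)],
]
plot(toy_genotype, "toy_cell", caption="toy cell")  # writes toy_cell.png via graphviz
```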
import torch.nn as nn
from lib.models.augment_cells import AugmentCell
class ModelTest(nn.Module):
def __init__(self, genotypes_dict, model_type, res_stem=False, init_channel=96, stem_multiplier=3, n_nodes=4, num_classes=1000):
"""
args:
"""
super(ModelTest, self).__init__()
self.c_in = 3
self.init_channel = init_channel
self.stem_multiplier = stem_multiplier
self.num_classes = num_classes
self.n_nodes = n_nodes
self.model_type = model_type
self.res_stem = res_stem
if self.model_type == 'cifar':
reduction_p = False
self.layers_reduction = [True, True, False]
self.augment_layers = [7, 7, 6]
self.nas_layers = nn.ModuleList([None, None, None])
self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier)
elif self.model_type == 'imagenet':
if self.res_stem:
reduction_p = False
self.nas_layers = nn.ModuleList([None, None, None, None])
self.layers_reduction = [False, True, True, True]
self.augment_layers = [3, 4, 3, 4]
self.feature_extractor = self.resnet_stem(self.init_channel * self.stem_multiplier)
else:
reduction_p = True
self.nas_layers = nn.ModuleList([None, None, None])
self.layers_reduction = [True, True, False]
self.augment_layers = [5, 5, 4]
self.feature_extractor = self.imagenet_stem(self.init_channel * self.stem_multiplier)
else:
raise Exception("Wrong model type!")
self.nas_layers_num = len(self.nas_layers)
c_p = self.init_channel * self.stem_multiplier
c_pp = self.init_channel * self.stem_multiplier
c_cur = self.init_channel
for layer_idx, genotype in genotypes_dict.items():
reduction = self.layers_reduction[layer_idx]
nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, genotype, self.augment_layers[layer_idx])
self.nas_layers[layer_idx] = nas_layer
if reduction:
c_p = c_cur * 2 * self.n_nodes
else:
c_p = c_cur * self.n_nodes
if self.res_stem:
c_pp = c_p
reduction_p = False
else:
c_pp = c_cur * self.n_nodes
reduction_p = reduction
if reduction:
c_cur = c_cur * 2
else:
c_cur = c_cur
self.fc = nn.Linear(c_p, self.num_classes)
self.gap = nn.AdaptiveAvgPool2d(1)
def generate_nas_layer(self, C_cur, C_p, C_pp, reduction_p, reduction_cur, genotype, cell_num=3, bn_affine=True):
cells = nn.ModuleList()
if self.res_stem:
reduction_idx = 0
else:
reduction_idx = cell_num - 1
for i in range(cell_num):
if i == reduction_idx and reduction_cur:
C_cur *= 2
reduction = True
else:
reduction = False
cell = AugmentCell(genotype, C_pp, C_p, C_cur, reduction_p, reduction, bn_affine)
reduction_p = reduction
cells.append(cell)
C_cur_out = C_cur * len(cell.concat)
C_pp, C_p = C_p, C_cur_out
return cells
def forward(self, x):
s0, s1 = self.extract_features(x)
for i in range(self.nas_layers_num):
s0, s1 = self.forward_nas_layer(s0, s1, self.nas_layers[i])
out = self.gap(s1)
out = out.view(out.size(0), -1) # flatten
logits = self.fc(out)
return logits, logits
def forward_nas_layer(self, s0, s1, nas_layer):
for cell in nas_layer:
s0, s1 = s1, cell(s0, s1)
return s0, s1
def extract_features(self, im):
# feature_extractor is nn.ModuleList()
if len(self.feature_extractor) == 1:
s0 = self.feature_extractor[0](im)
s1 = s0
return [s0, s1]
elif len(self.feature_extractor) == 2:
s0 = self.feature_extractor[0](im)
s1 = self.feature_extractor[1](s0)
return [s0, s1]
else:
raise NotImplementedError
def resnet_stem(self, inplanes=64):
C_in = self.c_in
feature_extractor = nn.ModuleList()
stem = nn.Sequential(
nn.Conv2d(C_in, inplanes, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(inplanes),
nn.ReLU(inplace=True),
# the layer1 is concated with maxpool
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
feature_extractor.append(stem)
return feature_extractor
def cifar_stem(self, init_channel):
C_in = self.c_in
C_cur = init_channel
feature_extractor = nn.ModuleList()
stem = nn.Sequential(
nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
nn.BatchNorm2d(C_cur)
)
feature_extractor.append(stem)
return feature_extractor
def imagenet_stem(self, init_channel):
C_in = self.c_in
C_cur = init_channel
feature_extractor = nn.ModuleList()
stem0 = nn.Sequential(
nn.Conv2d(C_in, C_cur // 2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C_cur // 2),
nn.ReLU(inplace=True),
nn.Conv2d(C_cur // 2, C_cur, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C_cur),
)
stem1 = nn.Sequential(
nn.ReLU(inplace=True),
nn.Conv2d(C_cur, C_cur, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C_cur),
)
feature_extractor.append(stem0)
feature_extractor.append(stem1)
return feature_extractor
|
Cream/CDARTS/lib/models/model_test.py/0
|
{
"file_path": "Cream/CDARTS/lib/models/model_test.py",
"repo_id": "Cream",
"token_count": 3164
}
| 290 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import time
import torchvision
import torch.nn.functional as F
from lib.utils.util import *
# supernet train function
def train_epoch(epoch, model, loader, optimizer, loss_fn, prioritized_board, MetaMN, cfg,
est=None, logger=None, lr_scheduler=None, saver=None,
output_dir='', model_ema=None, local_rank=0):
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
kd_losses_m = AverageMeter()
prec1_m = AverageMeter()
prec5_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
# get random architectures
prob = prioritized_board.get_prob()
random_cand = prioritized_board.get_cand_with_prob(prob)
random_cand.insert(0, [0])
random_cand.append([0])
# evaluate FLOPs of candidates
cand_flops = est.get_flops(random_cand)
# update meta matching networks
MetaMN.run_update(input, target, random_cand, model, optimizer,
prioritized_board, loss_fn, epoch, batch_idx)
# get_best_teacher
if prioritized_board.board_size() > 0:
meta_value, teacher_cand = prioritized_board.select_teacher(model, random_cand)
if prioritized_board.board_size() == 0 or epoch <= cfg.SUPERNET.META_STA_EPOCH:
output = model(input, random_cand)
loss = loss_fn(output, target)
kd_loss, teacher_output, teacher_cand = None, None, None
else:
output = model(input, random_cand)
valid_loss = loss_fn(output, target)
# get soft label from teacher cand
with torch.no_grad():
teacher_output = model(input, teacher_cand).detach()
soft_label = F.softmax(teacher_output, dim=1)
kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2
optimizer.zero_grad()
loss.backward()
optimizer.step()
prec1, prec5 = accuracy(output, target, topk=(1, 5))
if cfg.NUM_GPU == 1:
reduced_loss = loss.data
else:
reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU)
prec1 = reduce_tensor(prec1, cfg.NUM_GPU)
prec5 = reduce_tensor(prec5, cfg.NUM_GPU)
prioritized_board.update_prioritized_board(input, teacher_output, output, epoch, prec1, cand_flops, random_cand)
torch.cuda.synchronize()
if kd_loss is not None:
kd_losses_m.update(kd_loss.item(), input.size(0))
losses_m.update(reduced_loss.item(), input.size(0))
prec1_m.update(prec1.item(), output.size(0))
prec5_m.update(prec5.item(), output.size(0))
batch_time_m.update(time.time() - end)
if lr_scheduler is not None:
lr_scheduler.step()
if last_batch or batch_idx % cfg.LOG_INTERVAL == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if local_rank == 0:
logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'KD-Loss: {kd_loss.val:>9.6f} ({kd_loss.avg:>6.4f}) '
'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
kd_loss=kd_losses_m,
top1=prec1_m,
top5=prec5_m,
batch_time=batch_time_m,
rate=input.size(0) * cfg.NUM_GPU / batch_time_m.val,
rate_avg=input.size(0) * cfg.NUM_GPU / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if cfg.SAVE_IMAGES and output_dir:
torchvision.utils.save_image(
input, os.path.join(
output_dir, 'train-batch-%d.jpg' %
batch_idx), padding=0, normalize=True)
if saver is not None and cfg.RECOVERY_INTERVAL and (
last_batch or (batch_idx + 1) % cfg.RECOVERY_INTERVAL == 0):
saver.save_recovery(model, optimizer, cfg, epoch,
model_ema=model_ema, batch_idx=batch_idx)
end = time.time()
if local_rank == 0:
for idx, i in enumerate(prioritized_board.prioritized_board):
logger.info("No.{} {}".format(idx, i[:4]))
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, prioritized_board, cfg, log_suffix='', local_rank=0, logger=None):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
prec1_m = AverageMeter()
prec5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
# get random child architecture
random_cand = prioritized_board.get_cand_with_prob(None)
random_cand.insert(0, [0])
random_cand.append([0])
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
input = input.cuda()
target = target.cuda()
output = model(input, random_cand)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = cfg.TTA
if reduce_factor > 1:
output = output.unfold(
0,
reduce_factor,
reduce_factor).mean(
dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
prec1, prec5 = accuracy(output, target, topk=(1, 5))
if cfg.NUM_GPU > 1:
reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU)
prec1 = reduce_tensor(prec1, cfg.NUM_GPU)
prec5 = reduce_tensor(prec5, cfg.NUM_GPU)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
prec1_m.update(prec1.item(), output.size(0))
prec5_m.update(prec5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if local_rank == 0 and (
last_batch or batch_idx %
cfg.LOG_INTERVAL == 0):
log_name = 'Test' + log_suffix
logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx,
batch_time=batch_time_m, loss=losses_m,
top1=prec1_m, top5=prec5_m))
metrics = OrderedDict(
[('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)])
return metrics
|
Cream/Cream/lib/core/train.py/0
|
{
"file_path": "Cream/Cream/lib/core/train.py",
"repo_id": "Cream",
"token_count": 4398
}
| 291 |
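`train_epoch` above blends the hard-label loss with a distillation term computed by `cross_entropy_loss_with_soft_target`, which is imported from `lib.utils.util`. A common shape for that kind of soft-target cross-entropy is sketched below as an assumption about the helper, not a copy of it:

```python
import torch
import torch.nn.functional as F

def soft_target_cross_entropy(logits: torch.Tensor, soft_target: torch.Tensor) -> torch.Tensor:
    # soft_target is a probability distribution, e.g. softmax of the teacher logits
    log_prob = F.log_softmax(logits, dim=1)
    return torch.mean(torch.sum(-soft_target * log_prob, dim=1))

student_logits = torch.randn(4, 10)
teacher_logits = torch.randn(4, 10)
kd_loss = soft_target_cross_entropy(student_logits, F.softmax(teacher_logits, dim=1))
valid_loss = F.cross_entropy(student_logits, torch.randint(0, 10, (4,)))
meta_value = 0.5  # illustrative stand-in for the meta-network weighting used above
loss = (meta_value * kd_loss + (2 - meta_value) * valid_loss) / 2
```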
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
# This file adds the project's lib directory and repo root to the Python path (sys.path).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
lib_path = osp.join(this_dir, '..')
add_path(lib_path)
|
Cream/Cream/tools/_init_paths.py/0
|
{
"file_path": "Cream/Cream/tools/_init_paths.py",
"repo_id": "Cream",
"token_count": 222
}
| 292 |
"""
3Augment implementation from (https://github.com/facebookresearch/deit/blob/main/augment.py)
Data-augmentation (DA) based on dino DA (https://github.com/facebookresearch/dino)
and timm DA(https://github.com/rwightman/pytorch-image-models)
Can be called by adding "--ThreeAugment" to the command line
"""
import torch
from torchvision import transforms
from timm.data.transforms import str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor
import numpy as np
from torchvision import datasets, transforms
import random
from PIL import ImageFilter, ImageOps
import torchvision.transforms.functional as TF
class GaussianBlur(object):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, p=0.1, radius_min=0.1, radius_max=2.):
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def __call__(self, img):
do_it = random.random() <= self.prob
if not do_it:
return img
img = img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
return img
class Solarization(object):
"""
Apply Solarization to the PIL image.
"""
def __init__(self, p=0.2):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
class gray_scale(object):
"""
    Apply random grayscale to the PIL image.
"""
def __init__(self, p=0.2):
self.p = p
self.transf = transforms.Grayscale(3)
def __call__(self, img):
if random.random() < self.p:
return self.transf(img)
else:
return img
class horizontal_flip(object):
"""
    Apply random horizontal flip to the PIL image.
"""
def __init__(self, p=0.2,activate_pred=False):
self.p = p
self.transf = transforms.RandomHorizontalFlip(p=1.0)
def __call__(self, img):
if random.random() < self.p:
return self.transf(img)
else:
return img
def new_data_aug_generator(args = None):
img_size = args.input_size
remove_random_resized_crop = False
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
primary_tfl = []
scale=(0.08, 1.0)
interpolation='bicubic'
if remove_random_resized_crop:
primary_tfl = [
transforms.Resize(img_size, interpolation=3),
transforms.RandomCrop(img_size, padding=4,padding_mode='reflect'),
transforms.RandomHorizontalFlip()
]
else:
primary_tfl = [
RandomResizedCropAndInterpolation(
img_size, scale=scale, interpolation=interpolation),
transforms.RandomHorizontalFlip()
]
secondary_tfl = [transforms.RandomChoice([gray_scale(p=1.0),
Solarization(p=1.0),
GaussianBlur(p=1.0)])]
if args.color_jitter is not None and not args.color_jitter==0:
secondary_tfl.append(transforms.ColorJitter(args.color_jitter, args.color_jitter, args.color_jitter))
final_tfl = [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
return transforms.Compose(primary_tfl+secondary_tfl+final_tfl)
|
Cream/EfficientViT/classification/data/threeaugment.py/0
|
{
"file_path": "Cream/EfficientViT/classification/data/threeaugment.py",
"repo_id": "Cream",
"token_count": 1647
}
| 293 |
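`new_data_aug_generator` above only reads `input_size` and `color_jitter` from `args`. The namespace, module name, and dummy image below are assumptions used to show the transform end to end (timm and torchvision required):

```python
import argparse
from PIL import Image
from threeaugment import new_data_aug_generator  # assumed module name for the file above

args = argparse.Namespace(input_size=224, color_jitter=0.3)
transform = new_data_aug_generator(args)

img = Image.new("RGB", (256, 256), color=(128, 128, 128))
tensor = transform(img)        # float tensor of shape [3, 224, 224]
print(tensor.shape)
```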
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(750, 1101),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=train_pipeline,
data_root=data_root),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/DeepFashion_segmentation_gallery.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
|
Cream/EfficientViT/downstream/configs/_base_/datasets/deepfashion.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/datasets/deepfashion.py",
"repo_id": "Cream",
"token_count": 892
}
| 294 |
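The file above is an mmdetection dataset config. A hedged sketch of loading it with mmcv's legacy `Config` API (mmcv 1.x) to inspect a few resolved fields, with the path assumed relative to the downstream config root:

```python
from mmcv import Config  # mmcv < 2.0 API

cfg = Config.fromfile("configs/_base_/datasets/deepfashion.py")
print(cfg.data.train.ann_file)   # .../DeepFashion_segmentation_query.json
print(cfg.evaluation.metric)     # ['bbox', 'segm']
```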
# --------------------------------------------------------
# EfficientViT FPN Architecture for Downstream Tasks
# Copyright (c) 2022 Microsoft
# Adapted from mmdetection FPN and LightViT
# mmdetection: (https://github.com/open-mmlab/mmdetection)
# LightViT: (https://github.com/hunto/LightViT)
# Written by: Xinyu Liu
# --------------------------------------------------------
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.runner import auto_fp16
from mmdet.models.builder import NECKS
@NECKS.register_module()
class EfficientViTFPN(nn.Module):
r"""Feature Pyramid Network for EfficientViT.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, its actual mode is specified by `extra_convs_on_inputs`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
on the original feature from the backbone. If True,
it is equivalent to `add_extra_convs='on_input'`. If False, it is
equivalent to set `add_extra_convs='on_output'`. Default to True.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
num_extra_trans_convs (int): extra transposed conv on the output
with largest resolution. Default: 0.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(mode='nearest')`
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
num_extra_trans_convs=0,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
upsample_cfg=dict(mode='nearest')):
super(EfficientViTFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.num_extra_trans_convs = num_extra_trans_convs
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
if extra_convs_on_inputs:
# TODO: deprecate `extra_convs_on_inputs`
warnings.simplefilter('once')
warnings.warn(
'"extra_convs_on_inputs" will be deprecated in v2.9.0,'
'Please use "add_extra_convs"', DeprecationWarning)
self.add_extra_convs = 'on_input'
else:
self.add_extra_convs = 'on_output'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
assert extra_levels >= num_extra_trans_convs
extra_levels -= num_extra_trans_convs
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
# add extra transposed convs
self.extra_trans_convs = nn.ModuleList()
self.extra_fpn_convs = nn.ModuleList()
for i in range(num_extra_trans_convs):
extra_trans_conv = TransposedConvModule(
out_channels,
out_channels,
2,
stride=2,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
self.extra_trans_convs.append(extra_trans_conv)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.extra_fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of FPN module."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
laterals[i - 1] += F.interpolate(laterals[i],
**self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, **self.upsample_cfg)
# extra transposed convs for outputs with extra scales
extra_laterals = []
if self.num_extra_trans_convs > 0:
prev_lateral = laterals[0]
for i in range(self.num_extra_trans_convs):
extra_lateral = self.extra_trans_convs[i](prev_lateral)
extra_laterals.insert(0, extra_lateral)
prev_lateral = extra_lateral
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs) + len(extra_laterals):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - len(extra_laterals) - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs - len(extra_laterals)): # Not called
print("i: {}".format(i), self.fpn_convs[i])
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
# part 3: add extra transposed convs
if self.num_extra_trans_convs > 0:
# apply 3x3 conv on the larger feat (1/8) after 3x3 trans conv
# because the 3x3 trans conv is on the lateral
# thus no extra 1x1 laterals are required
extra_outs = [
self.extra_fpn_convs[i](extra_laterals[i])
for i in range(self.num_extra_trans_convs)
]
# 1 + 4 (3+1extra) = 5
assert (len(extra_outs) + len(outs)) == self.num_outs, f"{len(extra_outs)} + {len(outs)} != {self.num_outs}"
return tuple(extra_outs + outs)
class TransposedConvModule(ConvModule):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None,
norm_cfg=None, act_cfg=..., inplace=True,
**kwargs):
super(TransposedConvModule, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, conv_cfg,
norm_cfg, act_cfg, inplace, **kwargs)
self.conv = nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=self.with_bias
)
# Use msra init by default
self.init_weights()
|
Cream/EfficientViT/downstream/efficientvit_fpn.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/efficientvit_fpn.py",
"repo_id": "Cream",
"token_count": 6310
}
| 295 |
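The heart of `EfficientViTFPN.forward` above is the standard FPN top-down pathway: 1x1 laterals, nearest-neighbor upsampling and addition, then 3x3 smoothing convs. The dependency-free sketch below illustrates only that fusion step and is not the mmdet module itself:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyFPN(nn.Module):
    def __init__(self, in_channels=(64, 128, 256), out_channels=64):
        super().__init__()
        self.laterals = nn.ModuleList(nn.Conv2d(c, out_channels, 1) for c in in_channels)
        self.smooth = nn.ModuleList(nn.Conv2d(out_channels, out_channels, 3, padding=1)
                                    for _ in in_channels)

    def forward(self, feats):
        laterals = [lat(x) for lat, x in zip(self.laterals, feats)]
        for i in range(len(laterals) - 1, 0, -1):  # top-down: add upsampled coarser map
            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], size=laterals[i - 1].shape[2:], mode="nearest")
        return [s(l) for s, l in zip(self.smooth, laterals)]

feats = [torch.randn(1, c, s, s) for c, s in zip((64, 128, 256), (32, 16, 8))]
print([tuple(o.shape) for o in TinyFPN()(feats)])
# [(1, 64, 32, 32), (1, 64, 16, 16), (1, 64, 8, 8)]
```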
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
Cream/MiniViT/Mini-DeiT/engine.py/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/engine.py",
"repo_id": "Cream",
"token_count": 1452
}
| 296 |
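`train_one_epoch` above delegates scaling, backward, clipping, and the optimizer step to an external `loss_scaler` (timm's `NativeScaler` in DeiT). The bare-PyTorch AMP pattern it wraps looks roughly like the sketch below; it requires a CUDA device, and the tiny model and data are placeholders:

```python
import torch
import torch.nn.functional as F

model = torch.nn.Linear(16, 4).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler()

samples = torch.randn(8, 16, device="cuda")
targets = torch.randint(0, 4, (8,), device="cuda")

with torch.cuda.amp.autocast():
    loss = F.cross_entropy(model(samples), targets)

optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.unscale_(optimizer)                                     # before gradient clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
scaler.step(optimizer)
scaler.update()
```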
# Mini-Swin
This repo contains the MiniViT implementation for Swin Transformers.
## Model Zoo
Model | Params. | Input | Top-1 Acc. % | Top-5 Acc. % | Download link
--- |:---:|:---:|:---:|:---:|:---:
Mini-Swin-T | 12M | 224x224 | 81.3 | 95.7 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-tiny-12m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_tiny.txt)
Mini-Swin-S | 26M | 224x224 | 83.9 | 97.0 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-small-26m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_small.txt)
Mini-Swin-B | 46M | 224x224 | 84.5| 97.3 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-base-46m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_base.txt)
Mini-Swin-B | 47M | 384x384 | 85.5 | 97.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-base-224to384.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_base_384.txt)
## Usage
Create the environment:
```bash
pip install -r requirements.txt
```
## Data Preparation
You can download the ImageNet-1K dataset from [`http://www.image-net.org/`](http://www.image-net.org/).
The training and validation sets should be saved as `*.tar` archives:
```
ImageNet/
├── train.tar
└── val.tar
```
Our code also supports storing images as individual files as follows:
```
ImageNet/
├── train
│ ├── n01440764
│ │ ├── n01440764_10026.JPEG
│ │ ├── n01440764_10027.JPEG
...
├── val
│ ├── n01440764
│ │ ├── ILSVRC2012_val_00000293.JPEG
```
## Training
Training Mini-Swin-Tiny
```bash
python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_tiny_patch4_window7_224_minivit_sharenum6.yaml --data-path <data-path> --output <output-folder> --tag mini-swin-tiny --batch-size 128 --is_sep_layernorm --is_transform_heads --is_transform_ffn --do_distill --alpha 0.0 --teacher <teacher-path> --attn_loss --hidden_loss --hidden_relation --student_layer_list 11_9_7_5_3_1 --teacher_layer_list 23_21_15_9_3_1 --hidden_weight 0.1
```
<details>
<summary>Training Mini-Swin-Small</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_small_patch4_window7_224_minivit_sharenum2.yaml --data-path <data-path> --output <output-folder> --tag mini-swin-small --batch-size 128 --is_sep_layernorm --is_transform_heads --is_transform_ffn --do_distill --alpha 0.0 --teacher <teacher-path> --attn_loss --hidden_loss --hidden_relation --student_layer_list 23_21_15_9_3_1 --teacher_layer_list 23_21_15_9_3_1 --hidden_weight 0.1
</code></pre>
</details>
<details>
<summary>Training Mini-Swin-Base</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_base_patch4_window7_224_minivit_sharenum2.yaml --data-path <data-path> --output <output-folder> --tag mini-swin-base --batch-size 128 --is_sep_layernorm --is_transform_heads --is_transform_ffn --do_distill --alpha 0.0 --teacher <teacher-path> --attn_loss --hidden_loss --hidden_relation --student_layer_list 23_21_15_9_3_1 --teacher_layer_list 23_21_15_9_3_1 --hidden_weight 0.1
</code></pre>
</details>
### Finetune Mini-Swin-B with resolution 384:
```bash
python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_base_patch4_window7_224to384_minivit_sharenum2_adamw.yaml --data-path <data-path> --output <output-folder> --tag mini-swin-base-224to384 --batch-size 16 --accumulation-steps 2 --is_sep_layernorm --is_transform_heads --is_transform_ffn --resume <model-224-ckpt> --resume_weight_only --train_224to384
```
## Evaluation
Run the following commands for evaluation:
Evaluate Mini-Swin-Tiny
```bash
python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_tiny_patch4_window7_224_minivit_sharenum6.yaml --data-path <data-path> --batch-size 64 --is_sep_layernorm --is_transform_ffn --is_transform_heads --resume checkpoints/mini-swin-tiny-12m.pth --eval
```
<details>
<summary>Evaluate Mini-Swin-Small</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_small_patch4_window7_224_minivit_sharenum2.yaml --data-path <data-path> --batch-size 64 --is_sep_layernorm --is_transform_ffn --is_transform_heads --resume checkpoints/mini-swin-small-26m.pth --eval
</code></pre>
</details>
<details>
<summary>Evaluate Mini-Swin-Base</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_base_patch4_window7_224_minivit_sharenum2.yaml --data-path <data-path> --batch-size 64 --is_sep_layernorm --is_transform_ffn --is_transform_heads --resume checkpoints/mini-swin-base-46m.pth --eval
</code></pre>
</details>
<details>
<summary>Evaluate Mini-Swin-Base-384</summary>
<pre><code>python -m torch.distributed.launch --nproc_per_node 8 main.py --cfg configs/swin_base_patch4_window7_224to384_minivit_sharenum2_adamw.yaml --data-path <data-path> --batch-size 32 --is_sep_layernorm --is_transform_ffn --is_transform_heads --resume checkpoints/mini-swin-base-224to384.pth --eval
</code></pre>
</details>
## Bibtex
If this repo is helpful to you, please consider citing it. Thank you! :)
```bibtex
@InProceedings{MiniViT,
title = {MiniViT: Compressing Vision Transformers With Weight Multiplexing},
author = {Zhang, Jinnian and Peng, Houwen and Wu, Kan and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2022},
pages = {12145-12154}
}
```
## License
Our code is based on [Swin Transformer](https://github.com/microsoft/Swin-Transformer). Thank you!
[MIT License](./LICENSE)
|
Cream/MiniViT/Mini-Swin/README.md/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/README.md",
"repo_id": "Cream",
"token_count": 2359
}
| 297 |
import os
import zipfile
import io
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def is_zip_path(img_or_path):
"""judge if this is a zip path"""
return '.zip@' in img_or_path
class ZipReader(object):
"""A class to read zipped files"""
zip_bank = dict()
def __init__(self):
super(ZipReader, self).__init__()
@staticmethod
def get_zipfile(path):
zip_bank = ZipReader.zip_bank
if path not in zip_bank:
zfile = zipfile.ZipFile(path, 'r')
zip_bank[path] = zfile
return zip_bank[path]
@staticmethod
def split_zip_style_path(path):
        pos_at = path.find('@')
        assert pos_at != -1, "character '@' is not found in the given path '%s'" % path
zip_path = path[0: pos_at]
folder_path = path[pos_at + 1:]
folder_path = str.strip(folder_path, '/')
return zip_path, folder_path
@staticmethod
def list_folder(path):
zip_path, folder_path = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
folder_list = []
        for file_folder_name in zfile.namelist():
            file_folder_name = str.strip(file_folder_name, '/')
            if file_folder_name.startswith(folder_path) and \
                    len(os.path.splitext(file_folder_name)[-1]) == 0 and \
                    file_folder_name != folder_path:
                if len(folder_path) == 0:
                    folder_list.append(file_folder_name)
                else:
                    folder_list.append(file_folder_name[len(folder_path) + 1:])
return folder_list
@staticmethod
def list_files(path, extension=None):
if extension is None:
extension = ['.*']
zip_path, folder_path = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
file_lists = []
        for file_folder_name in zfile.namelist():
            file_folder_name = str.strip(file_folder_name, '/')
            if file_folder_name.startswith(folder_path) and \
                    str.lower(os.path.splitext(file_folder_name)[-1]) in extension:
                if len(folder_path) == 0:
                    file_lists.append(file_folder_name)
                else:
                    file_lists.append(file_folder_name[len(folder_path) + 1:])
return file_lists
@staticmethod
def read(path):
zip_path, path_img = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
data = zfile.read(path_img)
return data
@staticmethod
def imread(path):
zip_path, path_img = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
data = zfile.read(path_img)
try:
im = Image.open(io.BytesIO(data))
except:
print("ERROR IMG LOADED: ", path_img)
random_img = np.random.rand(224, 224, 3) * 255
im = Image.fromarray(np.uint8(random_img))
return im
|
Cream/MiniViT/Mini-Swin/data/zipreader.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/data/zipreader.py",
"repo_id": "Cream",
"token_count": 1494
}
| 298 |
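`ZipReader` above reads files through `archive.zip@inner/path` style paths, caching one open `ZipFile` per archive. A hedged usage sketch; the module name, archive, and member names are made up to show the path convention:

```python
from zipreader import ZipReader, is_zip_path  # assumed module name for the file above

path = "ImageNet/train.zip@train/n01440764/n01440764_10026.JPEG"  # hypothetical example
if is_zip_path(path):
    img = ZipReader.imread(path)  # PIL.Image; falls back to a random image on decode errors
    print(img.size)

# Listing operates on the folder part after the '@'.
class_folders = ZipReader.list_folder("ImageNet/train.zip@train")
jpegs = ZipReader.list_files("ImageNet/train.zip@train/n01440764", extension=[".jpeg", ".jpg"])
```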
# Neural Architecture Design and Search [](https://twitter.com/intent/tweet?text=A%20new%20collection%20of%20tiny%20and%20efficient%20models%20thru%20architecture%20design%20and%20search,%20SOTA%20performance!!&url=https://github.com/microsoft/Cream&via=houwen_peng&hashtags=NAS,ViT,vision_transformer)
***This is a collection of our NAS and Vision Transformer work***
> [**TinyCLIP**](./TinyCLIP) (```@ICCV'23```): **TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance**
> [**EfficientViT**](./EfficientViT) (```@CVPR'23```): **EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention**
> [**TinyViT**](./TinyViT) (```@ECCV'22```): **TinyViT: Fast Pretraining Distillation for Small Vision Transformers**
> [**MiniViT**](./MiniViT) (```@CVPR'22```): **MiniViT: Compressing Vision Transformers with Weight Multiplexing**
> [**CDARTS**](./CDARTS) (```@TPAMI'22```): **Cyclic Differentiable Architecture Search**
> [**AutoFormerV2**](./AutoFormerV2) (```@NeurIPS'21```): **Searching the Search Space of Vision Transformer**
> [**iRPE**](./iRPE) (```@ICCV'21```): **Rethinking and Improving Relative Position Encoding for Vision Transformer**
> [**AutoFormer**](./AutoFormer) (```@ICCV'21```): **AutoFormer: Searching Transformers for Visual Recognition**
> [**Cream**](./Cream) (```@NeurIPS'20```): **Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search**
We also implemented our NAS algorithms on Microsoft [**NNI**](https://github.com/microsoft/nni) (Neural Network Intelligence).
## News
- :sunny: Hiring research interns for next-generation model design, efficient large model inference: [email protected]
- :boom: Sep, 2023: Code for [**TinyCLIP**](./TinyCLIP) is now released.
- :boom: Jul, 2023: [**TinyCLIP**](./TinyCLIP) accepted to ICCV'23
- :boom: May, 2023: Code for [**EfficientViT**](./EfficientViT) is now released.
- :boom: Mar, 2023: [**EfficientViT**](./EfficientViT) accepted to CVPR'23
- :boom: Jul, 2022: Code for [**TinyViT**](./TinyViT) is now released.
- :boom: Apr, 2022: Code for [**MiniViT**](./MiniViT) is now released.
- :boom: Mar, 2022: [**MiniViT**](https://openaccess.thecvf.com/content/CVPR2022/html/Zhang_MiniViT_Compressing_Vision_Transformers_With_Weight_Multiplexing_CVPR_2022_paper.html) has been accepted by CVPR'22.
- :boom: Feb, 2022: Code for [**CDARTS**](./CDARTS) is now released.
- :boom: Feb, 2022: [**CDARTS**](./CDARTS) has been accepted by TPAMI'22.
- :boom: Jan, 2022: Code for [**AutoFormerV2**](./AutoFormerV2) is now released.
- :boom: Oct, 2021: [**AutoFormerV2**](./AutoFormerV2) has been accepted by NeurIPS'21, code will be released soon.
- :boom: Aug, 2021: Code for [**AutoFormer**](./AutoFormer) is now released.
- :boom: July, 2021: [**iRPE code**](./iRPE) (**with CUDA Acceleration**) is now released. Paper is [here](https://openaccess.thecvf.com/content/ICCV2021/html/Wu_Rethinking_and_Improving_Relative_Position_Encoding_for_Vision_Transformer_ICCV_2021_paper.html).
- :boom: July, 2021: [**iRPE**](https://openaccess.thecvf.com/content/ICCV2021/html/Wu_Rethinking_and_Improving_Relative_Position_Encoding_for_Vision_Transformer_ICCV_2021_paper.html) has been accepted by ICCV'21.
- :boom: July, 2021: [**AutoFormer**](https://openaccess.thecvf.com/content/ICCV2021/html/Chen_AutoFormer_Searching_Transformers_for_Visual_Recognition_ICCV_2021_paper.html) has been accepted by ICCV'21.
- :boom: July, 2021: [**AutoFormer**](./AutoFormer) is now available on [arXiv](https://arxiv.org/abs/2107.00651).
- :boom: Oct, 2020: Code for [**Cream**](./Cream) is now released.
- :boom: Oct, 2020: [**Cream**](./Cream) was accepted to NeurIPS'20
## Works
### [TinyCLIP](./TinyCLIP)
**TinyCLIP** is a novel **cross-modal distillation** method for large-scale language-image pre-trained models. The method introduces two core techniques: **affinity mimicking** and **weight inheritance**. This work unleashes the capacity of small CLIP models, fully leveraging large-scale models as well as pre-training data and striking the best trade-off between speed and accuracy.
<div align="center">
<img width="85%" alt="TinyCLIP overview" src="./TinyCLIP/figure/TinyCLIP.jpg"/>
</div>
### [EfficientViT](./EfficientViT)
**EfficientViT** is a family of high-speed vision transformers. It is built with a new memory efficient building block with a **sandwich layout**, and an efficient **cascaded group attention** operation which mitigates attention computation redundancy.
<div align="center">
<img width="69%" alt="EfficientViT overview" src="./EfficientViT/classification/.figures/efficientvit_main_static.png"/>
</div>
### [TinyViT](./TinyViT)
TinyViT is a new family of **tiny and efficient** vision transformers pretrained on **large-scale** datasets with our proposed **fast distillation framework**. The central idea is to **transfer knowledge** from **large pretrained models** to small ones. The logits of large teacher models are sparsified and stored in disk in advance to **save the memory cost and computation overheads**.
<div align="center">
<img width="80%" alt="TinyViT overview" src="./TinyViT/.figure/framework.png"/>
</div>
### [MiniViT](./MiniViT)
MiniViT is a new compression framework that achieves parameter reduction in vision transformers while retaining the same performance. The central idea of MiniViT is to multiplex the weights of consecutive transformer blocks. Specifically, we make the weights shared across layers, while imposing a transformation on the weights to increase diversity. Weight distillation over self-attention is also applied to transfer knowledge from large-scale ViT models to weight-multiplexed compact models.
<div align="center">
<img width="70%" alt="MiniViT overview" src="./MiniViT/.figure/framework.png"/>
</div>
### [CDARTS](./CDARTS)
In this work, we propose new joint optimization objectives and a novel Cyclic Differentiable ARchiTecture Search framework, dubbed CDARTS. Considering the structure difference, CDARTS builds a cyclic feedback mechanism between the search and evaluation networks with introspective distillation.
<div align="center">
<img width="50%" alt="CDARTS overview" src="CDARTS/demo/framework1.png"/>
</div>
### [AutoFormerV2](./AutoFormerV2)
In this work, instead of searching the architecture in a predefined search space, with the help of AutoFormer, we proposed to search the search space to automatically find a great search space first.
After that we search the architectures in the searched space. In addition, we provide insightful observations and guidelines for general vision transformer design.
<div align="center">
<img width="70%" alt="AutoFormerV2 overview" src="AutoFormerV2/.figure/overview.jpg"/>
</div>
### [AutoFormer](./AutoFormer)
AutoFormer is a new one-shot architecture search framework dedicated to vision transformer search. It entangles the weights of different vision transformer blocks in the same layers during supernet training.
Benefiting from the strategy, the trained supernet allows thousands of subnets to be very well-trained. Specifically, the performance of these subnets with weights inherited from the supernet is comparable to those retrained from scratch.
<div align="center">
<img width="70%" alt="AutoFormer overview" src="AutoFormer/.figure/overview.png"/>
</div>
### [iRPE](./iRPE)
**Image RPE (iRPE for short) methods are new relative position encoding methods dedicated to 2D images**, considering directional relative distance modeling as well as the interactions between queries and relative position embeddings in the self-attention mechanism. The proposed iRPE methods are simple and lightweight, being easily plugged into transformer blocks. Experiments demonstrate that solely due to the proposed encoding methods, **DeiT and DETR obtain up to 1.5% (top-1 Acc) and 1.3% (mAP) stable improvements** over their original versions on ImageNet and COCO respectively, without tuning any extra hyperparameters such as learning rate and weight decay. Our ablation and analysis also yield interesting findings, some of which run counter to previous understanding.
<div align="center">
<img width="70%" alt="iRPE overview" src="iRPE/iRPE.png"/>
</div>
### [Cream](./Cream)
**[[Paper]](https://papers.nips.cc/paper/2020/file/d072677d210ac4c03ba046120f0802ec-Paper.pdf) [[Models-Google Drive]](https://drive.google.com/drive/folders/1NLGAbBF9bA1IUAxKlk2VjgRXhr6RHvRW?usp=sharing)[[Models-Baidu Disk (password: wqw6)]](https://pan.baidu.com/s/1TqQNm2s14oEdyNPimw3T9g) [[Slides]]() [[BibTex]](https://scholar.googleusercontent.com/scholar.bib?q=info:ICWVXc_SsKAJ:scholar.google.com/&output=citation&scisdr=CgUmooXfEMfTi0cV5aU:AAGBfm0AAAAAX7sQ_aXoamdKRaBI12tAVN8REq1VKNwM&scisig=AAGBfm0AAAAAX7sQ_RdYtp6BSro3zgbXVJU2MCgsG730&scisf=4&ct=citation&cd=-1&hl=ja)** <br/>
In this work, we present a simple yet effective architecture distillation method. The central idea is that subnetworks can learn collaboratively and teach each other throughout the training process, aiming to boost the convergence of individual models. We introduce the concept of prioritized path, which refers to the architecture candidates exhibiting superior performance during training. Distilling knowledge from the prioritized paths is able to boost the training of subnetworks. Since the prioritized paths are changed on the fly depending on their performance and complexity, the final obtained paths are the cream of the crop.
<div >
<img src="Cream/demo/intro.jpg" width="90%"/>
</div>
## Bibtex
```bibtex
@InProceedings{tinyclip,
title = {TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance},
author = {Wu, Kan and Peng, Houwen and Zhou, Zhenghong and Xiao, Bin and Liu, Mengchen and Yuan, Lu and Xuan, Hong and Valenzuela, Michael and Chen, Xi (Stephen) and Wang, Xinggang and Chao, Hongyang and Hu, Han},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2023},
pages = {21970-21980}
}
@InProceedings{liu2023efficientvit,
title = {EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention},
author = {Liu, Xinyu and Peng, Houwen and Zheng, Ningxin and Yang, Yuqing and Hu, Han and Yuan, Yixuan},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2023},
}
@InProceedings{tiny_vit,
title={TinyViT: Fast Pretraining Distillation for Small Vision Transformers},
author={Wu, Kan and Zhang, Jinnian and Peng, Houwen and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu},
booktitle={European conference on computer vision (ECCV)},
year={2022}
}
@InProceedings{MiniViT,
title = {MiniViT: Compressing Vision Transformers With Weight Multiplexing},
author = {Zhang, Jinnian and Peng, Houwen and Wu, Kan and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2022},
pages = {12145-12154}
}
@article{CDARTS,
title={Cyclic Differentiable Architecture Search},
author={Yu, Hongyuan and Peng, Houwen and Huang, Yan and Fu, Jianlong and Du, Hao and Wang, Liang and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
year={2022}
}
@InProceedings{S3,
title={Searching the Search Space of Vision Transformer},
author={Chen, Minghao and Wu, Kan and Ni, Bolin and Peng, Houwen and Liu, Bei and Fu, Jianlong and Chao, Hongyang and Ling, Haibin},
booktitle={Conference and Workshop on Neural Information Processing Systems (NeurIPS)},
year={2021}
}
@InProceedings{iRPE,
title = {Rethinking and Improving Relative Position Encoding for Vision Transformer},
author = {Wu, Kan and Peng, Houwen and Chen, Minghao and Fu, Jianlong and Chao, Hongyang},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {10033-10041}
}
@InProceedings{AutoFormer,
title = {AutoFormer: Searching Transformers for Visual Recognition},
author = {Chen, Minghao and Peng, Houwen and Fu, Jianlong and Ling, Haibin},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {12270-12280}
}
@article{Cream,
title={Cream of the Crop: Distilling Prioritized Paths For One-Shot Neural Architecture Search},
author={Peng, Houwen and Du, Hao and Yu, Hongyuan and Li, Qi and Liao, Jing and Fu, Jianlong},
journal={Advances in Neural Information Processing Systems},
volume={33},
year={2020}
}
```
## License
Licensed under the MIT License.
|
Cream/README.md/0
|
{
"file_path": "Cream/README.md",
"repo_id": "Cream",
"token_count": 4155
}
| 299 |
export NNODES=1
export GPUS_PER_NODE=8
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES"
torchrun $DISTRIBUTED_ARGS src/training/main.py \
--save-frequency 1 \
--report-to wandb \
--train-data synthetic \
--dataset-type synthetic \
--imagenet-val ./ImageNet \
--warmup 3000 \
--batch-size 1024 \
--epochs 6 \
--workers 8 \
--model ViT-B-32 \
--name exp_name \
--seed 0 \
--local-loss \
--grad-checkpointing \
--logs ./outputs/ViT-B-32 \
--lr 0.0001 \
--gather-with-grad \
--pretrained-image-file ViT-B-32@laion2b_e16 \
--pretrained-text-file ViT-B-32@laion2b_e16 \
--distillation-teacher ViT-B-32@laion2b_e16 \
--norm_gradient_clip 5 \
--train-num-samples 400000000 \
--prune-step 3000 \
--prune-image \
--prune-text \
--total-loss-flag \
--target-sparsity 0.25 \
--start-sparsity 0.0 \
--sparsity-warmup 1000 \
--logit-scale 50
|
Cream/TinyCLIP/script/auto_weight_inherit_100to75.sh/0
|
{
"file_path": "Cream/TinyCLIP/script/auto_weight_inherit_100to75.sh",
"repo_id": "Cream",
"token_count": 362
}
| 300 |
{
"embed_dim": 1024,
"vision_cfg": {
"image_size": 224,
"layers": [
3,
4,
6,
3
],
"width": 64,
"patch_size": null
},
"text_cfg": {
"context_length": 77,
"vocab_size": 49408,
"width": 512,
"heads": 8,
"layers": 12
}
}
|
Cream/TinyCLIP/src/open_clip/model_configs/RN50.json/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/model_configs/RN50.json",
"repo_id": "Cream",
"token_count": 234
}
| 301 |
__version__ = '2.0.2'
|
Cream/TinyCLIP/src/open_clip/version.py/0
|
{
"file_path": "Cream/TinyCLIP/src/open_clip/version.py",
"repo_id": "Cream",
"token_count": 12
}
| 302 |
# --------------------------------------------------------
# reference: https://github.com/crj1998/pruning/tree/master
# --------------------------------------------------------
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
color_list = ['pink', 'deepskyblue']
my_cmap = LinearSegmentedColormap.from_list('custom', color_list)
cm.register_cmap(cmap=my_cmap)
def plot(heads, intermediates, name):
fig, ax = plt.subplots(1, 2, facecolor='white', figsize=(
10, 4), dpi=120, gridspec_kw={'width_ratios': [1.15, 3]})
heads_num = heads.shape[1]
ax[0].matshow(heads, cmap="custom", vmin=0.0, vmax=1.0)
ax[0].set_xlabel("Heads")
ax[0].set_ylabel("Layer")
ax[0].set_xticks([i for i in range(heads_num)], [str(i + 1)
for i in range(heads_num)])
ax[0].set_yticks([i for i in range(12)], [str(i + 1) for i in range(12)])
# Minor ticks
ax[0].set_xticks([i - 0.5 for i in range(heads_num)], minor=True)
ax[0].set_yticks([i - 0.5 for i in range(12)], minor=True)
ax[0].xaxis.tick_bottom()
ax[0].tick_params('both', length=0, width=0, which='both')
# Gridlines based on minor ticks
ax[0].grid(which='minor', color='w', linestyle='-', linewidth=1)
ax[0].set_title('MHAs')
channel = intermediates.shape[1] / 4
intermediates = intermediates.repeat(100, axis=0)
ax[1].matshow(intermediates, cmap="custom", vmin=0.0, vmax=1.0)
ax[1].set_xlabel("FFNs channels")
ax[1].set_xticks([i * channel for i in range(1, 5)],
[f'{i}.0x' for i in range(1, 5)])
ax[1].set_yticks([i * 100 + 50 for i in range(12)],
[str(i + 1) for i in range(12)])
ax[1].set_yticks([i * 100 for i in range(12)], minor=True)
# Minor ticks
ax[1].xaxis.tick_bottom()
ax[1].yaxis.tick_right()
ax[1].tick_params('both', length=0, width=0, which='both')
# Gridlines based on minor ticks
ax[1].grid(which='minor', axis='y', color='w', linestyle='-', linewidth=1)
ax[1].set_title('FFNs')
fig.tight_layout()
fig.suptitle(name)
return fig
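# Hedged usage sketch (not part of the original file): `plot` expects binary
# keep/prune masks, `heads` of shape (12, num_heads) and `intermediates` of
# shape (12, channels), where 1 keeps a unit and 0 prunes it.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo_heads = (rng.random((12, 12)) > 0.3).astype(float)
    demo_ffns = (rng.random((12, 3072)) > 0.3).astype(float)
    fig = plot(demo_heads, demo_ffns, 'demo pruning masks')
    fig.savefig('demo_masks.png')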
|
Cream/TinyCLIP/src/training/viz.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/viz.py",
"repo_id": "Cream",
"token_count": 948
}
| 303 |
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class RepeatAugSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a sample will be visible to a
different process (GPU). Heavily based on torch.utils.data.DistributedSampler
This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py
Used in
Copyright (c) 2015-present, Facebook, Inc.
"""
def __init__(
self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
num_repeats=3,
selected_round=256,
selected_ratio=0,
):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.shuffle = shuffle
self.num_repeats = num_repeats
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# Determine the number of samples to select per epoch for each rank.
# num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked
# via selected_ratio and selected_round args.
selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0
if selected_round:
self.num_selected_samples = int(math.floor(
len(self.dataset) // selected_round * selected_round / selected_ratio))
else:
self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio))
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(start=0, end=len(self.dataset))
# produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
indices = torch.repeat_interleave(indices, repeats=self.num_repeats, dim=0).tolist()
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size > 0:
indices += indices[:padding_size]
assert len(indices) == self.total_size
# subsample per rank
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# return up to num selected samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
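# Hedged usage sketch (not part of the original file): both samplers only need
# a sized dataset; passing num_replicas/rank explicitly avoids having to
# initialise torch.distributed for a quick check.
if __name__ == '__main__':
    dummy_dataset = list(range(10))
    ordered = OrderedDistributedSampler(dummy_dataset, num_replicas=2, rank=0)
    print(list(iter(ordered)))  # rank-0 slice: [0, 2, 4, 6, 8]
    ra = RepeatAugSampler(dummy_dataset, num_replicas=2, rank=0,
                          num_repeats=3, selected_round=0)
    ra.set_epoch(0)
    print(len(ra), list(iter(ra)))  # 5 indices drawn from the repeated list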
|
Cream/TinyViT/data/augmentation/distributed_sampler.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/distributed_sampler.py",
"repo_id": "Cream",
"token_count": 2098
}
| 304 |
import torch
import torchvision.transforms.functional as F
try:
from torchvision.transforms.functional import InterpolationMode
has_interpolation_mode = True
except ImportError:
has_interpolation_mode = False
from PIL import Image
import warnings
import math
from .aug_random import random
import numpy as np
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype)
_pil_interpolation_to_str = {
Image.NEAREST: 'nearest',
Image.BILINEAR: 'bilinear',
Image.BICUBIC: 'bicubic',
Image.BOX: 'box',
Image.HAMMING: 'hamming',
Image.LANCZOS: 'lanczos',
}
_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()}
if has_interpolation_mode:
_torch_interpolation_to_str = {
InterpolationMode.NEAREST: 'nearest',
InterpolationMode.BILINEAR: 'bilinear',
InterpolationMode.BICUBIC: 'bicubic',
InterpolationMode.BOX: 'box',
InterpolationMode.HAMMING: 'hamming',
InterpolationMode.LANCZOS: 'lanczos',
}
_str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()}
else:
_pil_interpolation_to_torch = {}
_torch_interpolation_to_str = {}
def str_to_pil_interp(mode_str):
return _str_to_pil_interpolation[mode_str]
def str_to_interp_mode(mode_str):
if has_interpolation_mode:
return _str_to_torch_interpolation[mode_str]
else:
return _str_to_pil_interpolation[mode_str]
def interp_mode_to_str(mode):
if has_interpolation_mode:
return _torch_interpolation_to_str[mode]
else:
return _pil_interpolation_to_str[mode]
_RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic'))
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = str_to_interp_mode(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
else:
interpolate_str = interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
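# Hedged usage sketch (not part of the original file): crop a dummy PIL image
# with a randomly chosen bilinear/bicubic interpolation. Because of the relative
# imports above, run this module as part of its package (e.g. via `python -m`).
if __name__ == '__main__':
    demo_img = Image.new('RGB', (320, 240), color=(128, 128, 128))
    rrc = RandomResizedCropAndInterpolation(224, interpolation='random')
    print(rrc)
    print(rrc(demo_img).size)  # -> (224, 224)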
|
Cream/TinyViT/data/augmentation/transforms.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/transforms.py",
"repo_id": "Cream",
"token_count": 2875
}
| 305 |
# --------------------------------------------------------
# TinyViT Model Builder
# Copyright (c) 2022 Microsoft
# --------------------------------------------------------
from .tiny_vit import TinyViT
def build_model(config):
model_type = config.MODEL.TYPE
if model_type == 'tiny_vit':
M = config.MODEL.TINY_VIT
model = TinyViT(img_size=config.DATA.IMG_SIZE,
in_chans=M.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dims=M.EMBED_DIMS,
depths=M.DEPTHS,
num_heads=M.NUM_HEADS,
window_sizes=M.WINDOW_SIZES,
mlp_ratio=M.MLP_RATIO,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
mbconv_expand_ratio=M.MBCONV_EXPAND_RATIO,
local_conv_size=M.LOCAL_CONV_SIZE,
layer_lr_decay=config.TRAIN.LAYER_LR_DECAY,
)
elif model_type == 'clip_vit_large14_224':
from .clip import CLIP
kwargs = {
'embed_dim': 768, 'image_resolution': 224,
'vision_layers': 24, 'vision_width': 1024, 'vision_patch_size': 14,
"num_classes": config.MODEL.NUM_CLASSES,
}
model = CLIP(**kwargs)
else:
        raise NotImplementedError(f"Unknown model: {model_type}")
return model
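# Hedged usage sketch (not part of the original file): `build_model` only reads
# the attributes below, so a SimpleNamespace can stand in for the yacs config;
# the concrete values are illustrative, not an official TinyViT variant. Run as
# part of the package (e.g. via `python -m`) because of the relative import.
if __name__ == '__main__':
    from types import SimpleNamespace as NS
    cfg = NS(
        DATA=NS(IMG_SIZE=224),
        TRAIN=NS(USE_CHECKPOINT=False, LAYER_LR_DECAY=1.0),
        MODEL=NS(
            TYPE='tiny_vit', NUM_CLASSES=1000, DROP_RATE=0.0, DROP_PATH_RATE=0.1,
            TINY_VIT=NS(
                IN_CHANS=3, EMBED_DIMS=[64, 128, 160, 320], DEPTHS=[2, 2, 6, 2],
                NUM_HEADS=[2, 4, 5, 10], WINDOW_SIZES=[7, 7, 14, 7],
                MLP_RATIO=4.0, MBCONV_EXPAND_RATIO=4.0, LOCAL_CONV_SIZE=3,
            ),
        ),
    )
    model = build_model(cfg)
    print(f'{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters')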
|
Cream/TinyViT/models/build.py/0
|
{
"file_path": "Cream/TinyViT/models/build.py",
"repo_id": "Cream",
"token_count": 863
}
| 306 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
# suppress pycocotools prints
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
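# Hedged usage sketch (not part of the original file): the evaluator consumes
# per-image predictions keyed by image id, e.g. the output of the DETR
# postprocessors (names below are illustrative):
#
#   evaluator = CocoEvaluator(coco_gt, iou_types=("bbox",))
#   for images, targets in data_loader:
#       results = postprocessor(model(images), target_sizes)
#       evaluator.update({t["image_id"].item(): r for t, r in zip(targets, results)})
#   evaluator.synchronize_between_processes()
#   evaluator.accumulate()
#   evaluator.summarize()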
|
Cream/iRPE/DETR-with-iRPE/datasets/coco_eval.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/datasets/coco_eval.py",
"repo_id": "Cream",
"token_count": 4188
}
| 307 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
# Self-attention with 2D relative position encoding
from .rpe_attention import RPEMultiheadAttention, irpe
RPE_HELP = '''
we can use a string to represent a kind of 2D-RPE
Format:
rpe-{ratio}-{method}-{mode}-{shared_head}-{rpe_on}
e.g. rpe-2.0-product-ctx-1-k
it represents
ratio=2.0,
method='product',
mode='ctx',
shared_head=True,
rpe_on='k',
ratio | num_buckets
------|------------
1.9 | 49
2.0 | 81
2.5 | 121
3.0 | 169
'''
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False,
return_intermediate_dec=False,
enc_rpe2d=''):
super().__init__()
if enc_rpe2d is None or len(enc_rpe2d) == 0:
rpe_config = None
else:
try:
# rpe-{ratio}-{method}-{mode}-{shared_head}-{rpe_on}
sp = enc_rpe2d.split('-')
assert len(sp) == 6, len(sp)
assert sp[0] == 'rpe'
ratio = float(sp[1])
method = sp[2]
mode = sp[3]
shared_head = bool(int(sp[4]))
rpe_on = sp[5]
rpe_config = irpe.get_rpe_config(
ratio=ratio,
method=method,
mode=mode,
shared_head=shared_head,
skip=0,
rpe_on=rpe_on,
)
except:
print("Wrong Format:" + RPE_HELP)
raise
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before,
rpe_config=rpe_config)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
mask = mask.flatten(1)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(
src, src_key_padding_mask=mask, pos=pos_embed, hw=(h, w))
hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
hw=None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
pos=pos,
hw=hw)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False,
rpe_config=None):
super().__init__()
self.self_attn = RPEMultiheadAttention(
d_model, nhead, dropout=dropout, rpe_config=rpe_config)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
hw=None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask,
hw=hw)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
hw=None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask,
hw=hw)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
hw=None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos, hw=hw)
return self.forward_post(src, src_mask, src_key_padding_mask, pos, hw=hw)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
# 2D relative position encoding
enc_rpe2d=args.enc_rpe2d,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
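# Hedged usage sketch (not part of the original file): builds the model with a
# 2D-RPE string for the encoder and runs dummy tensors through it. Assumes the
# rpe_attention package (and its optional CUDA ops) is importable; run as part
# of the package (e.g. via `python -m`) because of the relative import.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(hidden_dim=256, dropout=0.1, nheads=8, dim_feedforward=2048,
                     enc_layers=6, dec_layers=6, pre_norm=False,
                     enc_rpe2d='rpe-2.0-product-ctx-1-k')
    model = build_transformer(args)
    bs, c, h, w = 2, 256, 16, 16
    src, pos = torch.rand(bs, c, h, w), torch.rand(bs, c, h, w)
    mask = torch.zeros(bs, h, w, dtype=torch.bool)
    query_embed = torch.rand(100, c)
    hs, memory = model(src, mask, query_embed, pos)
    print(hs.shape, memory.shape)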
|
Cream/iRPE/DETR-with-iRPE/models/transformer.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/models/transformer.py",
"repo_id": "Cream",
"token_count": 7291
}
| 308 |
[flake8]
max-line-length = 120
ignore = F401,E402,F403,W503,W504
|
Cream/iRPE/DeiT-with-iRPE/tox.ini/0
|
{
"file_path": "Cream/iRPE/DeiT-with-iRPE/tox.ini",
"repo_id": "Cream",
"token_count": 30
}
| 309 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from timm.data import create_loader
import torch
import torch.utils.data
import torchvision.datasets as datasets
from .transformas import build_transforms
from .samplers import RASampler
def build_dataset(cfg, is_train):
dataset = None
if 'imagenet' in cfg.DATASET.DATASET:
dataset = _build_imagenet_dataset(cfg, is_train)
else:
        raise ValueError('Unknown dataset: {}'.format(cfg.DATASET.DATASET))
return dataset
def _build_image_folder_dataset(cfg, is_train):
transforms = build_transforms(cfg, is_train)
dataset_name = cfg.DATASET.TRAIN_SET if is_train else cfg.DATASET.TEST_SET
dataset = datasets.ImageFolder(
os.path.join(cfg.DATASET.ROOT, dataset_name), transforms
)
logging.info(
'=> load samples: {}, is_train: {}'
.format(len(dataset), is_train)
)
return dataset
def _build_imagenet_dataset(cfg, is_train):
transforms = build_transforms(cfg, is_train)
dataset_name = cfg.DATASET.TRAIN_SET if is_train else cfg.DATASET.TEST_SET
dataset = datasets.ImageFolder(
os.path.join(cfg.DATASET.ROOT, dataset_name), transforms
)
return dataset
def build_dataloader(cfg, is_train=True, distributed=False):
if is_train:
batch_size_per_gpu = cfg.TRAIN.BATCH_SIZE_PER_GPU
shuffle = True
else:
batch_size_per_gpu = cfg.TEST.BATCH_SIZE_PER_GPU
shuffle = False
dataset = build_dataset(cfg, is_train)
if distributed:
if is_train and cfg.DATASET.SAMPLER == 'repeated_aug':
logging.info('=> use repeated aug sampler')
sampler = RASampler(dataset, shuffle=shuffle)
else:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, shuffle=shuffle
)
shuffle = False
else:
sampler = None
if cfg.AUG.TIMM_AUG.USE_LOADER and is_train:
logging.info('=> use timm loader for training')
timm_cfg = cfg.AUG.TIMM_AUG
data_loader = create_loader(
dataset,
input_size=cfg.TRAIN.IMAGE_SIZE[0],
batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU,
is_training=True,
use_prefetcher=True,
no_aug=False,
re_prob=timm_cfg.RE_PROB,
re_mode=timm_cfg.RE_MODE,
re_count=timm_cfg.RE_COUNT,
re_split=timm_cfg.RE_SPLIT,
scale=cfg.AUG.SCALE,
ratio=cfg.AUG.RATIO,
hflip=timm_cfg.HFLIP,
vflip=timm_cfg.VFLIP,
color_jitter=timm_cfg.COLOR_JITTER,
auto_augment=timm_cfg.AUTO_AUGMENT,
num_aug_splits=0,
interpolation=timm_cfg.INTERPOLATION,
mean=cfg.INPUT.MEAN,
std=cfg.INPUT.STD,
num_workers=cfg.WORKERS,
distributed=distributed,
collate_fn=None,
pin_memory=cfg.PIN_MEMORY,
use_multi_epochs_loader=True
)
else:
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size_per_gpu,
shuffle=shuffle,
num_workers=cfg.WORKERS,
pin_memory=cfg.PIN_MEMORY,
sampler=sampler,
drop_last=True if is_train else False,
)
return data_loader
|
CvT/lib/dataset/build.py/0
|
{
"file_path": "CvT/lib/dataset/build.py",
"repo_id": "CvT",
"token_count": 1703
}
| 310 |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation ("Microsoft") grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pickle
import csv
import numpy as np
import torch.nn as nn
import torch.utils.data as data
from torch.autograd import Variable
from tqdm import tqdm
from torch.utils.data import Dataset
from srcnn.net import *
import json
from msanomalydetector.util import average_filter
from msanomalydetector.spectral_residual import SpectralResidual
def read_pkl(path):
with open(path, 'rb') as f:
return pickle.load(f)
def read_csv_kpi(path):
tm = []
vl = []
lb = []
with open(path) as f:
input = csv.reader(f, delimiter=',')
cnt = 0
for row in input:
if cnt == 0:
cnt += 1
continue
tm.append(int(row[0]))
vl.append(float(row[1]))
lb.append(int(row[2]))
cnt += 1
f.close()
return tm, vl, lb
def read_csv(path):
tm = []
vl = []
with open(path, 'r+') as f:
input = csv.reader(f, delimiter=',')
cnt = 0
for row in input:
if cnt == 0:
cnt += 1
continue
tm.append(cnt)
vl.append(float(row[1]))
f.close()
return tm, vl
def sr_cnn(data_path, model_path, win_size, lr, epochs, batch, num_worker, load_path=None):
def adjust_lr(optimizer, epoch):
base_lr = lr
cur_lr = base_lr * (0.5 ** ((epoch + 10) // 10))
for param in optimizer.param_groups:
param['lr'] = cur_lr
def Var(x):
return Variable(x.cuda())
def loss_function(x, lb):
l2_reg = 0.
l2_weight = 0.
for W in net.parameters():
l2_reg = l2_reg + W.norm(2)
kpiweight = torch.ones(lb.shape)
kpiweight[lb == 1] = win_size // 100
kpiweight = kpiweight.cuda()
BCE = F.binary_cross_entropy(x, lb, weight=kpiweight, reduction='sum')
return l2_reg * l2_weight + BCE
def calc(pred, true):
TP = 0
FP = 0
TN = 0
FN = 0
for pre, gt in zip(pred, true):
if gt == 1:
if pre == 1:
TP += 1
else:
FN += 1
if gt == 0:
if pre == 1:
FP += 1
else:
TN += 1
print('TP=%d FP=%d TN=%d FN=%d' % (TP, FP, TN, FN))
return TP, FP, TN, FN
def train(epoch, net, gen_set):
train_loader = data.DataLoader(dataset=gen_set, shuffle=True, num_workers=num_worker, batch_size=batch,
pin_memory=True)
net.train()
train_loss = 0
totTP, totFP, totTN, totFN = 0, 0, 0, 0
threshold = 0.5
for batch_idx, (inputs, lb) in enumerate(tqdm(train_loader, desc="Iteration")):
optimizer.zero_grad()
inputs = inputs.float()
lb = lb.float()
valueseq = Var(inputs)
lb = Var(lb)
output = net(valueseq)
if epoch > 110:
aa = output.detach().cpu().numpy().reshape(-1)
res = np.zeros(aa.shape, np.int64)
res[aa > threshold] = 1
bb = lb.detach().cpu().numpy().reshape(-1)
TP, FP, TN, FN = calc(res, bb)
totTP += TP
totFP += FP
totTN += TN
totFN += FN
if batch_idx % 100 == 0:
print('TP=%d FP=%d TN=%d FN=%d' % (TP, FP, TN, FN))
loss1 = loss_function(output, lb)
loss1.backward()
train_loss += loss1.item()
optimizer.step()
            torch.nn.utils.clip_grad_norm_(net.parameters(), 5.0)
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(inputs), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss1.item() / len(inputs)))
model = Anomaly(win_size)
net = model.cuda()
gpu_num = torch.cuda.device_count()
net = torch.nn.DataParallel(net, list(range(gpu_num)))
print(net)
base_lr = lr
bp_parameters = filter(lambda p: p.requires_grad, net.parameters())
optimizer = optim.SGD(bp_parameters, lr=base_lr, momentum=0.9, weight_decay=0.0)
    if load_path is not None:
net = load_model(model, load_path)
print("model loaded")
gen_data = gen_set(win_size, data_path)
for epoch in range(1, epochs + 1):
print('epoch :', epoch)
train(epoch, net, gen_data)
adjust_lr(optimizer, epoch)
if epoch % 5 == 0:
save_model(model, model_path + 'srcnn_retry' + str(epoch) + '_' + str(win_size) + '.bin')
return
def fft(values):
wave = np.array(values)
trans = np.fft.fft(wave)
realnum = np.real(trans)
comnum = np.imag(trans)
mag = np.sqrt(realnum ** 2 + comnum ** 2)
mag += 1e-5
spectral = np.exp(np.log(mag) - average_filter(np.log(mag)))
trans.real = trans.real * spectral / mag
trans.imag = trans.imag * spectral / mag
wave = np.fft.ifft(trans)
mag = np.sqrt(wave.real ** 2 + wave.imag ** 2)
return mag
def spectral_residual(values):
"""
This method transform a time series into spectral residual series
:param values: list.
a list of float values.
:return: mag: list.
a list of float values as the spectral residual values
"""
EPS = 1e-8
trans = np.fft.fft(values)
mag = np.sqrt(trans.real ** 2 + trans.imag ** 2)
maglog = [np.log(item) if abs(item) > EPS else 0 for item in mag]
spectral = np.exp(maglog - average_filter(maglog, n=3))
trans.real = [ireal * ispectral / imag if abs(imag) > EPS else 0
for ireal, ispectral, imag in zip(trans.real, spectral, mag)]
trans.imag = [iimag * ispectral / imag if abs(imag) > EPS else 0
for iimag, ispectral, imag in zip(trans.imag, spectral, mag)]
wave_r = np.fft.ifft(trans)
mag = np.sqrt(wave_r.real ** 2 + wave_r.imag ** 2)
return mag
class gen_set(Dataset):
def __init__(self, width, data_path):
self.genlen = 0
self.len = self.genlen
self.width = width
with open(data_path, 'r+') as fin:
self.kpinegraw = json.load(fin)
self.negrawlen = len(self.kpinegraw)
print('length :', len(self.kpinegraw))
self.len += self.negrawlen
self.kpineglen = 0
self.control = 0.
def __len__(self):
return self.len
def __getitem__(self, index):
idx = index % self.negrawlen
datas = self.kpinegraw[idx]
datas = np.array(datas)
data = datas[0, :].astype(np.float64)
lbs = datas[1, :].astype(np.float64)
wave = spectral_residual(data)
waveavg = average_filter(wave)
for i in range(self.width):
if wave[i] < 0.001 and waveavg[i] < 0.001:
lbs[i] = 0
continue
ratio = wave[i] / waveavg[i]
if ratio < 1.0 and lbs[i] == 1:
lbs[i] = 0
if ratio > 5.0:
lbs[i] = 1
srscore = abs(wave - waveavg) / (waveavg + 0.01)
sortid = np.argsort(srscore)
for idx in sortid[-2:]:
if srscore[idx] > 5:
lbs[idx] = 1
resdata = torch.from_numpy(100 * wave)
reslb = torch.from_numpy(lbs)
return resdata, reslb
def sr_cnn_eval(timestamp, value, label, window, net, ms_optioin, threshold=0.95, back_k=0, backaddnum=5, step=1):
def Var(x):
return Variable(x.cuda())
def modelwork(x, net):
with torch.no_grad():
x = torch.from_numpy(100 * x).float()
x = torch.unsqueeze(x, 0)
x = Var(x)
output = net(x)
aa = output.detach().cpu().numpy().reshape(-1)
res = np.zeros(aa.shape, np.int64)
res[aa > threshold] = 1
return res, aa
win_size = window
length = len(timestamp)
if back_k <= 5:
back = back_k
else:
back = 5
detres = [0] * (win_size - backaddnum)
scores = [0] * (win_size - backaddnum)
for pt in range(win_size - backaddnum + back + step, length - back, step):
head = max(0, pt - (win_size - backaddnum))
tail = min(length, pt)
wave = np.array(SpectralResidual.extend_series(value[head:tail + back]))
mag = spectral_residual(wave)
modeloutput, rawout = modelwork(mag, net)
for ipt in range(pt - step - back, pt - back):
detres.append(modeloutput[ipt - head])
scores.append(rawout[ipt - head].item())
detres += [0] * (length - len(detres))
scores += [0] * (length - len(scores))
if ms_optioin == 'anomaly':
last = -1
interval = min([timestamp[i] - timestamp[i - 1] for i in range(1, len(timestamp))])
for i in range(1, len(timestamp)):
if timestamp[i] - timestamp[i - 1] > interval:
if last >= 0 and i - last < 1000:
detres[i] = 1
scores[i] = 1
if detres[i] == 1:
last = i
return timestamp[:].tolist(), label[:], detres[:], scores[:]
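# Hedged usage sketch (not part of the original file): the spectral residual of
# a smooth series is expected to peak near an injected anomaly.
if __name__ == '__main__':
    series = np.sin(np.linspace(0, 8 * np.pi, 256))
    series[128] += 5.0
    saliency = spectral_residual(series)
    print('most salient index:', int(np.argmax(saliency)))  # expected near 128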
|
anomalydetector/srcnn/utils.py/0
|
{
"file_path": "anomalydetector/srcnn/utils.py",
"repo_id": "anomalydetector",
"token_count": 5208
}
| 311 |
This is the list of Archai authors for copyright purposes.
This does not necessarily list everyone who has contributed code, since in some cases, their employer may be the copyright holder. To see the full list of contributors, see the revision history in source control.
- [Shital Shah](http://www.shitalshah.com)
- [Debadeepta Dey](https://www.debadeepta.com)
- [Gustavo de Rosa](https://www.microsoft.com/en-us/research/people/gderosa)
- Caio Mendes
- [Piero Kauffmann](https://www.microsoft.com/en-us/research/people/pkauffmann)
- [Chris Lovett](https://lovettsoftware.com)
- Allie Del Giorno
- Mojan Javaheripi
- [Ofer Dekel](https://www.microsoft.com/en-us/research/people/oferd)
|
archai/AUTHORS.md/0
|
{
"file_path": "archai/AUTHORS.md",
"repo_id": "archai",
"token_count": 220
}
| 312 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import atexit
import os
import subprocess
from typing import Optional, Tuple, Union
import yaml
from send2trash import send2trash
from torch.utils.tensorboard.writer import SummaryWriter
from archai.common import utils
from archai.common.apex_utils import ApexUtils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
logger = get_global_logger()
class SummaryWriterDummy:
def __init__(self, log_dir):
pass
def add_scalar(self, *args, **kwargs):
pass
def flush(self):
pass
SummaryWriterAny = Union[SummaryWriterDummy, SummaryWriter]
_tb_writer: Optional[SummaryWriterAny] = None
_atexit_reg = False # is hook for atexit registered?
def get_conf(conf:Optional[Config]=None)->Config:
if conf is not None:
return conf
return Config.get_inst()
def get_conf_common(conf:Optional[Config]=None)->Config:
return get_conf(conf)['common']
def get_conf_dataset(conf:Optional[Config]=None)->Config:
return get_conf(conf)['dataset']
def get_experiment_name(conf:Optional[Config]=None)->str:
return get_conf_common(conf)['experiment_name']
def get_expdir(conf:Optional[Config]=None)->Optional[str]:
return get_conf_common(conf)['expdir']
def get_datadir(conf:Optional[Config]=None)->Optional[str]:
return get_conf(conf)['dataset']['dataroot']
def get_tb_writer() -> SummaryWriterAny:
global _tb_writer
assert _tb_writer
return _tb_writer
class CommonState:
def __init__(self) -> None:
global _conf, _tb_writer
self.conf = get_conf()
self.tb_writer = _tb_writer
def on_app_exit():
print('Process exit:', os.getpid(), flush=True)
writer = get_tb_writer()
writer.flush()
def pt_dirs()->Tuple[str, str]:
# dirs for pt infrastructure are supplied in env vars
pt_data_dir = os.environ.get('PT_DATA_DIR', '')
# currently yaml should be copying dataset folder to local dir
# so below is not needed. The hope is that less reads from cloud
# storage will reduce overall latency.
# if pt_data_dir:
# param_args = ['--nas.eval.loader.dataset.dataroot', pt_data_dir,
# '--nas.search.loader.dataset.dataroot', pt_data_dir,
# '--nas.search.seed_train.loader.dataset.dataroot', pt_data_dir,
# '--nas.search.post_train.loader.dataset.dataroot', pt_data_dir,
# '--autoaug.loader.dataset.dataroot', pt_data_dir] + param_args
pt_output_dir = os.environ.get('PT_OUTPUT_DIR', '')
return pt_data_dir, pt_output_dir
def _pt_params(param_args: list)->list:
pt_data_dir, pt_output_dir = pt_dirs()
if pt_output_dir:
# prepend so if supplied from outside it takes back seat
param_args = ['--common.logdir', pt_output_dir] + param_args
return param_args
def get_state()->CommonState:
return CommonState()
def init_from(state:CommonState)->None:
global _tb_writer
Config.set_inst(state.conf)
_tb_writer = state.tb_writer
def create_conf(config_filepath: Optional[str]=None,
param_args: list = [], use_args=True)->Config:
# modify passed args for pt infrastructure
# if pt infrastructure doesn't exit then param_overrides == param_args
param_overrides = _pt_params(param_args)
# create env vars that might be used in paths in config
if 'default_dataroot' not in os.environ:
os.environ['default_dataroot'] = default_dataroot()
conf = Config(config_filepath=config_filepath,
param_args=param_overrides,
use_args=use_args)
_update_conf(conf)
return conf
# TODO: rename this simply as init
# initializes random number gen, debugging etc
def common_init(config_filepath: Optional[str]=None,
param_args: list = [], use_args=True,
clean_expdir=False)->Config:
# TODO: multiple child processes will create issues with shared state so we need to
# detect multiple child processes but allow if there is only one child process.
# if not utils.is_main_process():
# raise RuntimeError('common_init should not be called from child process. Please use Common.init_from()')
# setup global instance
conf = create_conf(config_filepath, param_args, use_args)
Config.set_inst(conf)
# setup env vars which might be used in paths
update_envvars(conf)
# create experiment dir
create_dirs(conf, clean_expdir)
_create_sysinfo(conf)
# create apex to know distributed processing paramters
conf_apex = get_conf_common(conf)['apex']
apex = ApexUtils(conf_apex)
# setup tensorboard
global _tb_writer
_tb_writer = create_tb_writer(conf, apex.is_master())
# create hooks to execute code when script exits
global _atexit_reg
if not _atexit_reg:
atexit.register(on_app_exit)
_atexit_reg = True
return conf
def _create_sysinfo(conf:Config)->None:
expdir = get_expdir(conf)
if expdir and not utils.is_debugging():
# copy net config to experiment folder for reference
with open(expdir_abspath('config_used.yaml'), 'w') as f:
yaml.dump(conf.to_dict(), f)
if not utils.is_debugging():
sysinfo_filepath = expdir_abspath('sysinfo.txt')
subprocess.Popen([f'./scripts/sysinfo.sh "{expdir}" > "{sysinfo_filepath}"'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
def expdir_abspath(path:str, create=False)->str:
"""Returns full path for given relative path within experiment directory."""
return utils.full_path(os.path.join('$expdir',path), create=create)
def create_tb_writer(conf:Config, is_master=True)-> SummaryWriterAny:
conf_common = get_conf_common(conf)
tb_dir, conf_enable_tb = utils.full_path(conf_common['tb_dir']), conf_common['tb_enable']
tb_enable = conf_enable_tb and is_master and tb_dir is not None and len(tb_dir) > 0
logger.info({'conf_enable_tb': conf_enable_tb,
'tb_enable': tb_enable,
'tb_dir': tb_dir})
WriterClass = SummaryWriter if tb_enable else SummaryWriterDummy
return WriterClass(log_dir=tb_dir)
def is_pt()->bool:
"""Is this code running in pt infrastrucuture"""
return os.environ.get('PT_OUTPUT_DIR', '') != ''
def default_dataroot()->str:
# the home folder on ITP VMs is super slow so use local temp directory instead
return '/var/tmp/dataroot' if is_pt() else '~/dataroot'
def _update_conf(conf:Config)->None:
"""Updates conf with full paths resolving enviromental vars"""
conf_common = get_conf_common(conf)
conf_dataset = get_conf_dataset(conf)
experiment_name = conf_common['experiment_name']
# make sure dataroot exists
dataroot = conf_dataset['dataroot']
dataroot = utils.full_path(dataroot)
# make sure logdir and expdir exists
logdir = conf_common['logdir']
if logdir:
logdir = utils.full_path(logdir)
expdir = os.path.join(logdir, experiment_name)
# directory for non-master replica logs
distdir = os.path.join(expdir, 'dist')
else:
expdir = distdir = logdir
# update conf so everyone gets expanded full paths from here on
# set environment variable so it can be referenced in paths used in config
conf_common['logdir'] = logdir
conf_dataset['dataroot'] = dataroot
conf_common['expdir'] = expdir
conf_common['distdir'] = distdir
def update_envvars(conf)->None:
"""Get values from config and put it into env vars"""
conf_common = get_conf_common(conf)
logdir = conf_common['logdir']
expdir = conf_common['expdir']
distdir = conf_common['distdir']
conf_dataset = get_conf_dataset(conf)
dataroot = conf_dataset['dataroot']
# update conf so everyone gets expanded full paths from here on
# set environment variable so it can be referenced in paths used in config
os.environ['logdir'] = logdir
os.environ['dataroot'] = dataroot
os.environ['expdir'] = expdir
os.environ['distdir'] = distdir
def clean_ensure_expdir(conf:Optional[Config], clean_dir:bool, ensure_dir:bool)->None:
expdir = get_expdir(conf)
assert expdir
if clean_dir and os.path.exists(expdir):
send2trash(expdir)
if ensure_dir:
os.makedirs(expdir, exist_ok=True)
def create_dirs(conf:Config, clean_expdir:bool)->Optional[str]:
conf_common = get_conf_common(conf)
logdir = conf_common['logdir']
expdir = conf_common['expdir']
distdir = conf_common['distdir']
conf_dataset = get_conf_dataset(conf)
dataroot = utils.full_path(conf_dataset['dataroot'])
# make sure dataroot exists
os.makedirs(dataroot, exist_ok=True)
# make sure logdir and expdir exists
if logdir:
clean_ensure_expdir(conf, clean_dir=clean_expdir, ensure_dir=True)
os.makedirs(distdir, exist_ok=True)
else:
raise RuntimeError('The logdir setting must be specified for the output directory in yaml')
# get cloud dirs if any
pt_data_dir, pt_output_dir = pt_dirs()
# validate dirs
assert not pt_output_dir or not expdir.startswith(utils.full_path('~/logdir'))
    logger.info({'expdir': expdir,
                 'PT_DATA_DIR': pt_data_dir, 'PT_OUTPUT_DIR': pt_output_dir})
|
archai/archai/common/common.py/0
|
{
"file_path": "archai/archai/common/common.py",
"repo_id": "archai",
"token_count": 3788
}
| 313 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import csv
import logging
import multiprocessing
import os
import pathlib
import platform
import random
import shutil
import subprocess
import sys
from collections import OrderedDict
from datetime import datetime
from itertools import zip_longest
from typing import (
Any,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sized,
Tuple,
Type,
Union,
)
from urllib.parse import unquote, urlparse
from urllib.request import url2pathname
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import yaml
from torchvision.datasets import utils as tvutils
class AverageMeter:
def __init__(self):
self.reset()
def reset(self):
self.avg = 0.
self.sum = 0.
self.cnt = 0
self.last = 0.
def update(self, val, n=1):
self.last = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def first_or_default(it: Iterable, default=None):
for i in it:
return i
return default
def deep_update(d: MutableMapping, u: Mapping, map_type: Type[MutableMapping] = dict) -> MutableMapping:
for k, v in u.items():
if isinstance(v, Mapping):
d[k] = deep_update(d.get(k, map_type()), v, map_type)
else:
d[k] = v
return d
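# Example (illustrative): deep_update merges nested mappings in place, e.g.
#   deep_update({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'x': 1, 'y': 2}}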
def state_dict(val) -> Mapping:
assert hasattr(val, '__dict__'), 'val must be object with __dict__ otherwise it cannot be loaded back in load_state_dict'
# Can't do below because val has state_dict() which calls utils.state_dict
# if has_method(val, 'state_dict'):
# d = val.state_dict()
# assert isinstance(d, Mapping)
# return d
return {'yaml': yaml.dump(val)}
def load_state_dict(val: Any, state_dict: Mapping) -> None:
assert hasattr(val, '__dict__'), 'val must be object with __dict__'
# Can't do below because val has state_dict() which calls utils.state_dict
# if has_method(val, 'load_state_dict'):
# return val.load_state_dict(state_dict)
s = state_dict.get('yaml', None)
assert s is not None, 'state_dict must contain yaml key'
obj = yaml.load(s, Loader=yaml.Loader)
for k, v in obj.__dict__.items():
setattr(val, k, v)
def deep_comp(o1: Any, o2: Any) -> bool:
# NOTE: dict don't have __dict__
o1d = getattr(o1, '__dict__', None)
o2d = getattr(o2, '__dict__', None)
# if both are objects
if o1d is not None and o2d is not None:
# we will compare their dictionaries
o1, o2 = o1.__dict__, o2.__dict__
if o1 is not None and o2 is not None:
# if both are dictionaries, we will compare each key
if isinstance(o1, dict) and isinstance(o2, dict):
for k in set().union(o1.keys(), o2.keys()):
if k in o1 and k in o2:
if not deep_comp(o1[k], o2[k]):
return False
else:
return False # some key missing
return True
# mismatched object types or both are scalars, or one or both None
return o1 == o2
# We set up an env variable if debugging mode is detected for vs_code_debugging.
# The reason for this is that when Python multiprocessing is used, the new processes
# spawned do not inherit 'pydevd', so those processes do not get detected as being in
# debugging mode even though they are. So we set an env var which does get inherited by subprocesses.
if 'pydevd' in sys.modules:
os.environ['vs_code_debugging'] = 'True'
def is_debugging() -> bool:
return 'vs_code_debugging' in os.environ and os.environ['vs_code_debugging'] == 'True'
def full_path(path: str, create=False) -> str:
assert path
path = os.path.abspath(
os.path.expanduser(
os.path.expandvars(path)))
if create:
os.makedirs(path, exist_ok=True)
return path
def zero_file(filepath) -> None:
"""Creates or truncates existing file"""
open(filepath, 'w').close()
def write_string(filepath: str, content: str) -> None:
pathlib.Path(filepath).write_text(content)
def read_string(filepath: str) -> str:
return pathlib.Path(filepath).read_text()
def fmt(val: Any) -> str:
if isinstance(val, float):
return f'{val:.4g}'
return str(val)
def append_csv_file(filepath: str, new_row: List[Tuple[str, Any]], delimiter='\t'):
fieldnames, rows = [], []
if os.path.exists(filepath):
with open(filepath, 'r') as f:
dr = csv.DictReader(f, delimiter=delimiter)
fieldnames = dr.fieldnames
rows = [row for row in dr.reader]
if fieldnames is None:
fieldnames = []
new_fieldnames = OrderedDict([(fn, None) for fn, v in new_row])
for fn in fieldnames:
new_fieldnames[fn] = None
with open(filepath, 'w', newline='') as f:
dr = csv.DictWriter(f, fieldnames=new_fieldnames.keys(), delimiter=delimiter)
dr.writeheader()
for row in rows:
d = dict((k, v) for k, v in zip(fieldnames, row))
dr.writerow(d)
dr.writerow(OrderedDict(new_row))
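# --- Illustrative usage sketch (not part of the original archai source) ---
# append_csv_file appends one row per call and rewrites the file so the header grows
# when new columns appear. The file name below is a made-up example.
def _example_append_csv_file() -> None:
    path = '/tmp/metrics_example.tsv'   # hypothetical output file
    append_csv_file(path, [('epoch', 1), ('top1', 0.72)])
    append_csv_file(path, [('epoch', 2), ('top1', 0.75), ('top5', 0.93)])   # 'top5' column is added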
def has_method(o, name):
return callable(getattr(o, name, None))
def extract_tar(src, dest=None, gzip=None, delete=False):
import tarfile
if dest is None:
dest = os.path.dirname(src)
if gzip is None:
gzip = src.lower().endswith('.gz')
mode = 'r:gz' if gzip else 'r'
with tarfile.open(src, mode) as tarfh:
tarfh.extractall(path=dest)
if delete:
os.remove(src)
def extract_zip(src, dest=None, delete=False):
import zipfile
if dest is None:
dest = os.path.dirname(src)
with zipfile.ZipFile(src, 'r') as zip_ref:
zip_ref.extractall(dest)
if delete:
os.remove(src)
def download_and_extract_tar(url, download_root, extract_root=None, filename=None,
md5=None, **kwargs):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if filename is None:
filename = os.path.basename(url)
if not tvutils.check_integrity(os.path.join(download_root, filename), md5):
tvutils.download_url(url, download_root, filename=filename, md5=md5)
extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)
def download_and_extract_zip(url, download_root, extract_root=None, filename=None,
md5=None, **kwargs):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if filename is None:
filename = os.path.basename(url)
if not tvutils.check_integrity(os.path.join(download_root, filename), md5):
tvutils.download_url(url, download_root, filename=filename, md5=md5)
extract_zip(os.path.join(download_root, filename), extract_root, delete=True, **kwargs)
def setup_cuda(seed: Union[float, int], local_rank: int = 0):
seed = int(seed) + local_rank
# setup cuda
cudnn.enabled = True
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# torch.cuda.manual_seed_all(seed)
cudnn.benchmark = True # set to false if deterministic
torch.set_printoptions(precision=10)
# cudnn.deterministic = False
# torch.cuda.empty_cache()
# torch.cuda.synchronize()
def cuda_device_names() -> str:
return ', '.join([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())])
def exec_shell_command(command: str, print_command_start=True, print_command_end=True) -> subprocess.CompletedProcess:
if print_command_start:
print(f'[{datetime.now()}] Running: {command}')
ret = subprocess.run(command, shell=True, check=True)
if print_command_end:
print(f'[{datetime.now()}] returncode={ret.returncode} Finished: {command}')
return ret
def zip_eq(*iterables):
sentinel = object()
for count, combo in enumerate(zip_longest(*iterables, fillvalue=sentinel)):
if any(True for c in combo if sentinel is c):
shorter_its = ','.join([str(i) for i, c in enumerate(combo) if sentinel is c])
raise ValueError(f'Iterable(s) {shorter_its} have length {count}, which is shorter than the others')
yield combo
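# --- Illustrative usage sketch (not part of the original archai source) ---
# zip_eq behaves like zip but raises ValueError when the iterables have different
# lengths, which turns silent truncation into an explicit error.
def _example_zip_eq() -> None:
    assert list(zip_eq([1, 2, 3], ['a', 'b', 'c'])) == [(1, 'a'), (2, 'b'), (3, 'c')]
    try:
        list(zip_eq([1, 2, 3], ['a', 'b']))
    except ValueError:
        pass   # expected: the second iterable is shorter
    else:
        raise AssertionError('zip_eq should have raised ValueError')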
def dir_downloads() -> str:
return full_path(str(os.path.join(pathlib.Path.home(), "Downloads")))
def filepath_without_ext(filepath: str) -> str:
"""Returns '/a/b/c/d.e' for '/a/b/c/d.e.f' """
return str(pathlib.Path(filepath).with_suffix(''))
def filepath_ext(filepath: str) -> str:
"""Returns '.f' for '/a/b/c/d.e.f' """
return pathlib.Path(filepath).suffix
def filepath_name_ext(filepath: str) -> str:
"""Returns 'd.e.f' for '/a/b/c/d.e.f' """
return pathlib.Path(filepath).name
def filepath_name_only(filepath: str) -> str:
"""Returns 'd.e' for '/a/b/c/d.e.f' """
return pathlib.Path(filepath).stem
def change_filepath_ext(filepath: str, new_ext: str) -> str:
"""Returns '/a/b/c/d.e.g' for filepath='/a/b/c/d.e.f', new_ext='.g' """
return str(pathlib.Path(filepath).with_suffix(new_ext))
def change_filepath_name(filepath: str, new_name: str, new_ext: Optional[str] = None) -> str:
"""Returns '/a/b/c/h.f' for filepath='/a/b/c/d.e.f', new_name='h' """
ext = new_ext or filepath_ext(filepath)
return str(pathlib.Path(filepath).with_name(new_name).with_suffix(ext))
def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:
"""Returns '/a/b/c/h.f' for filepath='/a/b/c/d.e.f', new_name='h' """
ext = new_ext or filepath_ext(filepath)
name = filepath_name_only(filepath)
return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))
def copy_file(src_file: str, dest_dir_or_file: str, preserve_metadata=False, use_shutil: bool = True) -> str:
if not use_shutil:
assert not preserve_metadata
return copy_file_basic(src_file, dest_dir_or_file)
# note that copy2 might fail on some Azure blobs if the filesystem does not support OS level copystats
# so use preserve_metadata=True only if absolutely needed for maximum compatibility
try:
copy_fn = shutil.copy2 if preserve_metadata else shutil.copy
return copy_fn(src_file, dest_dir_or_file)
except OSError as ex:
if preserve_metadata or ex.errno != 38: # OSError: [Errno 38] Function not implemented
raise
return copy_file_basic(src_file, dest_dir_or_file)
def copy_file_basic(src_file: str, dest_dir_or_file: str) -> str:
# try basic python functions
# first if dest is dir, get dest file name
if os.path.isdir(dest_dir_or_file):
dest_dir_or_file = os.path.join(dest_dir_or_file, filepath_name_ext(src_file))
with open(src_file, 'rb') as src, open(dest_dir_or_file, 'wb') as dst:
dst.write(src.read())
return dest_dir_or_file
def copy_dir(src_dir: str, dest_dir: str, use_shutil: bool = True) -> None:
if os.path.isdir(src_dir):
if use_shutil:
shutil.copytree(src_dir, dest_dir)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
files = os.listdir(src_dir)
for f in files:
copy_dir(os.path.join(src_dir, f),
os.path.join(dest_dir, f), use_shutil=use_shutil)
else:
copy_file(src_dir, dest_dir, use_shutil=use_shutil)
if 'main_process_pid' not in os.environ:
os.environ['main_process_pid'] = str(os.getpid())
def is_main_process() -> bool:
"""Returns True if this process was started as main process instead of child process during multiprocessing"""
return multiprocessing.current_process().name == 'MainProcess' and os.environ['main_process_pid'] == str(os.getpid())
def main_process_pid() -> int:
return int(os.environ['main_process_pid'])
def process_name() -> str:
return multiprocessing.current_process().name
def is_windows() -> bool:
return platform.system() == 'Windows'
def path2uri(path: str, windows_non_standard: bool = False) -> str:
uri = pathlib.Path(full_path(path)).as_uri()
# there is a lot of buggy regex-based code out there which expects Windows file URIs as
# file://C/... instead of the standard file:///C/...
# When passing a file URI to such code, turn on windows_non_standard
if windows_non_standard and is_windows():
uri = uri.replace('file:///', 'file://')
return uri
def uri2path(file_uri: str, windows_non_standard: bool = False) -> str:
# there is a lot of buggy regex-based code out there which expects Windows file URIs as
# file://C/... instead of the standard file:///C/...
# When passing a file URI to such code, turn on windows_non_standard
if windows_non_standard and is_windows():
file_uri = file_uri.replace('file://', 'file:///')
parsed = urlparse(file_uri)
host = "{0}{0}{mnt}{0}".format(os.path.sep, mnt=parsed.netloc)
return os.path.normpath(
os.path.join(host, url2pathname(unquote(parsed.path)))
)
def get_ranks(items: list, key=lambda v: v, reverse=False) -> List[int]:
sorted_t = sorted(zip(items, range(len(items))),
key=lambda t: key(t[0]),
reverse=reverse)
sorted_map = dict((t[1], i) for i, t in enumerate(sorted_t))
return [sorted_map[i] for i in range(len(items))]
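# --- Illustrative usage sketch (not part of the original archai source) ---
# get_ranks returns, for each item, its position in the sorted order
# (0 = smallest by default, or 0 = largest when reverse=True).
def _example_get_ranks() -> None:
    assert get_ranks([10, 30, 20]) == [0, 2, 1]
    assert get_ranks([10, 30, 20], reverse=True) == [2, 0, 1]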
def dedup_list(l: List) -> List:
return list(OrderedDict.fromkeys(l))
def delete_file(filepath: str) -> bool:
if os.path.isfile(filepath):
os.remove(filepath)
return True
else:
return False
def save_as_yaml(obj, filepath: str) -> None:
with open(filepath, 'w', encoding='utf-8') as f:
yaml.dump(obj, f, default_flow_style=False)
def map_to_list(variable: Union[int, float, Sized], size: int) -> Sized:
if isinstance(variable, Sized):
size_diff = size - len(variable)
if size_diff < 0:
return variable[:size]
elif size_diff == 0:
return variable
elif size_diff > 0:
return variable + [variable[0]] * size_diff
return [variable] * size
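# --- Illustrative usage sketch (not part of the original archai source) ---
# map_to_list broadcasts a scalar to a list of the requested size and pads or
# truncates an existing sequence to that size.
def _example_map_to_list() -> None:
    assert map_to_list(0.5, 3) == [0.5, 0.5, 0.5]
    assert map_to_list([1, 2], 4) == [1, 2, 1, 1]   # padded with the first element
    assert map_to_list([1, 2, 3], 2) == [1, 2]      # truncated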
def attr_to_dict(obj: Any, recursive: bool = True) -> Dict[str, Any]:
MAX_LIST_LEN = 10
variables = {}
var_dict = dict(vars(obj.__class__))
try:
var_dict.update(dict(vars(obj)))
except TypeError:
pass
for k, v in var_dict.items():
if k[0] == '_':
continue
if isinstance(v, (int, float, str)):
variables[k.lower()] = v
elif isinstance(v, list) and (len(v) == 0 or isinstance(v[0], (int, float, str))):
variables[k.lower()] = v[:MAX_LIST_LEN]
elif isinstance(v, set) and (len(v) == 0 or isinstance(next(iter(v)), (int, float, str))):
variables[k.lower()] = list(v)[:MAX_LIST_LEN]
elif recursive:
settings_fn = getattr(v, 'settings', None)
if callable(settings_fn):
variables[k.lower()] = settings_fn()
return variables
|
archai/archai/common/utils.py/0
|
{
"file_path": "archai/archai/common/utils.py",
"repo_id": "archai",
"token_count": 6440
}
| 314 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import SVHN
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class SVHNDatasetProvider(DatasetProvider):
"""SVHN dataset provider."""
def __init__(
self,
root: Optional[str] = "dataroot",
) -> None:
"""Initialize SVHN dataset provider.
Args:
root: Root directory where the dataset is saved.
"""
super().__init__()
self.root = root
@overrides
def get_train_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return SVHN(
self.root,
split="train",
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
@overrides
def get_val_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn("Validation set not available. Returning `extra` set ...")
return SVHN(
self.root,
split="extra",
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
@overrides
def get_test_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return SVHN(
self.root,
split="test",
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
|
archai/archai/datasets/cv/svhn_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/cv/svhn_dataset_provider.py",
"repo_id": "archai",
"token_count": 879
}
| 315 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import List, Optional
from overrides import EnforceOverrides
from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api.archai_model import ArchaiModel
class ModelEvaluator(EnforceOverrides):
"""Abstract class for synchronous model evaluators.
Evaluators are general-use classes used to evaluate architectures on
given criteria (task performance, speed, size, etc.).
Subclasses of `ModelEvaluator` are expected to implement `ModelEvaluator.evaluate`.
Synchronous evaluators are computed by search algorithms sequentially.
For parallel / async. execution, please refer to `archai.api.AsyncModelEvaluator`.
For a list of built-in evaluators, please check `archai.discrete_search.evaluators`.
Examples:
>>> class MyValTaskAccuracy(ModelEvaluator):
>>> def __init__(self, dataset: DatasetProvider, batch_size: int = 32):
>>> self.dataset = dataset
>>> self.batch_size = batch_size
>>>
>>> @overrides
>>> def get_name(self) -> str:
>>>             return f'MyValTaskAccuracy_on_{self.dataset.get_name()}'
>>>
>>> @overrides
>>> def evaluate(self, model: ArchaiModel, budget: Optional[float] = None):
>>> _, val_data = self.dataset.get_train_val_datasets()
>>> val_dl = torch.utils.data.DataLoader(val_data, batch_size=self.batch_size)
>>>
>>> with torch.no_grad():
>>> labels = np.concatenate([y for _, y in val_dl], axis=0)
>>> preds = np.concatenate(
>>> [model.arch(x).cpu().numpy() for x, _ in val_dl],
>>> axis=0
>>> )
>>>
>>> return np.mean(labels == preds)
>>>
>>> class NumberOfModules(ModelEvaluator):
>>> @overrides
>>> def evaluate(self, model: ArchaiModel, budget: Optional[float] = None):
>>> return len(list(model.arch.modules()))
"""
@abstractmethod
def evaluate(self, arch: ArchaiModel, budget: Optional[float] = None) -> float:
"""Evaluate an `ArchaiModel` instance, optionally using a budget value.
Args:
arch: Model to be evaluated.
budget: A budget multiplier value, used by search algorithms like `SuccessiveHalving`
to specify how much compute should be spent in this evaluation. In order to use
this type of search algorithm, the implementation of `evaluate()` must use the
passed `budget` value accordingly.
Returns:
Evaluation result.
"""
pass
class AsyncModelEvaluator(EnforceOverrides):
"""Abstract class for asynchronous model evaluators.
Evaluators are general-use classes used to evaluate architectures on given criteria
(task performance, speed, size, etc.).
Unlike `archai.api.ModelEvaluator`, `AsyncModelEvaluator` evaluates models in asynchronous
fashion, by sending evaluation jobs to a queue and fetching the results later.
Subclasses of `AsyncModelEvaluator` are expected to implement
`AsyncModelEvaluator.send(arch: ArchaiModel, budget: Optional[float])`
and `AsyncModelEvaluator.fetch_all()`.
`AsyncModelEvaluator.send` is a non-blocking call that schedules an evaluation job for a
given (model, budget) pair. `AsyncModelEvaluator.fetch_all` is a blocking call
that waits and gathers the results from current evaluation jobs and cleans the job queue.
For a list of built-in evaluators, please check `archai.discrete_search.evaluators`.
>>> my_obj = MyAsyncObj(dataset) # My AsyncModelEvaluator subclass
>>>
>>> # Non blocking calls
>>> my_obj.send(model_1, budget=None)
>>> my_obj.send(model_2, budget=None)
>>> my_obj.send(model_3, budget=None)
>>>
>>> # Blocking call
>>> eval_results = my_obj.fetch_all()
>>> assert len(eval_results) == 3
>>>
>>> # Job queue is reset after `fetch_call` method
>>> my_obj.send(model_4, budget=None)
>>> assert len(my_obj.fetch_all()) == 1
"""
@abstractmethod
def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
"""Send an evaluation job for a given (model, budget) triplet.
Args:
arch: Model to be evaluated.
budget: A budget multiplier value, used by search algorithms like `SuccessiveHalving`
to specify how much compute should be spent in this evaluation. In order to use
this type of search algorithm, the implementation of `send()` must use the passed
`budget` value accordingly.
"""
pass
@abstractmethod
def fetch_all(self) -> List[Optional[float]]:
"""Fetch all evaluation results from the job queue.
Returns:
List of evaluation results. Each result is a `float` or `None` if evaluation job failed.
"""
pass
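# --- Illustrative sketch (not part of the original archai source) ---
# A minimal AsyncModelEvaluator subclass, assuming a thread pool is an acceptable
# execution backend and that `ArchaiModel.arch` is a torch.nn.Module (as in the
# docstring examples above). `send` schedules a parameter count and `fetch_all`
# returns the results in submission order, with None for failed jobs.
from concurrent.futures import ThreadPoolExecutor
from overrides import overrides
class ThreadPoolNumParameters(AsyncModelEvaluator):
    def __init__(self, max_workers: int = 4) -> None:
        self._pool = ThreadPoolExecutor(max_workers=max_workers)
        self._futures = []
    @overrides
    def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
        # Schedule the (cheap) evaluation job; real evaluators would dispatch to
        # remote workers or GPUs here.
        self._futures.append(self._pool.submit(lambda: sum(p.numel() for p in arch.arch.parameters())))
    @overrides
    def fetch_all(self) -> List[Optional[float]]:
        results: List[Optional[float]] = []
        for future in self._futures:
            try:
                results.append(float(future.result()))
            except Exception:
                results.append(None)   # a failed job maps to None, as documented above
        self._futures = []             # reset the job queue
        return results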
|
archai/archai/discrete_search/api/model_evaluator.py/0
|
{
"file_path": "archai/archai/discrete_search/api/model_evaluator.py",
"repo_id": "archai",
"token_count": 2062
}
| 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, List, Optional, Union
import torch
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator
from archai.discrete_search.evaluators.pt_profiler_utils.pt_profiler_eval import profile
class TorchNumParameters(ModelEvaluator):
"""Total number of parameters."""
def __init__(
self, exclude_cls: Optional[List[torch.nn.Module]] = None, trainable_only: Optional[bool] = True
) -> None:
"""Initialize the evaluator.
Args:
exclude_cls: List of PyTorch module classes to exclude from parameter counting.
trainable_only: A flag indicating whether only trainable parameters should be counted.
"""
self.exclude_cls = exclude_cls
self.trainable_only = trainable_only
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
total_params = sum(
param.numel() for param in model.arch.parameters() if not self.trainable_only or param.requires_grad
)
exclude_params = (
0
if self.exclude_cls is None
else sum(
sum(param.numel() for param in module.parameters())
for module in model.arch.modules()
if isinstance(module, tuple(self.exclude_cls))
)
)
return total_params - exclude_params
class TorchFlops(ModelEvaluator):
"""Total number of FLOPs."""
def __init__(
self,
forward_args: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
forward_kwargs: Optional[Dict[str, torch.Tensor]] = None,
ignore_layers: Optional[List[str]] = None,
) -> None:
"""Initialize the evaluator.
Args:
forward_args: `model.forward()` arguments used for profiling.
forward_kwargs: `model.forward()` keyword arguments used for profiling.
ignore_layers: List of layer names that should be ignored during profiling.
"""
self.forward_args = forward_args
self.forward_kwargs = forward_kwargs
self.ignore_layers = ignore_layers
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
return profile(
model.arch,
self.forward_args,
self.forward_kwargs,
num_warmups=0,
num_samples=1,
ignore_layers=self.ignore_layers,
)["flops"]
class TorchMacs(ModelEvaluator):
"""Total number of MACs."""
def __init__(
self,
forward_args: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
forward_kwargs: Optional[Dict[str, torch.Tensor]] = None,
ignore_layers: Optional[List[str]] = None,
) -> None:
"""Initialize the evaluator.
Args:
forward_args: `model.forward()` arguments used for profiling.
forward_kwargs: `model.forward()` keyword arguments used for profiling.
ignore_layers: List of layer names that should be ignored during profiling.
"""
self.forward_args = forward_args
self.forward_kwargs = forward_kwargs
self.ignore_layers = ignore_layers
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
return profile(
model.arch,
self.forward_args,
self.forward_kwargs,
num_warmups=0,
num_samples=1,
ignore_layers=self.ignore_layers,
)["macs"]
class TorchLatency(ModelEvaluator):
"""Average/median latency (in seconds) of a PyTorch model using a sample input."""
def __init__(
self,
forward_args: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
forward_kwargs: Optional[Dict[str, torch.Tensor]] = None,
num_warmups: Optional[int] = 1,
num_samples: Optional[int] = 1,
use_cuda: Optional[bool] = False,
use_median: Optional[bool] = False,
ignore_layers: Optional[List[str]] = None,
) -> None:
"""Initialize the evaluator.
Args:
forward_args: `model.forward()` arguments used for profiling.
forward_kwargs: `model.forward()` keyword arguments used for profiling.
num_warmups: Number of warmup runs before profiling.
num_samples: Number of runs after warmup.
use_cuda: Whether to use CUDA instead of CPU.
use_median: Whether to use median instead of mean to average memory and latency.
ignore_layers: List of layer names that should be ignored during profiling.
"""
self.forward_args = forward_args
self.forward_kwargs = forward_kwargs
self.ignore_layers = ignore_layers
self.num_warmups = num_warmups
self.num_samples = num_samples
self.use_cuda = use_cuda
self.use_median = use_median
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
return profile(
model.arch,
self.forward_args,
self.forward_kwargs,
num_warmups=self.num_warmups,
num_samples=self.num_samples,
use_cuda=self.use_cuda,
use_median=self.use_median,
ignore_layers=self.ignore_layers,
)["latency"]
class TorchPeakCudaMemory(ModelEvaluator):
"""Measures CUDA peak memory (in bytes) of a PyTorch model using a sample input."""
def __init__(
self,
forward_args: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
forward_kwargs: Optional[Dict[str, torch.Tensor]] = None,
num_warmups: Optional[int] = 1,
num_samples: Optional[int] = 1,
use_median: Optional[bool] = False,
ignore_layers: Optional[List[str]] = None,
) -> None:
"""Initialize the evaluator.
Args:
forward_args: `model.forward()` arguments used for profiling.
forward_kwargs: `model.forward()` keyword arguments used for profiling.
num_warmups: Number of warmup runs before profiling.
num_samples: Number of runs after warmup.
use_median: Whether to use median instead of mean to average memory and latency.
ignore_layers: List of layer names that should be ignored during profiling.
"""
self.forward_args = forward_args
self.forward_kwargs = forward_kwargs
self.ignore_layers = ignore_layers
self.num_warmups = num_warmups
self.num_samples = num_samples
self.use_median = use_median
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
return profile(
model.arch,
self.forward_args,
self.forward_kwargs,
num_warmups=self.num_warmups,
num_samples=self.num_samples,
use_cuda=True,
use_median=self.use_median,
ignore_layers=self.ignore_layers,
)["peak_memory"]
class TorchPeakCpuMemory(ModelEvaluator):
"""Measures CPU peak memory (in bytes) of a PyTorch model using a sample input."""
def __init__(
self,
forward_args: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
forward_kwargs: Optional[Dict[str, torch.Tensor]] = None,
):
"""Initialize the evaluator.
Args:
forward_args: `model.forward()` arguments used for profiling.
forward_kwargs: `model.forward()` keyword arguments used for profiling.
"""
forward_args = forward_args if forward_args is not None else []
self.forward_args = [forward_args] if isinstance(forward_args, torch.Tensor) else forward_args
self.forward_kwargs = forward_kwargs or {}
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None):
model.arch.to("cpu")
forward_args = tuple([arg.to("cpu") for arg in self.forward_args])
forward_kwargs = {key: value.to("cpu") for key, value in self.forward_kwargs.items()}
is_training = model.arch.training
model.arch.eval()
with torch.profiler.profile(
activities=[torch.profiler.ProfilerActivity.CPU], record_shapes=True, profile_memory=True
) as prof:
with torch.profiler.record_function("model_inference"):
model.arch(*forward_args, **forward_kwargs)
event_list = prof.key_averages()
peak_memory = max(event.cpu_memory_usage for event in event_list)
if is_training:
model.arch.train()
return peak_memory
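# --- Illustrative usage sketch (not part of the original archai source) ---
# Evaluating a toy model with the evaluators above. It assumes `ArchaiModel` can be
# built from a torch module and a string identifier; the model and input shape below
# are made up for the example.
def _example_profiler_evaluators() -> None:
    toy = ArchaiModel(arch=torch.nn.Linear(32, 10), archid='toy_linear')
    sample = torch.randn(1, 32)
    num_params = TorchNumParameters().evaluate(toy)                           # parameter count
    flops = TorchFlops(forward_args=sample).evaluate(toy)                     # FLOPs of one forward pass
    latency = TorchLatency(forward_args=sample, num_samples=5).evaluate(toy)  # seconds per forward pass
    print(num_params, flops, latency)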
|
archai/archai/discrete_search/evaluators/pt_profiler.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/pt_profiler.py",
"repo_id": "archai",
"token_count": 3869
}
| 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from archai.discrete_search.search_spaces.config.discrete_choice import DiscreteChoice
def repeat_config(
config_dict: Dict[str, Any], repeat_times: Union[int, List[int]], share_arch: Optional[bool] = False
) -> Dict[str, Any]:
"""Repeats an architecture config a variable number of times.
Args:
config_dict (Dict[str, Any]): Config dictionary to repeat.
repeat_times (Union[int, List[int]]): If an integer, the number of times to repeat the config
will be treated as constant. If a list of integers, the number of times to repeat the config will
also be considered an architecture parameter and will be sampled from the list.
share_arch (bool, optional): Whether to share the architecture parameters across the
repeated configs. Defaults to False.
Returns:
Dict[str, Any]: Config dictionary with the repeated config.
"""
repeat_times = [repeat_times] if isinstance(repeat_times, int) else repeat_times
return {
"_config_type": "config_list",
"_share_arch": share_arch,
"_repeat_times": DiscreteChoice(repeat_times),
"_configs": {str(i): (config_dict if share_arch else deepcopy(config_dict)) for i in range(max(repeat_times))},
}
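# --- Illustrative usage sketch (not part of the original archai source) ---
# Repeating a block config a variable number of times; the inner keys below are made
# up. With repeat_times=[1, 2, 3], the number of repeated blocks itself becomes a
# searchable architecture parameter.
def _example_repeat_config() -> None:
    block = {'hidden_size': DiscreteChoice([128, 256]), 'use_bias': DiscreteChoice([True, False])}
    repeated = repeat_config(block, repeat_times=[1, 2, 3], share_arch=False)
    assert repeated['_config_type'] == 'config_list'
    assert sorted(repeated['_configs'].keys()) == ['0', '1', '2']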
|
archai/archai/discrete_search/search_spaces/config/helpers.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/config/helpers.py",
"repo_id": "archai",
"token_count": 499
}
| 318 |
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union, Any
import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel, SequenceSummary
from transformers.pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from transformers.utils import ModelOutput, logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from archai.discrete_search.search_spaces.config import ArchConfig
from ...mixed_op import MixedAttentionBlock
from ...utils import make_broadcast_map, make_asso_map
from .block import GPT2Block
logger = logging.get_logger(__name__)
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
Multiple choice classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class GPT2Model(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, arch_config: ArchConfig, hf_config: GPT2Config, eos_id: int = 50256):
super().__init__(hf_config)
self.config = hf_config
self.hidden_size = arch_config.pick('hidden_size')
self.wte = nn.Embedding(hf_config.vocab_size, self.hidden_size)
self.wpe = nn.Embedding(hf_config.max_position_embeddings, self.hidden_size)
self.embd_pdrop = hf_config.embd_pdrop
self.resid_pdrop = hf_config.resid_pdrop
self.h = nn.ModuleList([
GPT2Block(
block_config, hf_config, self.hidden_size, layer_idx=i,
resid_dropout1=hf_config.embd_pdrop if i == 0 else self.resid_pdrop,
resid_dropout2=self.resid_pdrop
)
for i, block_config in enumerate(arch_config.pick('hidden_layers'))
])
self.ln_f = nn.LayerNorm(self.hidden_size, eps=hf_config.layer_norm_epsilon)
# For broadcast hard-coded attention
self.eos_id = eos_id
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
self.wpe = self.wpe.to(self.first_device)
# Load onto devices
for k, v in self.device_map.items():
for block in v:
cuda_device = "cuda:" + str(k)
self.h[block] = self.h[block].to(cuda_device)
# ln_f to last
self.ln_f = self.ln_f.to(self.last_device)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
self.wte = self.wte.to("cpu")
self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
bin_attention_mask = attention_mask
# Hard-coded attention
# assoc_hc_attn = make_asso_map(input_ids, attention_mask)
# broad_hc_attn = make_broadcast_map(input_ids, attention_mask, self.eos_id)
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We save the binarized attention mask for LocalAttention and LSHAttention
bin_attention_mask = attention_mask.clone()
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
bin_attention_mask = bin_attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, len(self.h))
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
residual = None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
bin_attention_mask = bin_attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
#assoc_hc_attn=assoc_hc_attn,
#broad_hc_attn=broad_hc_attn,
bin_attention_mask=bin_attention_mask
)
else:
hidden_states, residual = block(
hidden_states,
residual,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
#assoc_hc_attn=assoc_hc_attn,
#broad_hc_attn=broad_hc_attn,
bin_attention_mask=bin_attention_mask
)
if use_cache is True:
raise NotImplementedError
presents = presents + (outputs[1],)
if output_attentions:
raise NotImplementedError
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
def __init__(self, arch_config: ArchConfig, hf_config: GPT2Config):
super().__init__(hf_config)
self.config = hf_config
self.transformer = GPT2Model(arch_config, hf_config)
self.lm_head = nn.Linear(arch_config.pick('hidden_size'), hf_config.vocab_size, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
# Initialize weights and apply final processing
self.post_init()
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
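# --- Illustrative sketch (not part of the original archai source) ---
# The language-modeling loss in `GPT2LMHeadModel.forward` shifts logits and labels by
# one position so the token at position t is predicted from positions < t. A
# standalone toy version of that shift:
def _example_causal_lm_shift() -> None:
    batch, seq_len, vocab = 2, 5, 11
    lm_logits = torch.randn(batch, seq_len, vocab)
    labels = torch.randint(0, vocab, (batch, seq_len))
    shift_logits = lm_logits[..., :-1, :].contiguous()   # predictions for positions 0 .. seq_len-2
    shift_labels = labels[..., 1:].contiguous()          # targets are the next tokens 1 .. seq_len-1
    loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
    assert loss.ndim == 0   # scalar loss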
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/backbones/gpt2/model.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/backbones/gpt2/model.py",
"repo_id": "archai",
"token_count": 10766
}
| 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from transformers.models.transfo_xl.configuration_transfo_xl import TransfoXLConfig
class MemTransformerConfig(TransfoXLConfig):
model_type = "mem-transformer"
def __init__(self, *args, **kwargs) -> None:
if "primer_conv" not in kwargs:
kwargs["primer_conv"] = False
if "primer_square" not in kwargs:
kwargs["primer_square"] = False
if "fp16" not in kwargs:
kwargs["fp16"] = False
if "use_cache" not in kwargs:
kwargs["use_cache"] = False
super().__init__(*args, **kwargs)
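# --- Illustrative usage sketch (not part of the original archai source) ---
# The constructor only injects defaults for keys that were not supplied, so explicit
# values are preserved. This assumes the standard Hugging Face behavior of storing
# unknown config kwargs as attributes.
def _example_mem_transformer_config() -> None:
    cfg = MemTransformerConfig(primer_conv=True)
    assert cfg.primer_conv is True      # explicitly supplied value is kept
    assert cfg.primer_square is False   # injected default
    assert cfg.use_cache is False       # injected default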
|
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/configuration_mem_transformer.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/configuration_mem_transformer.py",
"repo_id": "archai",
"token_count": 282
}
| 320 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Mapping, Optional, Tuple
import torch
from overrides import overrides
from transformers.configuration_utils import PretrainedConfig
from archai.onnx.config_utils.onnx_config_base import OnnxConfig, OnnxConfigWithPast
class GPT2OnnxConfig(OnnxConfigWithPast):
"""GPT-2 ONNX configuration (with past key/values support)."""
def __init__(
self,
config: PretrainedConfig,
task: Optional[str] = "causal-lm",
use_past: Optional[bool] = False,
) -> None:
super().__init__(config, task=task, use_past=use_past, past_key_values=2)
@property
def num_layers(self) -> int:
return self.config.n_layer
@property
def is_ort_graph_optimizable(self) -> bool:
return True
@property
def ort_graph_optimizer_args(self) -> Tuple[Any, ...]:
return (self.num_attention_heads, self.hidden_size)
class GPT2FlexOnnxConfig(OnnxConfigWithPast):
"""GPT-2 Flex ONNX configuration (with past key/values support)."""
def __init__(
self, config: PretrainedConfig, task: Optional[str] = "causal-lm", use_past: Optional[bool] = False
) -> None:
super().__init__(config, task=task, use_past=use_past, past_key_values=2)
@property
def num_layers(self) -> int:
return self.config.n_layer
@property
def is_ort_graph_optimizable(self) -> bool:
return all(nh == self.num_attention_heads[0] for nh in self.num_attention_heads)
@property
def ort_graph_optimizer_args(self) -> Tuple[Any, ...]:
return (self.num_attention_heads[0], self.hidden_size)
@overrides
def generate_dummy_inputs(
self, batch_size: int = 2, seq_len: int = 8, past_seq_len: int = 8
) -> Mapping[str, torch.Tensor]:
dummy_inputs = OnnxConfig.generate_dummy_inputs(self, batch_size, seq_len)
if self.use_past:
# [past_key_values, batch_size, n_head[i], past_seq_len, d_head[i]]
dummy_inputs["past_key_values"] = tuple(
[
torch.zeros(
self.config.past_key_values,
batch_size,
self.num_attention_heads[i],
past_seq_len,
self.hidden_size // self.num_attention_heads[i],
)
for i in range(self.num_layers)
]
)
return dummy_inputs
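# --- Illustrative sketch (not part of the original archai source) ---
# Shape of each dummy `past_key_values` entry built above, spelled out with made-up
# per-layer head counts for a two-layer "flex" model:
def _example_past_key_values_shapes() -> None:
    past_key_values, batch_size, past_seq_len, hidden_size = 2, 2, 8, 256
    num_attention_heads = [4, 8]   # hypothetical per-layer head counts
    dummy_past = tuple(
        torch.zeros(past_key_values, batch_size, n_head, past_seq_len, hidden_size // n_head)
        for n_head in num_attention_heads
    )
    assert dummy_past[0].shape == (2, 2, 4, 8, 64)
    assert dummy_past[1].shape == (2, 2, 8, 8, 32)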
|
archai/archai/onnx/config_utils/gpt2_onnx_config.py/0
|
{
"file_path": "archai/archai/onnx/config_utils/gpt2_onnx_config.py",
"repo_id": "archai",
"token_count": 1166
}
| 321 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
import onnx
import torch
from onnx import onnx_pb as onnx_proto
from onnx.onnx_ml_pb2 import NodeProto
from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
from onnxruntime.quantization.operators.base_operator import QuantOperatorBase
from onnxruntime.quantization.quant_utils import attribute_to_kwarg, ms_domain
from onnxruntime.quantization.quantize import quantize_dynamic
from onnxruntime.quantization.registry import IntegerOpsRegistry
from archai.common.file_utils import create_file_name_identifier
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.quantization.quantization_utils import rgetattr, rsetattr
logger = OrderedDictLogger(source=__name__)
class GemmQuant(QuantOperatorBase):
"""Quantized version of the Gemm operator."""
def __init__(self, onnx_quantizer: ONNXQuantizer, onnx_node: NodeProto) -> None:
"""Override initialization method with custom arguments.
Args:
onnx_quantizer: An instance of the quantizer itself.
onnx_node: Node to be quantized.
"""
super().__init__(onnx_quantizer, onnx_node)
def quantize(self) -> None:
"""Quantize a Gemm node into QGemm.
This method replaces the original `Gemm` node with a `QGemm` node, which is a quantized
version of the `Gemm` operator. It also adds a `Cast` node to cast the output of `QGemm`
to float, and an `Add` node to sum the remaining bias to the `Gemm` output.
"""
node = self.node
assert node.op_type == "Gemm"
# Updates original attributes to current node
kwargs = {}
for attribute in node.attribute:
kwargs.update(attribute_to_kwarg(attribute))
kwargs.pop("beta")
# Adds proper domain and missing attributes
kwargs["domain"] = ms_domain
kwargs["transA"] = 0
# Creates proper inputs for the QGemm node
(q_names, zp_names, scale_names, nodes) = self.quantizer.quantize_inputs(
node, [0, 1], reduce_range=True, op_level_per_channel=True
)
qgemm_inputs = []
for (q_name, scale_name, zp_name) in zip(q_names, scale_names, zp_names):
qgemm_inputs += [q_name, scale_name, zp_name]
# Adds a "QGemm" node to replace original Gemm with its quantized version
qgemm_output = node.output[0] + "_output_quantized"
qgemm_name = node.name + "_quant" if node.name != "" else ""
qgemm_node = onnx.helper.make_node("QGemm", qgemm_inputs, [qgemm_output], qgemm_name, **kwargs)
nodes.append(qgemm_node)
# Adds a "Cast" node to cast QGemm output to float
cast_op_output = qgemm_output + "_cast_output"
cast_node = onnx.helper.make_node(
"Cast", [qgemm_output], [cast_op_output], qgemm_output + "_cast", to=onnx_proto.TensorProto.FLOAT
)
nodes.append(cast_node)
# Adds a "Add" node to sum the remaining bias to the Gemm output
bias_node = onnx.helper.make_node(
"Add", [cast_node.output[0], "crit.out_layers_biases.0"], [node.output[0]], qgemm_name + "_output_add"
)
nodes.append(bias_node)
# Adds new nodes to the original quantizer list
self.quantizer.new_nodes += nodes
def add_new_quant_operators() -> None:
"""Add support for new quantization operators by changing
internal onnxruntime registry dictionaries.
"""
# Changes the internal `IntegerOpsRegistry`
# and adds support for new quantization operators
IntegerOpsRegistry["Gemm"] = GemmQuant
def dynamic_quantization_onnx(onnx_model_path: str) -> str:
"""Perform dynamic quantization on an ONNX model.
The quantized model is saved to a new file with "-int8" appended
to the original file name.
Args:
onnx_model_path: Path to the ONNX model to be quantized.
Returns:
Path to the dynamic quantized ONNX model.
"""
logger.info(f"Quantizing model: {onnx_model_path}")
# Adds new quantization operators
# For now, we are only adding support for Gemm
# add_new_quant_operators()
# Performs the dynamic quantization
qnt_model_path = create_file_name_identifier(onnx_model_path, "-int8")
quantize_dynamic(onnx_model_path, qnt_model_path, per_channel=False, reduce_range=False, optimize_model=False)
return qnt_model_path
def dynamic_quantization_torch(
model: torch.nn.Module, embedding_layers: Optional[List[str]] = ["word_emb", "transformer.wpe", "transformer.wte"]
) -> torch.nn.Module:
"""Perform dynamic quantization on a PyTorch model.
This function performs dynamic quantization on the input PyTorch model, including
any specified embedding layers.
Args:
model: PyTorch model to be quantized.
embedding_layers: List of string-based identifiers of embedding layers to be quantized.
Returns:
    Dynamically quantized PyTorch model.
"""
logger.info("Quantizing model ...")
# Sets the number of threads
# Quantized model only uses maximum of 1 thread
torch.set_num_threads(1)
# Performs an initial dynamic quantization
model_qnt = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, inplace=False)
# Currently, code below works as a caveat to quantize the embedding layers
for layer in embedding_layers:
# Checks if supplied embedding layer really exists
if rgetattr(model_qnt, layer, 0):
# Sets the appropriate `qconfig` for embedding layers
attr = layer + ".qconfig"
rsetattr(model_qnt, attr, torch.quantization.float_qparams_weight_only_qconfig)
# Prepares the model for quantization and quantizes it
model_qnt = torch.quantization.prepare(model_qnt, inplace=False)
model_qnt = torch.quantization.convert(model_qnt, inplace=False)
return model_qnt
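# --- Illustrative usage sketch (not part of the original archai source) ---
# Dynamic quantization of a toy model with no embedding layers (so the
# embedding-specific path above is skipped). This is a sketch, not a benchmark.
def _example_dynamic_quantization_torch() -> None:
    toy = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU(), torch.nn.Linear(32, 4))
    toy_qnt = dynamic_quantization_torch(toy, embedding_layers=[])
    out = toy_qnt(torch.randn(1, 16))
    assert out.shape == (1, 4)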
|
archai/archai/quantization/ptq.py/0
|
{
"file_path": "archai/archai/quantization/ptq.py",
"repo_id": "archai",
"token_count": 2297
}
| 322 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from overrides import overrides
from archai.supergraph.algos.petridish.petridish_model_desc_builder import (
PetridishModelBuilder,
)
from archai.supergraph.nas.arch_trainer import ArchTrainer, TArchTrainer
from archai.supergraph.nas.exp_runner import ExperimentRunner
class PetridishExperimentRunner(ExperimentRunner):
@overrides
def model_desc_builder(self)->PetridishModelBuilder:
return PetridishModelBuilder()
@overrides
def trainer_class(self)->TArchTrainer:
return ArchTrainer
|
archai/archai/supergraph/algos/nasbench101/nasbench101_exp_runner.py/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/nasbench101_exp_runner.py",
"repo_id": "archai",
"token_count": 197
}
| 323 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Tuple
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.xnas.xnas_op import XnasOp
from archai.supergraph.nas.model_desc import (
CellType,
ConvMacroParams,
EdgeDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.operations import Op
class XnasModelDescBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('xnas_op',
lambda op_desc, arch_params, affine:
XnasOp(op_desc, arch_params, affine))
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
reduction = (cell_type==CellType.Reduction)
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
# add xnas op for each edge
for i in range(node_count):
edges=[]
for j in range(i+2):
op_desc = OpDesc('xnas_op',
params={
'conv': conv_params,
'stride': 2 if reduction and j < 2 else 1
}, in_len=1, trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=[j])
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
|
archai/archai/supergraph/algos/xnas/xnas_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/xnas/xnas_model_desc_builder.py",
"repo_id": "archai",
"token_count": 978
}
| 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torchvision
from overrides import overrides
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class Food101Provider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainpath = os.path.join(self._dataroot, 'food-101', 'train')
trainset = torchvision.datasets.ImageFolder(trainpath, transform=transform_train)
if load_test:
testpath = os.path.join(self._dataroot, 'food-101', 'test')
testset = torchvision.datasets.ImageFolder(testpath, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
print(f'IMG SIZE: {img_size}')
if isinstance(img_size, int):
img_size = (img_size, img_size)
# TODO: Need to rethink the food101 transforms
MEAN = [0.5451, 0.4435, 0.3436]
STD = [0.2171, 0.2251, 0.2260] # TODO: should be [0.2517, 0.2521, 0.2573]
train_transf = [
transforms.RandomResizedCrop(img_size, scale=(0.75, 1)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2)
]
# food101 has images of varying sizes and are ~512 each side
margin_size = (int(img_size[0] + img_size[0]*0.1), int(img_size[1] + img_size[1]*0.1))
test_transf = [transforms.Resize(margin_size), transforms.CenterCrop(img_size)]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(train_transf + normalize)
test_transform = transforms.Compose(test_transf + normalize)
return train_transform, test_transform
register_dataset_provider('food101', Food101Provider)
|
archai/archai/supergraph/datasets/providers/food101_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/food101_provider.py",
"repo_id": "archai",
"token_count": 1097
}
| 325 |
import os
import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
## CIFAR10: kernel_size 7 -> 3, stride 2 -> 1, padding 3->1
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
## END
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.size(0), -1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, device, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
script_dir = os.path.dirname(__file__)
state_dict = torch.load(script_dir + '/state_dicts/'+arch+'.pt', map_location=device)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, device,
**kwargs)
def resnet34(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, device,
**kwargs)
def resnet50(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, device,
**kwargs)
def resnet101(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, device,
**kwargs)
def resnet152(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, device,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNeXt-50 32x4d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, device, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, device='cpu', **kwargs):
"""Constructs a ResNeXt-101 32x8d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, device, **kwargs)
|
archai/archai/supergraph/models/resnet.py/0
|
{
"file_path": "archai/archai/supergraph/models/resnet.py",
"repo_id": "archai",
"token_count": 4870
}
| 326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, Optional
from overrides import EnforceOverrides
from torch import nn
from archai.common import ml_utils, utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.datasets import data
from archai.supergraph.nas import nas_utils
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import ModelDesc
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.utils.checkpoint import CheckPoint
from archai.supergraph.utils.metrics import Metrics
from archai.supergraph.utils.trainer import Trainer
logger = get_global_logger()
class EvalResult:
def __init__(self, train_metrics:Metrics) -> None:
self.train_metrics = train_metrics
class Evaluater(EnforceOverrides):
def evaluate(self, conf_eval:Config, model_desc_builder:ModelDescBuilder)->EvalResult:
logger.pushd('eval_arch')
# region conf vars
conf_checkpoint = conf_eval['checkpoint']
resume = conf_eval['resume']
model_filename = conf_eval['model_filename']
metric_filename = conf_eval['metric_filename']
# endregion
model = self.create_model(conf_eval, model_desc_builder)
checkpoint = nas_utils.create_checkpoint(conf_checkpoint, resume)
train_metrics = self.train_model(conf_eval, model, checkpoint)
train_metrics.save(metric_filename)
# save model
if model_filename:
model_filename = utils.full_path(model_filename)
ml_utils.save_model(model, model_filename)
logger.info({'model_save_path': model_filename})
logger.popd()
return EvalResult(train_metrics)
def train_model(self, conf_train:Config, model:nn.Module,
checkpoint:Optional[CheckPoint])->Metrics:
conf_loader = conf_train['loader']
conf_train = conf_train['trainer']
# get data
data_loaders = self.get_data(conf_loader)
trainer = Trainer(conf_train, model, checkpoint)
train_metrics = trainer.fit(data_loaders)
return train_metrics
def get_data(self, conf_loader:Config)->data.DataLoaders:
# this dict caches the dataset objects per dataset config so we don't have to reload
# the reason we do dynamic attribute is so that any dependent methods
# can do ray.remote
if not hasattr(self, '_data_cache'):
self._data_cache:Dict[int, data.DataLoaders] = {}
# first get from cache
if id(conf_loader) in self._data_cache:
data_loaders = self._data_cache[id(conf_loader)]
else:
data_loaders = data.get_data(conf_loader)
self._data_cache[id(conf_loader)] = data_loaders
return data_loaders
def _default_module_name(self, dataset_name:str, function_name:str)->str:
"""Select PyTorch pre-defined network to support manual mode"""
module_name = ''
        # TODO: below detection code is too weak, need to improve, possibly encode image size in yaml and use that instead
if dataset_name.startswith('cifar'):
if function_name.startswith('res'): # support resnext as well
module_name = 'archai.supergraph.models.resnet'
elif function_name.startswith('dense'):
module_name = 'archai.supergraph.models.densenet'
elif dataset_name.startswith('imagenet') or dataset_name.startswith('sport8'):
module_name = 'torchvision.models'
if not module_name:
raise NotImplementedError(f'Cannot get default module for {function_name} and dataset {dataset_name} because it is not supported yet')
return module_name
def create_model(self, conf_eval:Config, model_desc_builder:ModelDescBuilder,
final_desc_filename=None, full_desc_filename=None)->nn.Module:
assert model_desc_builder is not None, 'Default evaluater requires model_desc_builder'
# region conf vars
# if explicitly passed in then don't get from conf
if not final_desc_filename:
final_desc_filename = conf_eval['final_desc_filename']
full_desc_filename = conf_eval['full_desc_filename']
conf_model_desc = conf_eval['model_desc']
# endregion
# load model desc file to get template model
template_model_desc = ModelDesc.load(final_desc_filename)
model_desc = model_desc_builder.build(conf_model_desc,
template=template_model_desc)
# save desc for reference
model_desc.save(full_desc_filename)
model = self.model_from_desc(model_desc)
logger.info({'model_factory':False,
'cells_len':len(model.desc.cell_descs()),
'init_node_ch': conf_model_desc['model_stems']['init_node_ch'],
'n_cells': conf_model_desc['n_cells'],
'n_reductions': conf_model_desc['n_reductions'],
'n_nodes': conf_model_desc['cell']['n_nodes']})
return model
def model_from_desc(self, model_desc)->Model:
return Model(model_desc, droppath=True, affine=True)
|
archai/archai/supergraph/nas/evaluater.py/0
|
{
"file_path": "archai/archai/supergraph/nas/evaluater.py",
"repo_id": "archai",
"token_count": 2185
}
| 327 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Dict, List, Optional
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
def heatmap(
    data: np.ndarray,
ax: Optional[Axes] = None,
xtick_labels: Optional[List[str]] = None,
ytick_labels: Optional[List[str]] = None,
cbar_kwargs: Optional[Dict[str, Any]] = None,
cbar_label: Optional[str] = None,
fmt: Optional[str] = "{x:.2f}",
**kwargs,
) -> None:
"""Plot a heatmap.
Args:
data: Data to plot.
ax: Axis to plot on.
xtick_labels: Labels for the x-axis.
ytick_labels: Labels for the y-axis.
cbar_kwargs: Keyword arguments to pass to the color bar.
cbar_label: Label for the color bar.
fmt: Format of the annotations.
"""
# Create the axis and plot the heatmap
if ax is None:
ax = plt.gca()
im = ax.imshow(data, **kwargs)
# Create the color bar
if cbar_kwargs is None:
cbar_kwargs = {}
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kwargs)
cbar.ax.set_ylabel(cbar_label, rotation=-90, va="bottom")
# Display all ticks
if xtick_labels is None:
xtick_labels = [i for i in range(data.shape[1])]
ax.set_xticks(np.arange(data.shape[1]), labels=xtick_labels)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)
if ytick_labels is None:
ytick_labels = [i for i in range(data.shape[0])]
ax.set_yticks(np.arange(data.shape[0]), labels=ytick_labels)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)
# Adjust the grid layout and ticks positioning
ax.spines[:].set_visible(False)
ax.grid(which="minor", color="w", linestyle="-", linewidth=3)
ax.tick_params(which="minor", top=False, bottom=False, left=False, labeltop=True, labelbottom=False)
# Annotate the heatmap
if isinstance(fmt, str):
fmt = matplotlib.ticker.StrMethodFormatter(fmt)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
im.axes.text(j, i, fmt(data[i, j], None), horizontalalignment="center", verticalalignment="center")
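if __name__ == "__main__":
    # Minimal usage sketch: plot a random 3x4 heatmap with default tick labels
    # (the random data and the "viridis" colormap are illustrative choices only).
    rng = np.random.default_rng(0)
    _, ax = plt.subplots()
    heatmap(rng.random((3, 4)), ax=ax, cbar_label="value", cmap="viridis")
    plt.show()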
|
archai/archai/supergraph/utils/heatmap.py/0
|
{
"file_path": "archai/archai/supergraph/utils/heatmap.py",
"repo_id": "archai",
"token_count": 948
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
from typing import Dict, Optional
from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
from transformers.training_args import TrainingArguments
class BPCTrainerCallback(TrainerCallback):
"""A `TrainerCallback` that adds bits per character metrics to the logs."""
def __init__(self, *args, **kwargs) -> None:
"""Initialize the `BPCTrainerCallback` with custom arguments and keyword arguments."""
super().__init__(*args, **kwargs)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs) -> None:
"""Add bits per character metrics to the training logs.
Args:
args: The training arguments.
state: The trainer state.
control: The trainer control.
"""
current_log = state.log_history[-1]
# Check whether the last log comes from the training step
if "loss" in current_log:
try:
current_log["bpc"] = current_log["loss"] / math.log(2)
except OverflowError:
current_log["bpc"] = math.inf
# Check whether the last log comes from the evaluation step
if "eval_loss" in current_log:
try:
current_log["eval_bpc"] = current_log["eval_loss"] / math.log(2)
except OverflowError:
current_log["eval_bpc"] = math.inf
def on_evaluate(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
metrics: Optional[Dict[str, float]] = None,
**kwargs
) -> None:
"""Add bits per character metrics to the evaluation metrics.
Args:
args: The training arguments.
state: The trainer state.
control: The trainer control.
metrics: The evaluation metrics.
"""
# Checks whether metrics have validation loss
if "eval_loss" in metrics:
try:
metrics["eval_bpc"] = metrics["eval_loss"] / math.log(2)
except OverflowError:
metrics["eval_bpc"] = math.inf
# Checks whether metrics have testing loss
if "test_loss" in metrics:
try:
metrics["test_bpc"] = metrics["test_loss"] / math.log(2)
except OverflowError:
metrics["test_bpc"] = math.inf
class PerplexityTrainerCallback(TrainerCallback):
"""A `TrainerCallback` that adds perplexity metrics to the logs."""
def __init__(self, *args, **kwargs) -> None:
"""Initialize the `PerplexityTrainerCallback` with custom arguments and keyword arguments."""
super().__init__(*args, **kwargs)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs) -> None:
"""Add perplexity metrics to the training logs.
Args:
args: The training arguments.
state: The trainer state.
control: The trainer control.
"""
current_log = state.log_history[-1]
# Checks whether last log comes from training step
if "loss" in current_log:
try:
current_log["ppl"] = math.exp(current_log["loss"])
except OverflowError:
current_log["ppl"] = math.inf
# Checks whether last log comes from evaluation step
if "eval_loss" in current_log:
try:
current_log["eval_ppl"] = math.exp(current_log["eval_loss"])
except OverflowError:
current_log["eval_ppl"] = math.inf
def on_evaluate(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
metrics: Optional[Dict[str, float]] = None,
**kwargs
) -> None:
"""Add perplexity metrics to the evaluation metrics.
Args:
args: The training arguments.
state: The trainer state.
control: The trainer control.
metrics: The evaluation metrics.
"""
# Checks whether metrics have validation loss
if "eval_loss" in metrics:
try:
metrics["eval_ppl"] = math.exp(metrics["eval_loss"])
except OverflowError:
metrics["eval_ppl"] = math.inf
# Checks whether metrics have testing loss
if "test_loss" in metrics:
try:
metrics["test_ppl"] = math.exp(metrics["test_loss"])
except OverflowError:
metrics["test_ppl"] = math.inf
|
archai/archai/trainers/nlp/hf_callbacks.py/0
|
{
"file_path": "archai/archai/trainers/nlp/hf_callbacks.py",
"repo_id": "archai",
"token_count": 2021
}
| 329 |
__include__: 'darts.yaml' # defaults are loaded from this file
common:
#yaml_log: False
apex:
ray:
enabled: True # initialize ray. Note: ray cannot be used if apex distributed is enabled
local_mode: False # if True then ray runs in serial mode
nas:
eval:
final_desc_foldername: '$expdir/model_desc_gallery' #
model_desc:
n_reductions: 2 # number of reductions to be applied
n_cells: 20 # number of max cells, for pareto frontier, we use cell_count_scale to multiply cells and limit by n_cells
aux_weight: 0.4 # weight for loss from auxiliary towers in test time arch
num_edges_to_sample: 2 # number of edges each node will take inputs from
model_stems:
init_node_ch: 36 # num of input/output channels for nodes in 1st cell
cell:
n_nodes: 5 # number of nodes in a cell if template desc is not provided
cell_post_op: 'proj_channels'
petridish:
cell_count_scale: 1.0 # for eval first multiply number of cells used in search by this factor, limit to n_cells
trainer:
epochs: 600
search:
final_desc_foldername: '$expdir/model_desc_gallery' # the gallery of models that eval will train from scratch
petridish:
convex_hull_eps: 0.025 # tolerance
      max_madd: 200000000 # search terminates once any parent model reaches this many multiply-adds, or once the parent pool reaches max_hull_points
      max_hull_points: 100 # search terminates once the pool of parent models reaches this size, or once max_madd is reached
checkpoints_foldername: '$expdir/petridish_search_checkpoints'
search_iters: 4
pareto:
max_cells: 8
max_reductions: 3
max_nodes: 3
enabled: True # if false then there will only be one seed model. if true a number of seed models with different number of cells, reductions and nodes will be used to initialize the search. this provides more coverage of the frontier.
model_desc:
n_cells: 3
n_reductions: 1
num_edges_to_sample: 2 # number of edges each node will take inputs from
cell:
n_nodes: 1
cell_post_op: 'proj_channels'
seed_train:
trainer:
epochs: 80 # number of epochs model will be trained before search
loader:
train_batch: 128
post_train:
trainer:
epochs: 80 # number of epochs model will be trained after search
loader:
train_batch: 96
trainer:
l1_alphas: 0.001 # as per paper
epochs: 80 # number of epochs model will be trained during search
loader:
train_batch: 96
val_ratio: 0.2 #split portion for test set, 0 to 1
|
archai/confs/algos/petridish.yaml/0
|
{
"file_path": "archai/confs/algos/petridish.yaml",
"repo_id": "archai",
"token_count": 967
}
| 330 |
# Using Docker to Run Archai
This folder contains tools for creating development and production environments that are secure and isolated from the host system, including Docker and gVisor.
## Docker
The Dockerfile can be used to build a development environment for running experiments. The `build_image.sh` and `run_container.sh` scripts can be used to build the Docker image and run the container, respectively:
```bash
bash build_image.sh
bash run_container.sh
```
## Docker and gVisor for Enhanced Security
[gVisor](https://gvisor.dev) is a runtime that provides an additional layer of security for containers by intercepting and monitoring runtime instructions before they reach the host system. Its primary goal is to enable the execution of untrusted workloads without compromising the security of other workloads or the underlying infrastructure.
To install the latest release of gVisor and use it as a Docker runtime:
Download and install gVisor:
```bash
(
set -e
ARCH=$(uname -m)
URL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}
wget ${URL}/runsc ${URL}/runsc.sha512 ${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512
sha512sum -c runsc.sha512 -c containerd-shim-runsc-v1.sha512
rm -f *.sha512
chmod a+rx runsc containerd-shim-runsc-v1
sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
)
```
Set gVisor as the Docker runtime:
```bash
sudo /usr/local/bin/runsc install
sudo systemctl restart docker
```
To run the container with Docker and gVisor:
```bash
bash run_container_with_gvisor.sh
```
|
archai/docker/README.md/0
|
{
"file_path": "archai/docker/README.md",
"repo_id": "archai",
"token_count": 475
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from pathlib import Path
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Generates new tokens with a pre-trained model.")
parser.add_argument("pre_trained_model_path", type=str, help="Path to the pre-trained model file.")
parser.add_argument("hub_tokenizer_path", type=str, help="Path to the Hugging Face's Hub tokenizer.")
parser.add_argument("prompt", type=str, help="Prompt to serve as the generation's context.")
parser.add_argument("--output_path",
type=str,
help="Path to a file where the generated text will be saved. If not specified, it will be printed to the console.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
full_path = max(Path(args.pre_trained_model_path).glob("checkpoint-*"), key=lambda x: int(x.stem.split("-")[-1]))
model = AutoModelForCausalLM.from_pretrained(full_path).to(device)
model.config.use_cache = True
tokenizer = AutoTokenizer.from_pretrained(args.hub_tokenizer_path)
inputs = tokenizer(args.prompt, return_tensors="pt").to(device)
outputs = model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
pad_token_id=model.config.eos_token_id,
do_sample=True,
temperature=0.8,
top_p=0.95,
max_new_tokens=128,
)
if args.output_path:
with open(args.output_path, "x") as f:
f.write(tokenizer.decode(outputs[0], skip_special_tokens=True))
else:
print(f"Generated: \n{tokenizer.decode(outputs[0], skip_special_tokens=True)}")
|
archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/generate_text.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/generate_text.py",
"repo_id": "archai",
"token_count": 746
}
| 332 |
<jupyter_start><jupyter_text>Transformer++ Search Space
```{warning}
This is an experimental feature and could change at any time
```
This notebook shows how to use Archai's Transformer++ search space for Language Modelling. This search space consists of 8 different token-mixing primitives that can be used to create a wide variety of architectures. The Transformer++ model functions like a regular decoder-only Transformer architecture, comprising an embedding layer, followed by a sequence of $L$ decoder layers and a final language model head.
The Transformer++ search space supports using one or more primitives on decoder layers by sharding the embedding dimension across multiple primitives.
List of Available Primitives
| Primitive | Extra params | Custom CUDA Kernel | Reference |
|--------------------------|--------------------------------------------|--------------------|-----------|
| Multihead Self-Attention | | 🗸 | [Link](https://arxiv.org/abs/1706.03762) |
| SGConv | `kernel_size` | 🗸 | [Link](https://openreview.net/forum?id=TGJSPbRpJX-) |
| SGConv3 | `kernel_size` | 🗸 | |
| Local Attention | `window_size` | | [Link](https://arxiv.org/abs/2004.05150v2) |
| LSH Attention | `bucket_size`, `num_buckets`, `num_hashes` | | [Link](https://arxiv.org/abs/2001.04451) |
| Separable Conv1D | `kernel_size` | | |
Examples
Sampling architectures<jupyter_code>from archai.discrete_search.search_spaces.nlp import TfppSearchSpace
from transformers import GPT2Tokenizer
ss = TfppSearchSpace(
backbone='codegen', embed_dims=[768, 768*2], inner_dims=[768*4, 1024*4], total_heads=[12],
total_layers=range(6), op_subset=['mha', 'sgconv', 'local_attn'],
local_attn_window_sizes=[256, 512], sgconv_kernel_sizes=[128, 256],
mixed_ops=False, # Only one primitive per layer
homogeneous=False,
seed=42,
# Huggingface kwargs
n_positions=8192, # Maximum Seq len
vocab_size=50257
)
m = ss.random_sample()
m.arch<jupyter_output><empty_output><jupyter_text>Model forward pass<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'})
x = tokenizer(['Just testing', 'something'], return_tensors='pt', padding=True, truncation=True)
m.arch(**x)<jupyter_output><empty_output><jupyter_text>Use with custom CUDA Kernels
Some primitives have custom CUDA kernels that can be used depending on the hardware available. For more information on installation instructions, see the [flash_attention](https://github.com/HazyResearch/flash-attention) and [H3](https://github.com/HazyResearch/H3/tree/main) repos by HazyResearch.
To install archai with flash-attention kernel dependencies, use
```shell
python3 -m pip install archai[flash-attn]
```
Available CUDA Kernels
* FusedDense (for linear projections)
* FusedMLP
* FlashAttention (used in MHA)
* FlashRotaryEmb (used in MHA)
* FastFFTConv (used in SGconv and SGconv3)<jupyter_code>ss = TfppSearchSpace(
backbone='codegen', embed_dims=[768, 768*2], inner_dims=[768*4, 1024*4], total_heads=[12],
total_layers=range(1, 6), op_subset=['mha', 'sgconv', 'local_attn'],
local_attn_window_sizes=[256, 512], sgconv_kernel_sizes=[128, 256],
mixed_ops=False, # Only one primitive per layer
homogeneous=False,
seed=42,
# Extra kwargs
n_positions=8192, # Maximum Seq len
vocab_size=50257,
# CUDA kernel flags
fused_mlp=True,
fused_dense=True,
fast_fftconv=True,
flash_attn=True,
flash_rotary_emb=True
)
#NBVAL_SKIP
m = ss.random_sample()<jupyter_output><empty_output>
|
archai/docs/getting_started/notebooks/nlp/tfpp_ss.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/nlp/tfpp_ss.ipynb",
"repo_id": "archai",
"token_count": 1701
}
| 333 |
API
===
Archai Model
------------
.. automodule:: archai.discrete_search.api.archai_model
:members:
:undoc-members:
Model Evaluator
---------------
.. automodule:: archai.discrete_search.api.model_evaluator
:members:
:undoc-members:
Predictor
---------
.. automodule:: archai.discrete_search.api.predictor
:members:
:undoc-members:
Search Objectives
-----------------
.. automodule:: archai.discrete_search.api.search_objectives
:members:
:undoc-members:
Search Results
--------------
.. automodule:: archai.discrete_search.api.search_results
:members:
:undoc-members:
Search Space
------------
.. automodule:: archai.discrete_search.api.search_space
:members:
:undoc-members:
Searcher
--------
.. automodule:: archai.discrete_search.api.searcher
:members:
:undoc-members:
|
archai/docs/reference/api/archai.discrete_search.api.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.api.rst",
"repo_id": "archai",
"token_count": 301
}
| 334 |
Configuration Utilities
=======================
ONNX Configuration (Base)
-------------------------
.. automodule:: archai.onnx.config_utils.onnx_config_base
:members:
:undoc-members:
CodeGen ONNX Configuration
--------------------------
.. automodule:: archai.onnx.config_utils.codegen_onnx_config
:members:
:undoc-members:
GPT-2 ONNX Configuration
------------------------
.. automodule:: archai.onnx.config_utils.gpt2_onnx_config
:members:
:undoc-members:
|
archai/docs/reference/api/archai.onnx.config_utils.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.onnx.config_utils.rst",
"repo_id": "archai",
"token_count": 166
}
| 335 |
Datasets
========
.. toctree::
:maxdepth: 2
archai.supergraph.datasets.providers
Augmentation Policies
---------------------
.. automodule:: archai.supergraph.datasets.aug_policies
:members:
:undoc-members:
Augmentations
-------------
.. automodule:: archai.supergraph.datasets.augmentation
:members:
:undoc-members:
Data
----
.. automodule:: archai.supergraph.datasets.data
:members:
:undoc-members:
Dataset Provider
----------------
.. automodule:: archai.supergraph.datasets.dataset_provider
:members:
:undoc-members:
Distributed Stratified Sampler
------------------------------
.. automodule:: archai.supergraph.datasets.distributed_stratified_sampler
:members:
:undoc-members:
Limit Dataset
-------------
.. automodule:: archai.supergraph.datasets.limit_dataset
:members:
:undoc-members:
Meta Dataset
------------
.. automodule:: archai.supergraph.datasets.meta_dataset
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.datasets.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.datasets.rst",
"repo_id": "archai",
"token_count": 349
}
| 336 |
# Evaluating Models on HumanEval
This guide will provide step-by-step instructions to install the required dependencies and evaluate pre-trained models on HumanEval.
## Installing Dependencies
To begin, please install the required dependencies by running the following command:
```bash
pip install -r requirements.txt
```
## Evaluating a DeepSpeed Model
If you are evaluating a pre-trained DeepSpeed model, the process is different from evaluating with Hugging Face. Unlike Hugging Face, DeepSpeed does not have the benefit of `model.from_pretrained()`. Therefore, we need to load the DeepSpeed checkpoint, gather the model's state, and apply it to the model instance. To evaluate a pre-trained DeepSpeed model, run the following command:
```bash
python deepspeed/evaluate_human_eval.py --help
```
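For reference, the core of that flow looks roughly like the following sketch (the model class, checkpoint path, and state-dict key are placeholders that depend on your training setup; the script above handles these details for you):
```python
import torch
from transformers import GPT2Config, GPT2LMHeadModel
# Placeholder path: a DeepSpeed checkpoint folder typically contains files such as
# `global_stepN/mp_rank_00_model_states.pt`.
checkpoint_path = "checkpoint_dir/global_step1000/mp_rank_00_model_states.pt"
# Rebuild the model instance, gather the saved state, and apply it to the model.
model = GPT2LMHeadModel(GPT2Config())
state = torch.load(checkpoint_path, map_location="cpu")
model.load_state_dict(state["module"], strict=False)
model.eval()
```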
## Evaluating a Hugging Face Model
If you are evaluating a pre-trained Hugging Face model, the only requirement is that the checkpoint folder follows the Hugging Face format, which includes a folder named `checkpoint-step_number` and is composed of `config.json` and `*.pt` files. To evaluate a pre-trained Hugging Face model, run the following command:
```bash
python hf/evaluate_human_eval.py --help
```
*The `--help` argument will print the help message and provide a description of each argument.*
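Because the checkpoint folder follows the Hugging Face layout, it can also be loaded directly for a quick sanity check (a minimal sketch; the folder name below is a placeholder):
```python
from transformers import AutoModelForCausalLM
# Placeholder checkpoint folder produced during training.
model = AutoModelForCausalLM.from_pretrained("output_dir/checkpoint-1000")
model.eval()
```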
|
archai/scripts/eval/README.md/0
|
{
"file_path": "archai/scripts/eval/README.md",
"repo_id": "archai",
"token_count": 323
}
| 337 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import itertools
import pathlib
from concurrent.futures import ThreadPoolExecutor
import subprocess
import sys
from threading import Lock
import numpy as np
from PIL import Image
try:
from runstats import Statistics
except ImportError:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'runstats'])
from runstats import Statistics
class ImageStats:
def __init__(self) -> None:
self.dims = set()
self.counts = {}
self.sizes = Statistics()
self.suffixes = set()
self.lock = Lock()
def push(self, filepath: pathlib.Path) -> None:
with Image.open(str(filepath)) as img:
shape = np.array(img).shape
filesize = filepath.stat().st_size
with self.lock:
self.dims.add(shape)
parent = str(filepath.parent)
self.counts[parent] = self.counts.get(parent, 0) + 1
self.sizes.push(filesize)
self.suffixes.add(filepath.suffix)
if __name__ == "__main__":
path = r"D:\datasets\ImageNet"
stats = ImageStats()
executor = ThreadPoolExecutor(max_workers=32)
for p in itertools.chain(pathlib.Path(path).rglob("*.jp*g"), pathlib.Path(path).rglob("*.png")):
executor.submit(stats.push, p)
print(stats.sizes.mean())
print(stats.sizes.stddev())
print(stats.suffixes)
print(stats.counts)
print(stats.dims)
exit(0)
|
archai/scripts/supergraph/download_datasets/img_stats.py/0
|
{
"file_path": "archai/scripts/supergraph/download_datasets/img_stats.py",
"repo_id": "archai",
"token_count": 622
}
| 338 |
"""Assess the changes in rank due to change in LR"""
import argparse
import os
import pathlib
import statistics
from ast import literal_eval
import scipy.stats
from archai.common import delimited_text, utils
def main():
default_dir = r"D:\GitHubSrc\archaiphilly\phillytools\nasbench_darts_lr0.025_wd3_b128"
parser = argparse.ArgumentParser(description="Pytorch cifar training")
parser.add_argument("--in-dir", default=default_dir)
parser.add_argument("--out-dir", default=default_dir)
args = parser.parse_args()
parsed_metrics = delimited_text.DelimitedText()
in_dir = pathlib.Path(utils.full_path(args.in_dir))
assert in_dir.exists(), f"Does not exist: {in_dir}"
metrics_filepaths = in_dir.rglob("metrics*.tsv")
for metrics_filepath in metrics_filepaths:
text = metrics_filepath.read_text()
parsed_metrics.add_from_text(text, has_header=True)
assert len(parsed_metrics) >= 1
model_nums = [int(r) for r in parsed_metrics.get_col("model_name")]
nasbench_acc = [statistics.mean(literal_eval(r)) for r in parsed_metrics.get_col("nasbenc101_test_acc")]
retrain_acc = [float(r) for r in parsed_metrics.get_col("test_acc")]
stats = list(zip(model_nums, nasbench_acc, retrain_acc))
stats.sort(key=lambda t: t[0])
retrain_ranks = utils.get_ranks(stats, key=lambda t: t[2])
stats = list((i, rr, *t) for i, (t, rr) in enumerate(zip(stats, retrain_ranks)))
corr = scipy.stats.pearsonr([t[0] for t in stats], [t[1] for t in stats])
out_metrics = delimited_text.DelimitedText()
out_metrics.add_from_cols_list(
stats, header=["nasbench_rank", "rerank", "model_num", "nasbench_acc", "retrain_acc"]
)
rerank_filepath = os.path.join(utils.full_path(args.out_dir), "reranking.tsv")
out_metrics.save(rerank_filepath)
corr_filepath = os.path.join(utils.full_path(args.out_dir), "corr.txt")
utils.write_string(corr_filepath, str(corr))
if __name__ == "__main__":
main()
|
archai/scripts/supergraph/nasbench101/rank_change_for_lr.py/0
|
{
"file_path": "archai/scripts/supergraph/nasbench101/rank_change_for_lr.py",
"repo_id": "archai",
"token_count": 811
}
| 339 |
{
"fp16": {
"enabled": true,
"initial_scale_power": 12
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": 1.8e-3,
"betas": [
0.9,
0.95
],
"eps": 1e-7,
"weight_decay": 0.1
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"warmup_min_lr": 0.0,
"warmup_max_lr": 1.8e-3,
"warmup_type": "linear",
"warmup_num_steps": 50,
"total_num_steps": 1000
}
},
"zero_optimization": {
"stage": 0
},
"gradient_clipping": 1.0,
"steps_per_print": 10,
"train_batch_size": 256,
"train_micro_batch_size_per_gpu": 8,
"wall_clock_breakdown": false,
"zero_allow_untested_optimizer": true
}
|
archai/scripts/trainers/deepspeed/ds_config.json/0
|
{
"file_path": "archai/scripts/trainers/deepspeed/ds_config.json",
"repo_id": "archai",
"token_count": 514
}
| 340 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
def delete(con_str, experiment_name):
parser = argparse.ArgumentParser(description='Delete a model from azure using its friendly name')
parser.add_argument('name', help='The friendly name allocated by the upload script.')
parser.add_argument('--file', help='Delete just the one file associated with the friendly name.')
args = parser.parse_args()
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
store.delete_blobs(args.name, args.file)
if not args.file:
store.delete_status(args.name)
if __name__ == '__main__':
experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
delete(con_str, experiment_name)
|
archai/tasks/face_segmentation/aml/azure/delete.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/delete.py",
"repo_id": "archai",
"token_count": 387
}
| 341 |
# Readme
This folder contains some handy stuff for setting up an Azure account so you can run the code in the
[Azure](../../azure/readme.md) folder and create a docker image for running SNPE model quantization
jobs on a kubernetes cluster. You can also run this docker image in a Linux container on Windows
using the Docker Desktop for Windows.
First you will need to decide which Azure Subscription to use, install the
[Azure Command Line Interface](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli)
and run `az account set --subscription id` to make this subscription your default.
## setup.ps1
This [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-linux?view=powershell-7.3)
script creates the azure resources in your chosen subscription needed by the `runner.py` script.
This includes a storage account for storing models and a status table, and a usage table. The script
contains a default `$plan_location` of `westus2`, feel free to change that to whatever you need. It
also creates an azure docker container registry and AKS kubernetes cluster, but using the Kubernetes
cluster for model quantization is optional, you can run the `runner.py` script without AKS.
The setup script requires the following environment variables be set before hand:
- **SNPE_SDK** - points to a local zip file containing SNPE SDK (we have tested version `snpe-2.5.0.4052.zip`)
- **ANDROID_NDK** - points to a local zip file containing the Android NDK zip file (we have tested version `android-ndk-r25c-linux.zip`)
- **INPUT_TESTSET** - points to a local zip file containing 10,000 image test set from your dataset.
The [SNPE Readme](../../snpe/readme.md) shows where to find those zip files.
After running this script you will see further instructions: first, a docker command line in case you
want to build the docker image that runs in a kubernetes cluster; second, a
`set MODEL_STORAGE_CONNECTION_STRING=...` that you can set on your system so that subsequent scripts
talk to the right azure storage account. On Linux this would be an export in your `~/.profile`, and
don't forget the double quotes.
```
export MODEL_STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=...==;EndpointSuffix=core.windows.net"
```
## Dockerfile
This builds a docker image that you can run in a Azure Kubernetes cluster that will do SNPE model
quantization in the cloud. This frees up your Linux box that is managing Qualcomm devices and helps
you increase your Qualcomm device utilization.
The `setup.ps1` script shows what docker commands to run to build the image, how to login to your
azure docker container registry, how to take your image for that container registry and push it
to Azure. So you do not need to use the public docker.org container registry.
You can test your docker image locally by running:
```
docker run -e MODEL_STORAGE_CONNECTION_STRING=$MODEL_STORAGE_CONNECTION_STRING -it <image_id>
```
If you need to debug your docker image interactively you can run this instead:
```
docker run -it <image_id> /bin/bash
```
Then you can poke around the `run.sh` and other things to verify things manually.
## Publish image to your Azure Container Registry
First you need to tag your newly created image with the correct name:
```
docker tag <image_id> snpecontainerregistry001.azurecr.io/quantizer:1.27
```
You can find the correct version in the `quantizer.yaml` file that was updated by `setup.ps1`.
Then you can push this image to your Azure Container Registry named `snpecontainerregistry001`. You
can configure your local docker so it can talk to this Azure Kubernetes cluster (AKS). The Azure
Portal has a connect script under the AKS resource Overview. You will see a `Connect` button
that shows a string like this:
```
az aks get-credentials --resource-group snpe-quantizaton-rg --name snpe-quantizer-aks
```
Run that locally and then you can push docker images to this registry:
```
docker push snpecontainerregistry001.azurecr.io/quantizer:1.27
```
Again, make sure you specify the right version here. The `setup.ps1` script will automatically
increment this version number each time it runs in case you need to push new versions of this image.
## quantizer.yaml
Then you can use `kubectl apply -f quantizer.yaml` to deploy this new image version to your AKS cluster.
Notice this yaml configures AKS to scale up to 100 nodes if necessary and the scaling is triggered
when a given node passes 40% CPU utilization. You can tweak these numbers however you like to fit
your budget. But you may be surprised by how cheap AKS is. In a month of quantizing over 8000
models, the Azure cost analysis shows a total cost of around $8. A drop in the bucket compared to
model training costs. The AKS cluster auto-scales, so most of the time it is scaled down to 1 node
and sitting idle, generating very little cost.
This quantizer runs in the `snpe` kubernetes namespace, and you can make this your default namespace
by running:
```
kubectl config set-context --current --namespace=snpe
```
You can run `kubectl get pods` to see what is running in Azure and you should see something like this:
```
NAME READY STATUS RESTARTS AGE
snpe-quantizer-54dcf74c99-kfj8p 0/1 ContainerCreating 0 4s
snpe-quantizer-845c7cfcd8-q8kjh 1/1 Running 0 47h
```
You can watch what these pods are doing by running:
```
kubectl logs snpe-quantizer-54dcf74c99-kfj8p -f
```
And you will see some output like this:
```
Sleeping for 30 seconds...
Using storage account: "nasfacemodels"
snpe-quantizer-d9f4b6c96-jsb7q: model test is running on: clovett-14_e6dc0375
# skip entity test because someone else is working on it
No work found.
Sleeping for 30 seconds...
```
This is good and means the pod is waiting for work to show up in the `status` table in your
Azure storage account. You can now use the [upload.py](../../azure/upload.py) script to upload a
face segmentation ONNX model to do a test run. You can train one of these models using
[train.py](../../../train.py) with one of the good model architectures listed in
[archs/snp_target](../../../archs/snp_target).
## run.sh
This little script is used as the entry point to the Docker image, you will see this in the last
`RUN` command in the Dockerfile. The reason this `run.sh` contains a loop is because the Python
script checks for memory leaks and auto-terminates itself if it sees memory usage climb too high.
This way the quantizer pod can run pretty much forever, or at least until you deploy a new version.
|
archai/tasks/face_segmentation/aml/docker/quantizer/readme.md/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/docker/quantizer/readme.md",
"repo_id": "archai",
"token_count": 1946
}
| 342 |