text (string, 5–22M chars) | id (string, 12–177 chars) | metadata (dict) | __index_level_0__ (int64, 0–1.37k) |
---|---|---|---|
import { DefaultPalette, IStackItemStyles, Stack } from "@fluentui/react";
import React from "react";
import { OperationStep } from "../../models/operation";

interface ResourceOperationStepsListProps {
  header: string,
  val?: OperationStep[]
}

export const ResourceOperationStepsList: React.FunctionComponent<ResourceOperationStepsListProps> = (props: ResourceOperationStepsListProps) => {
  const stackItemStyles: IStackItemStyles = {
    root: {
      padding: '5px 0',
      color: DefaultPalette.neutralSecondary
    }
  }
  return (
    <Stack wrap horizontal>
      <Stack.Item styles={stackItemStyles} style={{ width: '20%' }}>
        {props.header}
      </Stack.Item>
      <div style={{ width: '80%' }}>
        {props.val?.map((step: OperationStep, i: number) => {
          return (
            <Stack.Item styles={stackItemStyles} key={i}>
              <div>
                {i + 1}{')'} {step.stepTitle}
              </div>
              <div style={{ color: DefaultPalette.neutralTertiary }}>
                {step.message}
              </div>
            </Stack.Item>
          )
        })}
      </div>
    </Stack>
  );
}
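For orientation, here is a minimal usage sketch (the wrapper component OperationStepsPanel is hypothetical and not part of the file above): it simply hands an operation's steps to the list component.

import React from "react";
import { OperationStep } from "../../models/operation";
import { ResourceOperationStepsList } from "./ResourceOperationStepsList";

// Hypothetical wrapper: renders the numbered steps of a single operation under a "Steps" heading.
export const OperationStepsPanel: React.FunctionComponent<{ steps?: OperationStep[] }> = (props) => (
  <ResourceOperationStepsList header="Steps" val={props.steps} />
);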
|
AzureTRE/ui/app/src/components/shared/ResourceOperationStepsList.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceOperationStepsList.tsx",
"repo_id": "AzureTRE",
"token_count": 504
}
| 139 |
import { DefaultButton, MessageBar, MessageBarType, Spinner, SpinnerSize, Stack } from "@fluentui/react";
import React, { useEffect, useState } from "react";
import { LoadingState } from "../../../models/loadingState";
import { HttpMethod, useAuthApiCall } from "../../../hooks/useAuthApiCall";
import { APIError } from "../../../models/exceptions";
import { ExceptionLayout } from "../ExceptionLayout";

interface SelectTemplateProps {
  templatesPath: string,
  workspaceApplicationIdURI?: string,
  onSelectTemplate: (templateName: string) => void
}

export const SelectTemplate: React.FunctionComponent<SelectTemplateProps> = (props: SelectTemplateProps) => {
  const [templates, setTemplates] = useState<any[] | null>(null);
  const [loading, setLoading] = useState(LoadingState.Loading as LoadingState);
  const apiCall = useAuthApiCall();
  const [apiError, setApiError] = useState({} as APIError);

  useEffect(() => {
    const getTemplates = async () => {
      try {
        const templatesResponse = await apiCall(props.templatesPath, HttpMethod.Get, props.workspaceApplicationIdURI);
        setTemplates(templatesResponse.templates);
        setLoading(LoadingState.Ok);
      } catch (err: any) {
        err.userMessage = 'Error retrieving templates';
        setApiError(err);
        setLoading(LoadingState.Error);
      }
    };
    // Fetch resource templates only if not already fetched
    if (!templates) {
      getTemplates();
    }
  }, [apiCall, props.templatesPath, templates, props.workspaceApplicationIdURI]);

  switch (loading) {
    case LoadingState.Ok:
      return (
        templates && templates.length > 0 ? <Stack>
          {
            templates.map((template: any, i) => {
              return (
                <div key={i}>
                  <h2>{template.title}</h2>
                  <p>{template.description}</p>
                  <DefaultButton text="Create" onClick={() => props.onSelectTemplate(template.name)} />
                </div>
              )
            })
          }
        </Stack> : <MessageBar
          messageBarType={MessageBarType.info}
          isMultiline={true}
        >
          <h3>No templates found</h3>
          <p>Looks like there aren't any templates registered for this resource type.</p>
        </MessageBar>
      )
    case LoadingState.Error:
      return (
        <ExceptionLayout e={apiError} />
      );
    default:
      return (
        <div style={{ marginTop: 20 }}>
          <Spinner label="Loading templates" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
        </div>
      )
  }
}
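As a rough usage sketch (the parent component and the templatesPath value are assumptions, not taken from the repository), the component needs an API path to fetch templates from and a callback that receives the chosen template's name:

import React from "react";
import { SelectTemplate } from "./SelectTemplate";

// Hypothetical parent step in a create-resource flow.
export const ChooseWorkspaceTemplate: React.FunctionComponent = () => (
  <SelectTemplate
    templatesPath="workspace-templates"  // assumed API path
    onSelectTemplate={(name: string) => console.log(`template selected: ${name}`)}
  />
);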
|
AzureTRE/ui/app/src/components/shared/create-update-resource/SelectTemplate.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/create-update-resource/SelectTemplate.tsx",
"repo_id": "AzureTRE",
"token_count": 1404
}
| 140 |
import React from "react";

export const AppRolesContext = React.createContext({
  roles: [] as Array<string>,
  setAppRoles: (roles: Array<string>) => { }
});
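A brief consumption sketch (AppRolesProvider, AdminOnly and the role name "TREAdmin" are illustrative assumptions): the context is populated once near the app root and read wherever role checks are needed.

import React, { useContext, useState } from "react";
import { AppRolesContext } from "./AppRolesContext";

// Hypothetical provider wiring near the app root.
export const AppRolesProvider: React.FunctionComponent<{ children?: React.ReactNode }> = (props) => {
  const [roles, setRoles] = useState<Array<string>>([]);
  return (
    <AppRolesContext.Provider value={{ roles: roles, setAppRoles: setRoles }}>
      {props.children}
    </AppRolesContext.Provider>
  );
};

// Hypothetical consumer: only renders its children when an assumed admin role is present.
export const AdminOnly: React.FunctionComponent<{ children?: React.ReactNode }> = (props) => {
  const appRoles = useContext(AppRolesContext);
  return appRoles.roles.includes("TREAdmin") ? <>{props.children}</> : null;
};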
|
AzureTRE/ui/app/src/contexts/AppRolesContext.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/contexts/AppRolesContext.ts",
"repo_id": "AzureTRE",
"token_count": 51
}
| 141 |
import { Resource } from "./resource";
import { Workspace } from "./workspace";
import { WorkspaceService } from "./workspaceService";

export enum ResourceType {
  Workspace = "workspace",
  WorkspaceService = "workspace-service",
  UserResource = "user-resource",
  SharedService = "shared-service"
}

export interface CreateFormResource {
  resourceType: ResourceType,
  resourceParent?: Workspace | WorkspaceService,
  updateResource?: Resource,
  onAdd?: (r: Resource) => void,
  workspaceApplicationIdURI?: string
}
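For illustration, a small sketch (createServiceForm is a hypothetical helper) of building a CreateFormResource that describes adding a workspace service under an existing workspace:

import { Resource } from "./resource";
import { Workspace } from "./workspace";
import { CreateFormResource, ResourceType } from "./resourceType";

// Hypothetical helper: describe a "create workspace-service" form for a given parent workspace.
export const createServiceForm = (
  workspace: Workspace,
  workspaceApplicationIdURI: string,
  onAdd: (r: Resource) => void
): CreateFormResource => ({
  resourceType: ResourceType.WorkspaceService,
  resourceParent: workspace,
  onAdd: onAdd,
  workspaceApplicationIdURI: workspaceApplicationIdURI
});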
|
AzureTRE/ui/app/src/models/resourceType.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/resourceType.ts",
"repo_id": "AzureTRE",
"token_count": 152
}
| 142 |
<!-- BioC.dtd -->
<!--
BioC is designed to allow programs that process text and
annotations on that text to easily share data and work
together. This DTD describes how that data is represented in XML
files.
Some believe XML is easily read by humans and that should be
supported by clearly formatting the elements. In the long run,
this is distracting. While the only meaningful spaces are in text
elements and the other spaces can be ignored, current tools add no
additional space. Formatters and editors may be used to make the
XML file appear more readable.
The possible variety of annotations that one might want to produce
or use is nearly countless. There is no guarantee that these are
organized in the nice nested structure required for XML
elements. Even if they were, it would be nice to easily ignore
unwanted annotations. So annotations are recorded in a stand-off
manner, external to the annotated text. The exceptions are
passages and sentences because of their fundamental place in text.
The text is expected to be encoded in Unicode, specifically
UTF-8. This is one of the encodings required to be implemented by
XML tools, is portable between big-endian and little-endian
machines and is a superset of 7-bit ASCII. Code points beyond 127
may be expressed directly in UTF-8 or indirectly using numeric
entities. Since many tools today still only directly process
ASCII characters, conversion should be available and
standardized. Offsets should be in 8 bit code units (bytes) for
easier processing by naive programs.
collection: Group of documents, usually from a larger corpus. If
a group of documents is from several corpora, use several
collections.
source: Name of the source corpus from which the documents were selected
date: Date documents extracted from original source. Can be as
simple as yyyymmdd or an ISO timestamp.
key: Separate file describing the infons used and any other useful
information about the data in the file. For example, if a file
includes part-of-speech tags, this file should describe the set of
part-of-speech tags used.
infon: key-value pairs. Can record essentially arbitrary
information. "type" will be a particular common key in the major
sub elements below. For PubMed references, passage "type" might
signal "title" or "abstract". For annotations, it might indicate
"noun phrase", "gene", or "disease". In the programming language
data structures, infons are typically represented as a map from a
string to a string. This means keys should be unique within each
parent element.
document: A document in the collection. A single, complete
stand-alone document as described by its parent source.
id: Typically, the id of the document in the parent
source. Should at least be unique in the collection.
passage: One portion of the document. In the sample collection of
PubMed documents, each document has a title and frequently an
abstract. Structured abstracts could have additional passages. For
a full text document, passages could be sections such as
Introduction, Materials and Methods, or Conclusion. Another option
would be paragraphs. Passages impose a linear structure on the
document. Further structure in the document can be described by
infon values.
offset: Where the passage occurs in the parent document. Depending
on the source corpus, this might be a very relevant number. They
should be sequential and identify a passage's position in the
document. Since the sample PubMed collection is extracted from an
XML file, literal offsets have little value. The title is given an
offset of zero, while the abstract is assumed to begin after the
title and one space.
text: The original text of the passage.
sentence: One sentence of the passage.
offset: A document offset to where the sentence begins in the
passage. This value is the sum of the passage offset and the local
offset within the passage.
text: The original text of the sentence.
annotation: Stand-off annotation
id: Used to refer to this annotation in relations. Should be
unique at whatever level relations appear. If relations appear
at the sentence level, annotation ids need to be unique within
each sentence. Similarly, if relations appear at the passage
level, annotation ids need to be unique within each passage.
location: Location of the annotated text. Multiple locations
indicate a multi-span annotation.
offset: Document offset to where the annotated text begins in
the passage or sentence. The value is the sum of the passage or
sentence offset and the local offset within the passage or
sentence.
length: Length of the annotated text. While unlikely, this could
be zero to describe an annotation that belongs between two
characters.
text: Typically the annotated text.
relation: Relation between multiple annotations and / or other
relations. Relations are allowed to appear at several levels
(document, passage, and sentence). Typically they will all appear
at one level, the level at which they are determined.
Significantly different types of relations might appear at
different levels.
id: Used to refer to this relation in other relations. This id
needs to be unique at whatever level relations appear. (See
discussion of annotation ids.)
refid: Id of an annotation or another relation.
role: Describes how the referenced annotation or other relation
participates in the current relation. Has a default value so it
can be left out if there is no meaningful value.
-->
<!ELEMENT collection ( source, date, key, infon*, document+ ) >
<!ELEMENT source (#PCDATA)>
<!ELEMENT date (#PCDATA)>
<!ELEMENT key (#PCDATA)>
<!ELEMENT infon (#PCDATA)>
<!ATTLIST infon key CDATA #REQUIRED >
<!ELEMENT document ( id, infon*, passage+, relation* ) >
<!ELEMENT id (#PCDATA)>
<!ELEMENT passage ( infon*, offset, ( ( text?, annotation* ) | sentence* ), relation* ) >
<!ELEMENT offset (#PCDATA)>
<!ELEMENT text (#PCDATA)>
<!ELEMENT sentence ( infon*, offset, text?, annotation*, relation* ) >
<!ELEMENT annotation ( infon*, location*, text ) >
<!ATTLIST annotation id CDATA #IMPLIED >
<!ELEMENT location EMPTY>
<!ATTLIST location offset CDATA #REQUIRED >
<!ATTLIST location length CDATA #REQUIRED >
<!ELEMENT relation ( infon*, node* ) >
<!ATTLIST relation id CDATA #IMPLIED >
<!ELEMENT node EMPTY>
<!ATTLIST node refid CDATA #REQUIRED >
<!ATTLIST node role CDATA "" >
|
BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/BioC.dtd/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/BioC.dtd",
"repo_id": "BioGPT",
"token_count": 1830
}
| 143 |
{
"chemical2id": {
"famotidine": "D015738",
"indomethacin": "D007213",
"sodium": "D012964",
"idm": "D007213",
"prostaglandin": "D011453",
"angiotensin": "D000809",
"tacrolimus": "D016559",
"prednisolone": "D011239",
"corticosteroid": "D000305",
"cyclosporine": "D016572",
"corticosteroids": "D000305",
"methamphetamine": "D008694",
"levodopa": "D007980",
"cyclophosphamide": "D003520",
"cyp": "D003520",
"suramin": "D013498",
"gr 82334": "C079014",
"clopidogrel": "C055162",
"bortezomib": "C400082",
"dexamethasone": "D003907",
"bort": "C400082",
"dex": "D003907",
"bisphenol a": "C006780",
"bpa": "C006780",
"isoproterenol": "D007545",
"lactate": "D019344",
"creatine": "D003401",
"alanine": "D000409",
"aspartate": "D001224",
"cholesterol": "D002784",
"triglycerides": "D014280",
"fatty acid": "D005227",
"malondialdehyde": "D008315",
"mda": "D008315",
"nitric oxide": "D009569",
"glutathione": "D005978",
"superoxide": "D013481",
"captopril": "D002216",
"lenalidomide": "C467567",
"thalidomide": "D013792",
"ifosfamide": "D007069",
"mesna": "D015080",
"ifo": "D007069",
"nitrogen": "D009584",
"sodium 2-sulfanylethanesulfonate": "D015080",
"everolimus": "C107135",
"rapamycin": "D020123",
"tenofovir disoproxil fumarate": "C418563",
"tenofovir": "C096918",
"tdf": "C418563",
"thiopentone": "D013874",
"propofol": "D015742",
"meth": "D008694",
"dopamine": "D004298",
"dopac": "D015102",
"hva": "D006719",
"tyrosine": "D014443",
"serotonin": "D012701",
"linezolid": "C098010",
"ethambutol": "D004977",
"epinephrine": "D004837",
"levobupivacaine": "C476513",
"heparin": "D006493",
"zolmitriptan": "C089750",
"3,4-methylenedioxymethamphetamine": "D018817",
"mdma": "D018817",
"ecstasy": "D018817",
"alcohol": "D000431",
"nicotine": "D009538",
"cannabis": "D002188",
"glutamate": "D018698",
"crocin": "C029036",
"diazinon": "D003976",
"organophosphorus": "D010755",
"triglyceride": "D014280",
"gem": "C056507",
"gemcitabine": "C056507",
"cisplatin": "D002945",
"methylprednisolone": "D008775",
"cortisol": "D006854",
"contrast": "D003287",
"creatinine": "D003404",
"cr": "D002857",
"bevacizumab": "-1",
"irinotecan": "C051890",
"desvenlafaxine": "C086816",
"mirtazapine": "C035133",
"doxorubicin": "D004317",
"dox": "D004317",
"bilirubin": "D001663",
"tac": "D016559",
"flavonoid": "D005419",
"apigenin": "D047310",
"scopolamine": "D012601",
"cholecystokinin-octapeptide": "D012844",
"morphine": "D009020",
"cck-8": "D012844",
"streptozotocin": "D013311",
"stz": "D013311",
"nitrite": "D009573",
"ca": "D002118",
"memantine": "D008559",
"ibuprofen": "D007052",
"sodium chloride": "D012965",
"diltiazem": "D004110",
"sodium bicarbonate": "D017693",
"calcium": "D002118",
"dextrose": "D005947",
"blood urea nitrogen": "D001806",
"bun": "D001806",
"methotrexate": "D008727",
"mtx": "D008727",
"tranexamic acid": "D014148",
"tna": "D014148",
"bupivacaine": "D002045",
"ketamine": "D007649",
"fluvastatin": "C065180",
"statins": "D019821",
"fluconazole": "D015725",
"epirubicine": "D015251",
"epirubicin": "D015251",
"anthracycline": "D018943",
"etomidate": "D005045",
"fentanyl": "D005283",
"midazolam": "D008874",
"phosphorus": "D010758",
"dexmedetomidine": "D020927",
"quetiapine": "C069541",
"zuclopenthixol": "D003006",
"lithium": "D008094",
"iron": "D007501",
"hydrogen peroxide": "D006861",
"h2o2": "D006861",
"valproate": "D014635",
"n-methyl-d-aspartate": "D016202",
"nmda": "D016202",
"pethidine": "D008614",
"norpethidine": "C002752",
"ketoconazole": "D007654",
"pilsicainide": "C042288",
"all-trans retinoic acid": "D014212",
"atra": "D014212",
"lomustine": "D008130",
"ccnu": "D008130",
"ctx": "D003520",
"nelarabine": "C104457",
"cytarabine": "D003561",
"gadolinium": "D005682",
"capsaicin": "D002211",
"sodium 4-phenylbutyrate": "C075773",
"ginsenosides": "D036145",
"re ginsenosides": "C049864",
"rg1 ginsenosides": "C035054",
"rb1 ginsenosides": "C442759",
"re": "C049864",
"rg1": "C035054",
"rb1": "C442759",
"acetic acid": "D019342",
"ginsenoside re": "C049864",
"hydroxytyrosol": "C005975",
"amiodarone": "D000638",
"thyroxine": "D013974",
"steroids": "D013256",
"levothyroxine": "D013974",
"iodine": "D007455",
"argatroban": "C031942",
"warfarin": "D014859",
"dehydroepiandrosterone": "D003687",
"amphetamine": "D000661",
"dhea": "D003687",
"apomorphine": "D001058",
"haloperidol": "D006220",
"k": "D011188",
"paroxetine": "D017374",
"terfenadine": "D016593",
"citalopram": "D015283",
"s-53482": "C106487",
"s-23121": "C083440",
"mannitol": "D008353",
"metformin": "D008687",
"pentylenetetrazole": "D010433",
"ptz": "D010433",
"metronidazole": "D008795",
"aconitine": "D000157",
"na": "D012964",
"dutp": "-1",
"biotin": "D001710",
"amp": "D000667",
"tetrabenazine": "D013747",
"tiapride": "D063325",
"neuroleptic drugs": "D014150",
"metoprolol": "D008790",
"terbinafine": "C041359",
"bisoprolol": "D017298",
"testosterone": "D013739",
"androgen": "D000728",
"flutamide": "D005485",
"estrogen": "D004967",
"tamoxifen": "D013629",
"letrozole": "C067431",
"garcinielliptone fc": "C573355",
"pilocarpine": "D010862",
"gfc": "C573355",
"amino acid": "D000596",
"r-aminobutyric acid": "D005680",
"gaba": "D005680",
"glutamine": "D018698",
"vancomycin": "D014640",
"gentamicin": "D005839",
"tobramycin": "D014031",
"telaprevir": "C486464",
"simvastatin": "D019821",
"ribavirin": "D012254",
"pegylated interferon": "C417083",
"statin": "D019821",
"sirolimus": "D020123",
"cyclosporin a": "D016572",
"csa": "D016572",
"srl": "D020123",
"hematoxylin": "D006416",
"eosin": "D004801",
"urea": "D014508",
"fluocinolone acetonide": "D005446",
"rocuronium": "C061870",
"succinylcholine": "D013390",
"camp": "D000242",
"potassium": "D011188",
"amlodipine": "D017311",
"clarithromycin": "D017291",
"suxamethonium": "D013390",
"mivacurium": "C049430",
"butyrylthiocholine": "D002092",
"btc": "D002092",
"benzoylcholine": "D001588",
"dibucaine": "D003992",
"fluoride": "D005459",
"hydrogen": "D006859",
"artesunate": "C039726",
"betaine": "D001622",
"clozapine": "D003024",
"steroid": "D013256",
"curcumin": "D003474",
"maleate": "C030272",
"oxygen": "D010100",
"glucose": "D005947",
"adp": "D000244",
"malate": "C030298",
"organophosphate": "D010755",
"ozone": "D010126",
"particulate matter": "D052638",
"caffeine": "D002110",
"angiotensin ii": "D000804",
"1,3-butadiene": "C031763",
"metolachlor": "C051786",
"metolachor": "C051786",
"arsenic": "D001151",
"inorganic arsenic": "D001152",
"ias": "D001152",
"inorganic as": "D001152",
"piperazine": "C034930",
"trans-2-amino-3-hydroxy-1, 2, 3, 4-tetrahydroanaphthalene": "-1",
"n-(trans-3-hydroxy-1,2,3,4-tetrahydro-2-naphthyl)-n-(3-oxo-3-phenyl-2-methylpropyl)-piperazine hydrochloride": "C013741",
"p11": "C013741",
"bacl2": "C024986",
"chloroform": "D002725",
"adrenaline": "D004837",
"strophantine g": "D010042",
"vitamin e": "D014810",
"dianabol": "D008696",
"ciba": "-1",
"isradipine": "D017275",
"salbutamol": "D000420",
"ipratropium bromide": "D009241",
"glu": "D005947",
"3-methoxy-4-hydroxyphenethyleneglycol": "D008734",
"mhpg": "D008734",
"adenosine": "D000241",
"cobalt": "D003035",
"catecholamines": "D002395",
"norepinephrine": "D009638",
"alkylxanthines": "-1",
"carbetocin": "C020731",
"oxytocin": "D010121",
"dobutamine": "D004280",
"cocaine": "D003042",
"atropine": "D001285",
"digoxin": "D004077",
"melphalan": "D008558",
"crack cocaine": "D016578",
"levetiracetam": "C026098",
"phenobarbital": "D010634",
"heroin": "D003932",
"ethanol": "D000431",
"verapamil": "D014700",
"risperidone": "D018967",
"clonidine": "D003000",
"naphazoline": "D009278",
"xylometazoline": "C009695",
"codeine": "D003061",
"pentazocine": "D010423",
"codine": "D003061",
"noradrenaline": "D009638",
"monoamines": "D015306",
"propranolol": "D011433",
"oestrogen": "D004967",
"progestagen": "D011372",
"norethisterone": "D009640",
"diphenylhydantoin": "D010672",
"carbenicillin": "D002228",
"aminocaproic acid": "D000614",
"epsilon aminocaproic acid": "D015119",
"eaca": "D015119",
"propanolol": "D011433",
"diazepam": "D003975",
"fenoterol-hydrobromide": "D005280",
"ritodrin-hcl": "D012312",
"methylprednisolone acetate": "C000873",
"lidocaine": "D008012",
"penicillin": "D010406",
"cephalothin": "D002512",
"ampicillin": "D000667",
"l-dopa": "D007980",
"aldosterone": "D000450",
"phenytoin": "D010672",
"dph": "D010672",
"d-glucarates": "D005937",
"aminoglycoside": "D000617",
"2,5-di-o-acetyl-d-glucaro-1,4-6,3-dilactone": "C038936",
"kanamycin": "D007612",
"saccharic acid": "D005937",
"hexauronic acids": "D006603",
"hexaaldonic acids": "-1",
"sugar alcohols": "D013402",
"tca": "D002952",
"d-glucarate": "D005937",
"monosaccharides": "D009005",
"methothexate": "D008727",
"carbachol": "D002217",
"guanethidine": "D006145",
"hexamethonium": "D018738",
"phentolamine": "D010646",
"desmethylimipramine": "D003891",
"chlorpromazine": "D002746",
"amino": "D015119",
"caproate": "C037652",
"norleucine": "-1",
"norvaline": "-1",
"epsilon-aminocaproate": "-1",
"methionine": "D008715",
"leucine": "-1",
"tetracycline": "D013752",
"oleic acid": "D019301",
"vincristine": "D014750",
"progesterone": "D011374",
"pentobarbital": "D010424",
"hcl": "D006851",
"estradiol": "D004958",
"desferrioxamine": "D003676",
"dfx": "D003676",
"neuroleptic": "D014150",
"bromocriptine": "D001971",
"neuroleptic medications": "D014150",
"ethacrynic acid": "D004976",
"5-hydroxytryptamine": "D012701",
"5-ht": "D012701",
"gamma-aminobutyric acid": "D005680",
"acetylcholine": "D000109",
"aminophosphonovaleric acid": "-1",
"beta-carboline": "D002243",
"abecarnil": "C062769",
"isopropyl-6- benzyloxy-4-methoxymethyl-beta-carboline-3-carboxylate": "C062769",
"abecarrnil": "C062769",
"benzodiazepine": "D001569",
"muscimol": "D009118",
"t-[35s]butylbicyclophosphorothionate": "C037476",
"[35s]tbps": "C037476",
"ro 16-6028": "C054626",
"tert-butyl-(s)-8-bromo-11,12,13,13a-tetrahydro-9-oxo-9h- imidazo[1,5-a]-pyrrolo-[2,1-c][1,4]benzodiazepine-1-carboxylate": "C054626",
"isoniazide": "D007538",
"isoniazid": "D007538",
"carbamazepine": "D002220",
"cbz": "D002220",
"phenylalanine": "D010649",
"rifampin": "D012293",
"zidovudine": "D015215",
"2'3' dideoxyinosine": "D016049",
"coumadin": "D014859",
"fluoxetine": "D005473",
"gemfibrozil": "D015248",
"lovastatin": "D008148",
"androgens": "D000728",
"n-methyl-d-aspartic acid": "D016202",
"trimethaphan": "D014294",
"n-(2-hydroxypropyl)methacrylamide": "C032976",
"hpma": "C032976",
"gly-phe-leu-gly": "C504380",
"galactosamine": "D005688",
"amitriptyline": "D000639",
"diethylcarbamazine": "D004049",
"dec": "D004049",
"misoprostol": "D016595",
"oxidized glutathione": "D019803",
"reduced glutathione": "D005978",
"gssg": "D019803",
"diphenhydramine": "D004155",
"cimetidine": "D002927",
"histamine": "D006632",
"rifampicin": "D012293",
"thiotepa": "D013852",
"tspa": "D013852",
"cytosine arabinoside": "D003561",
"ara-c": "D003561",
"cromakalim": "D019806",
"pinacidil": "D020110",
"nitroglycerin": "D005996",
"beta adrenergic receptors blockade": "D000319",
"muscarinic receptors blockade": "D018727",
"mefenamic acid": "D008528",
"calcium carbonate": "D002119",
"phosphate": "D010710",
"caco3": "D002119",
"vitamin d": "D014807",
"bicarbonate": "D001639",
"methyldopa": "D008750",
"aldomet": "D008750",
"danazol": "D003613",
"chlorhexidine diphosphanilate": "C048279",
"chlorhexidine phosphanilate": "C048279",
"chp": "C048279",
"silver sulphadiazine": "D012837",
"agsd": "D012837",
"busulfan": "D002066",
"clonazepam": "D002998",
"d-tubocurarine": "D014403",
"chlorpheniramine": "D002744",
"lindane": "D001556",
"gamma-hexachlorocyclohexane": "D001556",
"propylthiouracil": "D011441",
"methimazole": "D008713",
"mk-801": "D016291",
"nifedipine": "D009543",
"diethylstilbesterol": "D004054",
"silver": "D012834",
"biperiden": "D001712",
"biperiden lactate": "C036432",
"orciprenaline": "D009921",
"muscarine": "D009116",
"labetalol": "D007741",
"halothane": "D006221",
"enflurane": "D004737",
"isoflurane": "D007530",
"h": "D006221",
"e": "D004737",
"i": "D007530",
"fluorescein": "D019793",
"acc-9653": "C043114",
"disodium phosphate ester": "-1",
"3-hydroxymethyl-5,5-diphenylhydantoin": "C043104",
"phenytoin sodium": "C043114",
"ouabain": "D010042",
"strophanthidin": "D013327",
"pertussis vaccine": "D010567",
"cyproheptadine": "D003533",
"mianserin": "D008803",
"8-phenyltheophylline": "C028322",
"theophylline": "D013806",
"enprofylline": "C034347",
"p-aminohippuric acid": "D010130",
"polyethylene glycol": "D011092",
"naoh": "D012972",
"isotretinoin": "D015474",
"procaterol": "D017265",
"terbutaline": "D013726",
"ornithine": "D009952",
"oxitropium bromide": "C017590",
"oxitropium": "C017590",
"valproic acid": "D014635",
"naloxone": "D009270",
"methyl scopolamine": "D019832",
"luminal": "D010634",
"alfentanil": "D015760",
"h2o": "D014867",
"iso": "D007545",
"catecholamine": "D002395",
"sodium salicylate": "D012980",
"phenylbutazone": "D010653",
"etoposide": "D005047",
"vp-16-213": "D005047",
"thiabendazole": "D013827",
"1,4-dihydropyridine": "C038806",
"calcium channel blockers": "D002121",
"calcium channel blocking agents": "D002121",
"dihydropyridine": "C038806",
"nitrendipine": "D009568",
"nisoldipine": "D015737",
"nimodipine": "D009553",
"aminonucleoside": "D011692",
"puromycin-aminonucleoside": "D011692",
"amns": "D011692",
"protamine sulfate": "D011479",
"ps": "D010718",
"ruthenium": "D012428",
"aminophylline": "D000628",
"naproxen": "D009288",
"fludrocortisone": "D005438",
"5-fluorouracil": "D005472",
"cddp": "D002945",
"5-fu": "D005472",
"urethane": "D014520",
"5,7-dihydroxytryptamine": "D015116",
"imipramine": "D007099",
"beclomethasone dipropionate": "D001507",
"beclomethasone": "D001507",
"prednisone": "D011241",
"cya": "D003520",
"aminoglycosides": "D000617",
"flurothyl": "D005481",
"monosodium glutamate": "D012970",
"msg": "D012970",
"ether": "D004986",
"gamma-vinyl-gaba": "D020888",
"l-glutamic acid": "D018698",
"d,l-4-amino-hex-5-enoic acid": "D020888",
"dipyridamole": "D004176",
"mp": "D008775",
"denopamine": "C037293",
"ta-064": "C037293",
"timolol": "D013999",
"hydrochlorothiazide": "D006852",
"timolol maleate": "D013999",
"salicylate": "D012459",
"prostaglandins": "D011453",
"glucuronyl": "-1",
"aspirin": "D001241",
"pge2": "D015232",
"pgf2 alpha": "D015237",
"hemicholinium": "D006426",
"neuroleptics": "D014150",
"sodium pentobarbital": "D010424",
"barium chloride": "C024986",
"barium": "D001464",
"barbiturate": "C032232",
"xylazine": "D014991",
"phenylpropanolamine": "D010665",
"ppa": "D010665",
"puromycin aminonucleoside": "D011692",
"pan": "D011692",
"carbon": "D002244",
"dithiothreitol": "D004229",
"p-aminophenol": "C026729",
"acetaminophen": "D000082",
"bis(p-nitrophenyl) phosphate": "C002887",
"apap": "D000082",
"pap": "C026729",
"bnpp": "C002887",
"morphine sulfate": "D009020",
"vincristine sulfate": "D014750",
"flurazepam": "D005479",
"fzp": "D005479",
"pentylenetetrazol": "D010433",
"atenolol": "D001262",
"progestins": "D011372",
"conjugated estrogens": "D004966",
"premarin": "D004966",
"medroxyprogesterone acetate": "D017258",
"azathioprine": "D001379",
"aza": "D001379",
"picrotoxin": "D010852",
"cholecystokinin": "D002766",
"cholecystokinin octapeptide": "D012844",
"caerulein": "D002108",
"vasopressin": "D014667",
"doca": "D003900",
"ddavp": "D003894",
"disulfiram": "D004221",
"quinacrine hydrochloride": "D011796",
"sodium nitrite": "D012977",
"pilocarpine nitrate": "D010862",
"edrophonium": "D004491",
"neostigmine": "D009388",
"[omega-i-131]heptadecanoic acid": "C013102",
"i-131 ha": "C013102",
"sodium nitroprusside": "D009599",
"snp": "D009599",
"trimetaphan": "D014294",
"tmp": "D014294",
"nitroprusside": "D009599",
"o2": "D010100",
"dsp4": "C012102",
"d-amphetamine": "D003913",
"desipramine": "D003891",
"cis-platinum": "D002945",
"adriamycin": "D004317",
"hexamethylmelamine": "D006585",
"cisplatinum": "D002945",
"cpdd": "C034868",
"hmm": "D006585",
"oral contraceptives": "D003276",
"bepridil": "D015764",
"kcl": "D011189",
"methoxyflurane": "D008733",
"nacl": "D012965",
"adenosine triphosphate": "D000255",
"atp": "D000255",
"endografine": "C006753",
"diatrizoate": "D003973",
"vasurix polyvidone": "D000100",
"acetrizoate": "D000100",
"dimer-x": "C025504",
"iocarmate": "C025504",
"hexabrix": "D007485",
"ioxaglate": "D007485",
"contrast media": "D003287",
"scoline": "D013390",
"fazadinium": "C084773",
"althesin": "D000530",
"chloride": "D002712",
"bromide": "D001965",
"l-norepinephrine": "D009638",
"quinine": "D011803",
"quinine sulfate": "D011803",
"nitrous oxide": "D009609",
"pancuronium": "D010197",
"trimethoprim-sulfamethoxazole": "D015662",
"trimethoprim": "D014295",
"cephalexin": "D002506",
"tmp-smz": "D015662",
"paclitaxel": "D017239",
"carboplatin": "D016190",
"taxol": "D017239",
"angiotensin-converting enzyme inhibitor": "D000806",
"angiotensin-converting enzyme (ace) inhibitors": "D000806",
"ace inhibitors": "D000806",
"lorazepam": "D008140",
"granisetron": "D017829",
"kytril": "D017829",
"spironolactone": "D013148",
"dihydrothienopyridine calcium": "-1",
"s-312-d": "C059447",
"s-312": "C059447",
"s-312-l": "-1",
"flunarizine": "D005444",
"bemegride": "D001534",
"sumatriptan": "D018170",
"flumazenil": "D005442",
"gm": "D005839",
"guanosine 3',5'-cyclic monophosphate": "D006152",
"cgmp": "D006152",
"dimethylthiourea": "C038983",
"dmtu": "C038983",
"avp": "D001127",
"arginine vasopressin": "D001127",
"phenylephrine": "D010656",
"phe": "D010656",
"sn": "D009599",
"piroxicam": "D010894",
"ganciclovir": "D015774",
"desmethylfluoxetine": "C036139",
"aracytine-c": "D003561",
"clentiazem": "C056595",
"1,5-benzothiazepine": "C106746",
"icrf-187": "D064730",
"1,25(oh)2d": "D002117",
"alkali": "D000468",
"omeprazole": "D009853",
"sucralfate": "D013392",
"calcitriol": "D002117",
"pamidronate": "C019248",
"hydrocortisone": "D006854",
"diethylstilbestrol": "D004054",
"des": "D004054",
"acrolein": "D000171",
"cd-832": "C082828",
"nefiracetam": "C058876",
"dm-9384": "C058876",
"pyrrolidone": "D011760",
"sch 23390": "C534628",
"spiperone": "D013134",
"corticotropin": "D000324",
"thyrotropin": "D013972",
"co2": "D002245",
"lamivudine": "D019259",
"hepatitis b surface antigen": "D006514",
"2',3'-dideoxy cytosine": "D019259",
"hbsag": "D006514",
"ocs": "D003276",
"progestagens": "D011374",
"gestodene": "C033273",
"desogestrel": "D017135",
"oc": "D003276",
"ethinyloestradiol": "D004997",
"folinic acid": "D002955",
"granulocyte colony-stimulating factor": "D016179",
"d,l-sotalol": "D013015",
"da": "D004298",
"ne": "D009638",
"5-hydroxyindoleacetic acid": "D006897",
"5-hiaa": "D006897",
"all-trans-retinoic acid": "D014212",
"diethylpropion": "D004053",
"dep": "D004053",
"ondansetron": "D017294",
"albuterol": "D000420",
"cocaine hydrochloride": "D003042",
"indinavir": "D019469",
"sumatriptan succinate": "D018170",
"methylphenidate": "D008774",
"17alpha-ethinylestradiol": "D004997",
"bile salt": "D001647",
"ee": "D004997",
"bs": "D001647",
"sterol": "D013261",
"cholate": "D020355",
"chenodeoxycholate": "D002635",
"glibenclamide": "D005905",
"helodermin": "C040442",
"levcromakalim": "D019806",
"ach": "D000109",
"dubutamine": "D004280",
"contrast materials": "D003287",
"prc": "D003287",
"ammonium": "D000644",
"4-aminopyridine": "D015761",
"nociceptin": "C096012",
"orphanin fq": "C096012",
"nocistatin": "C111148",
"meloxicam": "C065757",
"remoxipride": "D017330",
"(s)-(-)-3-bromo-n-[(1-ethyl-2-pyrrolidinyl)methyl]-2,6-dimethoxybenz amide": "D017330",
"pyrrolidine": "C032519",
"hoechst 33342": "C017807",
"propidium iodide": "D011419",
"catechol": "C034221",
"hydroquinone": "C031927",
"ncq436": "C084325",
"ncq344": "C112341",
"phenols": "D010636",
"fla797": "C050313",
"phenol": "D019800",
"benzene": "D001554",
"1-(1,2-dihydro-2-acenaphthylenyl)piperazine": "-1",
"5-{2-[4-(1,2-dihydro-2-acenaphthylenyl)piperazinyl]ethyl}-2,3-dihy dro-1h- indol-2-one": "-1",
"nitric-oxide": "D009569",
"nadph": "D009249",
"ng-nitro-l-arginine": "D019335",
"l-noarg": "D019335",
"no": "D009569",
"nicotinamide adenine dinucleotide phosphate": "D009249",
"fenfluramine": "D005277",
"amphetamines": "D000662",
"fenfluramines": "D005277",
"heparins": "D006493",
"fk506": "D016559",
"fk 506": "D016559",
"amphotericin b": "D000666",
"fungizone": "D000666",
"amphotericin b-sodium deoxycholate": "C059765",
"d-amb": "C059765",
"sulfadiazine": "D013411",
"dronedarone": "C118667",
"platinum": "D010984",
"doxil": "D004317",
"olanzapine": "C076029",
"olanzipine": "C076029",
"tropicamide": "D014331",
"vitamin a": "D014801",
"methoxamine": "D008729",
"angiotensin-converting enzyme inhibitors": "D000806",
"lisinopril": "D017706",
"ace inhibitor": "D000806",
"cocaethylene": "C066444",
"ce": "C105934",
"veralipride": "C027429",
"viracept": "D019888",
"ptu": "D011441",
"sotalol": "D013015",
"azimilide": "C086123",
"copper": "D003300",
"zinc": "D015032",
"cu": "D003300",
"zn": "D015032",
"pdn": "D011241",
"erythromycin": "D004917",
"amoxicillin": "D000658",
"ciprofloxacin": "D002939",
"ofloxacin": "D015242",
"cotrimoxazole": "D015662",
"glyceryl trinitrate": "D005996",
"diclofenac": "D004008",
"gtn": "D005996",
"dcf": "D004008",
"temocapril": "C055603",
"cy": "D003520",
"mitomycin c": "D016685",
"fa": "D002955",
"mmc": "D016685",
"nicergoline": "D009530",
"sermion": "D009530",
"bendazac lysine": "C036067",
"levomepromazine": "D008728",
"triphenyltetrazolium": "C009591",
"ttc": "C009591",
"carvedilol": "C043211",
"2-p-(2-carboxyethyl)phenethylamino-5'-n-ethylcarboxamidoadenosine": "C061282",
"cgs 21680": "C061282",
"n6-cyclopentyladenosine": "C048599",
"cpa": "D017373",
"5'-n-ethylcarboxamidoadenosine": "D019830",
"neca": "D019830",
"dmpx": "C057837",
"3,7-dimethyl-1-propargylxanthine": "C057837",
"cpt": "C053907",
"8-cyclopentyltheophylline": "C053907",
"tiaprofenic acid": "C021270",
"lanthanum nitrate": "C016534",
"sodium thiopental": "D013874",
"paracetamol": "D000082",
"glycopyrrolate": "D006024",
"aluminum chloride": "C010845",
"disoprivan": "D015742",
"vinorelbine": "C030852",
"vnr": "C030852",
"vindesine": "D014751",
"vds": "D014751",
"fluorouracil": "D005472",
"anthracyclines": "D018943",
"trimethoprim-sulfomethoxazole": "D015662",
"vigabatrin": "D020888",
"ma": "D008694",
"niacin extended-release/lovastatin": "C451780",
"niacin": "D009525",
"terminalia chebula": "D010936",
"ethanolic extract of terminalia chebula fruits": "D010936",
"peroxides": "D010545",
"t. chebula extract": "D010936",
"peroxide": "D010545",
"droperidol": "D004329",
"diamorphine": "D003932",
"interferon": "D016898",
"interferon-alpha-2b": "D016898",
"adr": "D004317",
"4-hydroxy-2-nonenal": "C027576",
"4hne": "C027576",
"3-nitrotyrosine": "C002744",
"3nt": "C002744",
"nifekalant hydrochloride": "C076259",
"nifekalant": "C076259",
"trazodone": "D014196",
"dexamphetamine": "D003913",
"ergometrine": "D004874",
"pyrrolidine dithiocarbamate": "C020972",
"pdtc": "C020972",
"2,3,5-triphenyltetrazolium chloride": "C009591",
"evans blue": "D005070",
"celecoxib": "C105934",
"nimesulide": "C012655",
"p": "D000082",
"n": "C012655",
"pyrazolones": "D047069",
"amisulpride": "C012052",
"calcium gluconate": "D002125",
"cycloheximide": "D003513",
"pilo": "D010862",
"chx": "D003513",
"oestrogens": "D004967",
"progestogens": "D011374",
"amoxicillin-clavulanate": "D019980",
"d-ribose": "D012266",
"erdosteine": "C048498",
"vcm": "D014640",
"domperidone": "D004294",
"domperione": "D004294",
"cholesterols": "D002784",
"c": "D002784",
"tg": "D014280",
"cyproterone acetate": "D017373",
"alpha-fluoro-beta-alanine": "C032348",
"fbal": "C032348",
"dihydropyrimidine": "C020047",
"monoher": "C522803",
"monohydroxyethylrutoside": "C522803",
"bepridil hydrochloride": "D015764",
"bpd": "D015764",
"methadone": "D008691",
"nomega-nitro-l-arginine": "D019335",
"lnna": "D019335",
"tempol": "C001803",
"pergolide": "D010479",
"minocycline": "D008911",
"hbeag": "D006513",
"eslicarbazepine acetate": "C416835",
"bia 2-093": "C416835",
"s-(-)-10-acetoxy-10,11-dihydro-5h-dibenzo/b,f/azepine-5-carboxamide": "C416835",
"oxcarbazepine": "C036006",
"oxc": "C036006",
"anthraquinones": "D000880",
"chinese herbal": "D004365",
"chinese herbs": "D004365",
"aristolochic acids": "D034341",
"anthraquinone": "D000880",
"chloroacetaldehyde": "C004656",
"sulfhydryl": "-1",
"thiol": "D013438",
"caa": "C004656",
"sh": "-1",
"trypan blue": "D014343",
"thiols": "D013438",
"cysteine": "D003545",
"galantamine": "D005702",
"hydroxychloroquine": "D006886",
"hydroxychloroquine sulfate": "D006886",
"cox-2 inhibitors": "D052246",
"non-steroidal anti-inflammatory drugs": "D000894",
"rofecoxib": "C116926",
"potassium aspartate": "D001224",
"quinidine": "D011802",
"penicillamine": "D010396",
"zinc acetate": "D019345",
"atropine sulphate": "D001285",
"peg-asparaginase": "C042705",
"polyethylene glycol-asparaginase": "C042705",
"valsartan": "C081489",
"val": "C081489",
"hctz": "D006852",
"succimer": "D004113",
"lead": "D007854",
"pb": "D010634",
"piperacillin/tazobactam": "C085143",
"piperacillin": "D010878",
"ammonia": "D000641",
"ropivacaine": "C037663",
"prilocaine": "D011318",
"melatonin": "D008550",
"bcnu": "D002330",
"carmustine": "D002330",
"1,3-bis (2-chloroethyl)-1-nitrosoure": "D002330",
"myo-inositol-1-phosphate": "C002647",
"mip": "C002647",
"inositol": "D007294",
"prostaglandin e2": "D015232",
"thromboxane b2": "D013929",
"txb2": "D013929",
"thromboxane": "D013931",
"phosphatidylserine": "D010718",
"fluo3": "C059715",
"3tc": "D019259",
"d4t": "D018119",
"nevirapine": "D019829",
"nvp": "D019829",
"azt": "D015215",
"efavirenz": "C098320",
"efv": "C098320",
"sulphonylurea": "-1",
"sulphonylureas": "-1",
"glipizide": "D005913",
"glimepiride": "C057619",
"gliclazide": "D005907",
"losartan": "D019808",
"los": "D019808",
"isoprenaline": "D007545",
"fatty acids": "D005227",
"procaine": "D011343",
"topotecan": "D019772",
"remifentanil": "C071741",
"vpa": "D014635",
"carbimazole": "D002231",
"anti-thyroid drugs": "D013956",
"antithyroid medications": "D013956",
"antithyroidmedications": "D013956",
"6-hydroxydopamine": "D016627",
"proenkephalin": "C029992",
"penk": "C029992",
"6-ohda": "D016627",
"l-dopa+benserazide": "C005177",
"thiobarbituric acid": "C029684",
"citrate": "D019343",
"citric acid": "D019343",
"flecainide": "D005424",
"folic acid": "D005492",
"pantoprazole": "C064276",
"rad001": "C107135",
"vgb": "D020888",
"ssri": "D017367",
"metallothionein": "D008668",
"mt": "D008668",
"znso(4)": "D019287",
"gsh": "D005978",
"mptp": "D015632",
"1-methyl-4-phenyl-1,2,3,6-tetrahydropyridine": "D015632",
"benserazide": "D001545",
"contrast medium": "D003287",
"cm": "D003287",
"cnsb002": "C401121",
"carrageenan": "D002351",
"direct thrombin inhibitor": "D000991",
"direct thrombin inhibitors": "D000991",
"botox": "C542870",
"mitoq": "C476756",
"sulfamethoxazole": "D013420",
"temsirolimus": "C401859",
"spiranolactone": "D013148",
"ramipril": "D017257",
"alendronate": "D019386",
"bisphosphonates": "D004164",
"biphosphonate": "-1",
"bisphosphonate": "D004164",
"daptomycin": "D017576",
"methicillin": "D008712",
"levofloxacin": "D064704",
"tazobactam": "C043265",
"oxacillin": "D010068",
"nafcillin": "D009254",
"l-arginine": "D001120",
"l-name": "D019331",
"galactose": "D005690",
"d-galactose": "D005690",
"d-glucose": "D005947"
},
"disease2id": {
"delirium": "D003693",
"ulcers": "D014456",
"hypotension": "D007022",
"scleroderma renal crisis": "D007674",
"src": "D007674",
"systemic sclerosis": "D012595",
"ssc": "D012595",
"thrombotic microangiopathy": "D057049",
"psychosis": "D011605",
"psychiatric disorders": "D001523",
"psychotic symptoms": "D011618",
"depressive disorder": "D003866",
"bipolar disorder": "D001714",
"antisocial personality disorder": "D000987",
"major depressive disorder": "D003865",
"affective disorder": "D019964",
"antisocial personality": "D000987",
"parkinson's disease": "D010302",
"dyskinetic": "D004409",
"pd": "D010302",
"dyskinesias": "D004409",
"lids": "D004409",
"abnormal involuntary movements": "D004409",
"cystitis": "D003556",
"pain": "D010146",
"edema": "D004487",
"hepatitis": "D056486",
"hepatotoxicity": "D056486",
"hepatic injury": "D056486",
"multiple myeloma": "D009101",
"mm": "D009101",
"peripheral neuropathy": "D010523",
"anxiety": "D001008",
"cardiotoxicity": "D066126",
"cardiovascular diseases": "D002318",
"cvds": "D002318",
"myeloma": "D009101",
"rrmm": "D009101",
"myelosuppression": "D001855",
"deep vein thrombosis": "D020246",
"hemorrhagic": "D020300",
"genotoxicity": "D030342",
"dyskinesia": "D004409",
"proteinuria": "D011507",
"al": "D000686",
"amyloidosis": "D000686",
"fibrosis": "D005355",
"atrophy": "D001284",
"glomerulopathy": "D007674",
"kidney injury": "D007674",
"acute kidney injury": "D058186",
"kidney tubular dysfunction": "D007674",
"fanconi syndrome": "D005198",
"kidney disease": "D007674",
"glomerular dysfunction": "D007674",
"postoperative delirium": "D011183",
"neurological dysfunctions": "D009422",
"confusion": "D003221",
"neurotoxic": "D020258",
"depressive": "D003866",
"depressive symptoms": "D003866",
"depression": "D003866",
"optic neuropathy": "D009901",
"loss of vision": "D014786",
"extensively drug-resistant tuberculosis": "D054908",
"xdr-tb": "D054908",
"optic disc edema": "C531767",
"toxic optic neuropathy": "D009901",
"deterioration of vision": "D015354",
"cardiac toxicity": "D066126",
"toxicity": "D064420",
"cardiovascular collapse": "-1",
"ventricular tachycardia": "D017180",
"fibrillation": "D001281",
"thrombocytopenia type ii": "D013921",
"thrombocytopenia": "D013921",
"end-stage liver disease": "D058625",
"portal hypertension": "D006975",
"endotoxemia": "D019446",
"hit type ii": "D013921",
"malignant liver disease": "D017114",
"liver cirrhosis": "D008103",
"hit ii": "D013921",
"hit": "D013921",
"end-stage hepatic failure": "D058625",
"takotsubo syndrome": "D054549",
"apical ballooning syndrome": "D054549",
"ts": "D054549",
"broken heart syndrome": "D054549",
"acute coronary syndrome": "D054058",
"mitral valve prolapse": "D008945",
"migraines": "D008881",
"chest pain": "D002637",
"migraine headache": "D008881",
"status migrainosus": "D008881",
"coronary artery vasospasm": "D003329",
"impulsiveness": "D010554",
"impaired memory": "D008569",
"sleep disturbance": "D020920",
"visual hallucinations": "D006212",
"idiopathic parkinson's disease": "D010300",
"hyperlipemia": "D006949",
"hodgkin lymphoma": "D006689",
"hl": "D006689",
"malignancy": "D009369",
"toxicities": "D064420",
"neutropenia": "D009503",
"psychobiological dysfunction": "D008107",
"encephalopathy": "D001927",
"cancers": "D009369",
"sarcomas": "D012509",
"lymphoma": "D008223",
"gynecologic and testicular cancers": "D009369",
"cancer": "D009369",
"convulsions": "D012640",
"non-convulsive status epilepticus": "D013226",
"ncse": "D013226",
"nephropathy": "D007674",
"acute renal failure": "D058186",
"hypertension": "D006973",
"syndrome of inappropriate antidiuretic hormone": "D007177",
"syndrome of inappropriate anti-diuretic hormone": "D007177",
"siadh": "D007177",
"hyponatraemia": "D007010",
"nausea": "D009325",
"hyponatremia": "D007010",
"cardiac disarrangement": "D006331",
"necrosis": "D009336",
"seizure": "D012640",
"seizures": "D012640",
"tonic-clonic seizures": "D004830",
"epileptic": "D004827",
"amnesia": "D000647",
"memory loss": "D008569",
"memory impairment": "D008569",
"neurotoxicity": "D020258",
"memory impaired": "D008569",
"memory deficit": "D008569",
"neuroinflammation": "D007249",
"excitotoxicity": "D064420",
"neuroinflammatory": "D007249",
"arf": "D058186",
"acute lymphoblastic leukemia": "D054198",
"leukoencephalopathy": "D056784",
"leukemia": "D007938",
"attention problems": "D003072",
"renal failure": "D051437",
"chronic kidney disease": "D051436",
"tubulointerstial disease": "D004194",
"anemia": "D000740",
"menorrhagia": "D008595",
"deterioration of renal function": "D007674",
"bleeding": "D006470",
"tonic clonic convulsions": "D004830",
"nervous system abnormalities": "D009421",
"convulsion": "D012640",
"overdose": "D062787",
"cardiovascular depression": "D002318",
"dysrhythmia": "D001145",
"asystole": "D006323",
"cardiotoxic": "D066126",
"drug-induced acute liver injury": "D056486",
"drug-induced liver injury": "D056486",
"liver damage": "D056486",
"anorexia": "D000855",
"abdominal pain": "D015746",
"hepatic damage": "D056486",
"agranulocytosis": "D000380",
"blood dyscrasias": "D006402",
"bone marrow suppression": "D001855",
"myocardial strain": "D009202",
"cardiac dysfunction": "D006331",
"non-hodgkin lymphoma": "D008228",
"heart failure": "D006333",
"myoclonus": "D009207",
"myoclonic movements": "D009069",
"cholestatic": "D002779",
"poisoning": "D011041",
"acute hepatitis": "D006505",
"acute liver failure": "D017114",
"cholestasis": "D002779",
"vasovagal syncope": "D019462",
"bradycardia": "D001919",
"syncope": "D013575",
"agitation": "D011595",
"schizo-affective disorder": "D011618",
"personality disorder": "D010554",
"antisocial disorder": "D000987",
"substance abuse disorder": "D019966",
"aggressiveness": "D010554",
"manic": "D001714",
"akathisia": "D017109",
"weight loss": "D015431",
"convulsive": "D012640",
"abnormal involuntary motor movements": "D004409",
"generalised and focal seizures": "D012640",
"congestive heart failure": "D006333",
"hypothermic": "D007035",
"postoperative complication": "D011183",
"dysfunctional overnight memory": "D008569",
"sleep-related impairments": "D012893",
"impaired overnight memory": "D008569",
"memory impairments": "D008569",
"aggression": "D001523",
"auditory hallucinations": "D006212",
"delusions": "D003072",
"hemiparesis": "D010291",
"hemiplegic migraine": "D020325",
"encephalitis": "D004660",
"cerebellar dysfunction": "D002526",
"oculomotor dysfunction": "D005128",
"vomiting": "D014839",
"impaired renal function": "D007674",
"impaired liver function": "D017093",
"irritability": "D001523",
"dysmetria": "D002524",
"dysarthria": "D004401",
"baboon syndrome": "-1",
"maculopapular eruption": "D003875",
"sudden cardiac death": "D016757",
"torsades de pointes": "D016171",
"atrial fibrillation": "D001281",
"torsade de pointes": "D016171",
"myositis": "D009220",
"acute promyelocytic leukemia": "D015473",
"apl": "D015473",
"renal toxicity": "D007674",
"tumor": "D009369",
"t-cell lymphoblastic lymphoma": "D054218",
"leukemic": "D007938",
"tumor lysis syndrome": "D015275",
"tls": "D015275",
"neuropathy": "D009422",
"hyperammonemic": "D022124",
"neurological complications": "D009422",
"epilepsy": "D004827",
"impaired consciousness": "D003244",
"hyperammonemia": "D022124",
"necrotising fasciitis": "D019115",
"waldenstrom macroglobulinaemia": "D008258",
"bacterial infections": "D001424",
"malignancies": "D009369",
"cardiomyopathy": "D009202",
"subcellular degeneration": "D009410",
"cardiomyocyte degeneration": "D009410",
"hypertrophy": "D006984",
"lv dysfunction": "D018487",
"diastolic dysfunction": "-1",
"hyperalgesia": "D006930",
"allodynia": "D006930",
"pain disorders": "D013001",
"secondary hyperalgesia": "D006930",
"hyperalgesic": "D006930",
"allodynic": "D006930",
"glaucoma": "D005901",
"ocular hypertension": "D009798",
"primary open-angle glaucoma": "C562750",
"poag": "C562750",
"retinal ganglion": "D012173",
"axonal degeneration": "D009410",
"oih": "D006930",
"opioid addiction": "D009293",
"mitochondrial dysfunction": "D028361",
"breast cancer": "D001943",
"cardiovascular disease": "D002318",
"breast tumors": "D001943",
"cardiac disturbances": "D006331",
"heart damage": "D006331",
"myxoedema": "D007037",
"coma": "D003128",
"hypothermia": "D007035",
"respiratory failure": "D012131",
"thyroid disease": "D013959",
"thrombolysis": "D055499",
"thrombosis": "D013927",
"upper-extremity deep venous thrombosis": "D056824",
"dvt": "D020246",
"pulmonary embolism": "D011655",
"superior vena cava (svc) syndrome": "D013479",
"epistaxis": "D004844",
"vision loss": "D014786",
"hearing loss": "D034381",
"end-stage renal disease": "D007676",
"svc syndrome": "D013479",
"schizophrenia": "D012559",
"hyper": "D006948",
"catalepsy": "D002375",
"qt prolongation": "D008133",
"tdp": "D016171",
"embryolethality": "D020964",
"teratogenicity": "D064793",
"ventricular septal defects": "D006345",
"growth retardation": "D005317",
"embryonic death": "D020964",
"ventricular septal defect": "D006345",
"teratogenic": "D064793",
"nephrotoxicity": "D007674",
"aki": "D058186",
"head and neck cancer": "D006258",
"learning impairments": "D007859",
"cognitive impairment": "D003072",
"cognition deficits": "D003072",
"cognitive deficits": "D003072",
"arrhythmia": "D001145",
"myocardial injury": "D009202",
"cytotoxicity": "D064420",
"left ventricular dysfunction": "D018487",
"myocardial infarction": "D009203",
"acute myocardial infarction": "D009203",
"neuroleptic malignant syndrome": "D009459",
"huntington's disease": "D006816",
"sinus bradycardia": "D012804",
"coronary artery disease": "D003324",
"onychomycosis": "D014009",
"adverse drug reaction": "D064420",
"polyneuropathy": "D011115",
"visual loss": "D014786",
"paresthesias": "D010292",
"impaired the memory": "D008569",
"skin diseases": "D012871",
"diarrheas": "D003967",
"inflammatory diseases": "D007249",
"status epilepticus": "D013226",
"nephrotoxic": "D007674",
"infections": "D007239",
"rhabdomyolysis": "D012206",
"hepatitis c virus infected": "D006526",
"hepatitis c virus infection": "D006526",
"muscle toxicity": "D009135",
"cytopenia": "D006402",
"renal lesions": "D007674",
"renal impairment": "D007674",
"kidney lesions": "D007674",
"renal damage": "D007674",
"acute renal injury": "D058186",
"acute tubular necrosis": "D007683",
"inflammation": "D007249",
"diabetic nephropathy": "D003928",
"necrotic": "D009336",
"birdshot retinochoroidopathy": "C537630",
"retinal vasculitis": "D031300",
"cataract": "D002386",
"raised intraocular pressure": "D009798",
"vasculitis": "D014657",
"cystoid macular edema": "D008269",
"increased intraocular pressure": "D009798",
"fasciculation": "D005207",
"myalgia": "D063806",
"muscle fasciculation": "D005207",
"fasciculations": "D005207",
"nephrogenic diabetes insipidus": "D018500",
"ndi": "D018500",
"polyuria": "D011141",
"dysguesia": "D004408",
"loss of taste sensation": "D012678",
"myopathy": "D009135",
"apnea": "D001049",
"butyrylcholinesterase deficiency": "C537417",
"hemolytic anemia": "D000743",
"malaria": "D008288",
"myocardial ischemia": "D017202",
"myocardial ischemic injury": "D017202",
"ventricular remodeling": "D020257",
"myocardial damage": "D009202",
"bipolar": "D001714",
"hepatocellular carcinoma": "D006528",
"leucopenia": "D007970",
"hepatic dysfunction": "D008107",
"fever": "D005334",
"lateral epicondylitis": "D013716",
"paresthesia": "D010292",
"renal injury": "D058186",
"tumours": "D009369",
"lung cancer": "D008175",
"tumour": "D009369",
"kidney cancer": "D007680",
"prostate cancer": "D011471",
"glomerulosclerosis": "D005921",
"intrauterine growth retardation": "D005317",
"iugr": "D005317",
"interstitial fibrosis": "D005355",
"dysplasia of fetal kidneys": "D007674",
"cml": "D015464",
"chronic myeloid leukemia": "D015464",
"philadelphia chromosome": "D010677",
"liver neoplasms": "D008113",
"liver cancer": "D008113",
"follicular cell lymphoma": "D008224",
"muscular dystrophy": "D009136",
"myodystrophy": "D009136",
"myopathic": "D009135",
"myopathic disease": "D009135",
"cerebral hemorrhage": "D002543",
"pulmonary hemorrhage": "D008171",
"hemorrhage": "D006470",
"embryopathy": "D005315",
"nasal hypoplasia": "-1",
"stippled epiphyses": "D002806",
"chondrodysplasia punctata": "D002806",
"headache": "D006261",
"dizziness": "D004244",
"palpitation": "-1",
"flushing": "D005483",
"postural hypotension": "D007024",
"asthmatics": "D001249",
"asthma": "D001249",
"asthmatic": "D001249",
"tremor": "D014202",
"palpitations": "-1",
"panic disorders": "D016584",
"agoraphobia": "D000379",
"panic attacks": "D016584",
"panic disorder": "D016584",
"restlessness": "D011595",
"tremors": "D014202",
"abnormalities in neuronal systems": "D009421",
"anxiety disorders": "D001008",
"rheumatic disease": "D012216",
"myxomatous degeneration": "-1",
"coronary disease": "D003327",
"clonic seizure": "D012640",
"clonic seizures": "D012640",
"tonic seizures": "D012640",
"carcinoma of the renal pelvis": "D010386",
"hydroureteronephrosis": "D006869",
"hematuria": "D006417",
"cerebral vasculitis": "D020293",
"carcinoma": "D002277",
"carcinoma of the urinary tract": "D014571",
"carcinomas of the urinary bladder": "D001749",
"carcinoma of the prostate": "D011471",
"urinary tract cancer": "D014571",
"obstructive uropathy": "-1",
"retained placenta": "D018457",
"blood loss": "D006473",
"ischemia": "D007511",
"hypokinesis": "D018476",
"tachydysrhythmias": "-1",
"sinus tachycardia": "D013616",
"hypokalemia": "D007008",
"proarrhythmia": "-1",
"hypomagnesemia": "C537153",
"acute renal insufficiency": "D058186",
"primary systemic amyloidosis": "C531616",
"ari": "D058186",
"hypoalbuminemia": "D034141",
"tubular injury": "-1",
"impaired fear recognition": "D001925",
"deficit in fear recognition": "D001925",
"psychopaths": "D001523",
"amygdala dysfunction": "-1",
"corneal ulcers": "D003320",
"drug abuse": "D019966",
"substance abuse": "D019966",
"epithelial defects": "-1",
"idiopathic epilepsy": "C562694",
"lethargy": "D053609",
"inappetence": "-1",
"haemorrhagic": "D006470",
"infarction of the globus pallidus": "D020520",
"ischemic stroke": "D002544",
"haemorrhagic stroke": "D020521",
"ischemia of the globus pallidus": "D002545",
"globus pallidus infarctions": "D020520",
"basal ganglia infarcts": "D020520",
"cardiac arrhythmia": "D001145",
"respiratory dysfunction": "D012131",
"cerebral hypoperfusion": "-1",
"burkitt lymphoma": "D002051",
"hepatic toxicity": "D056486",
"biliary atresia": "D001656",
"ptld": "D008232",
"post-transplantation lymphoproliferative disorder": "D008232",
"burkitt-type malignant lymphoma": "D002051",
"hypovolemia": "D020896",
"acute prerenal failure": "-1",
"cerebral microbleeds": "-1",
"intracerebral hemorrhage": "D002543",
"mb": "-1",
"ich": "D002543",
"is": "D002544",
"transient ischemic attack": "D002546",
"tia": "D002546",
"stroke": "D020521",
"ischemic": "D007511",
"hyperprolactinemia": "D006966",
"macroprolactinemia": "D015175",
"macroprolactinoma": "D015175",
"microprolactinoma": "D015175",
"pseudoprolactinoma": "-1",
"cataleptic": "D002375",
"hypoglycaemia": "D007003",
"diabetics": "D003920",
"endometrial disease": "D014591",
"hyperplasia": "D006965",
"endometrial carcinoma": "D016889",
"endometrial hyperplasia": "D004714",
"pure red cell aplasia": "D012010",
"toxic dermatitis": "D003875",
"lymphadenopathy": "D008206",
"skin rash": "D005076",
"pneumonia": "D011014",
"infection": "D007239",
"pneumoniae": "D011014",
"gram-negative bacillary infections": "D016905",
"azotemia": "D053099",
"subarachnoid hemorrhage": "D013345",
"acute renal artery thrombosis": "D007674",
"sah": "D013345",
"thrombotic": "D013927",
"intracranial vascular thrombosis": "D020767",
"thrombi": "D013927",
"thromboembolic phenomena": "D013923",
"disseminated intravascular coagulation": "D004211",
"consumption coagulopathies": "D004211",
"infarction": "D007238",
"thrombosis of a normal renal artery": "D007674",
"hypoglycemia": "D007003",
"hyperbilirubinemia": "D006932",
"polycythemia": "D011086",
"neonatal apnea": "D001049",
"idiopathic orthostatic hypotension": "C544351",
"hypersensitivity": "D004342",
"myoclonia": "D009207",
"retinal artery and choriocapillaris occlusion": "D015356",
"retinal artery and choriocapillaris occlusions": "D015356",
"blindness": "D001766",
"palsy": "D010243",
"pupillary abnormalities": "D011681",
"hemorrhages": "D006470",
"chorioretinal atrophy": "C566236",
"renal disease": "D007674",
"hemolytic anemias": "D000743",
"parkinsonian": "D010300",
"rash": "D005076",
"dehydrated": "D003681",
"renal damages": "D007674",
"paraplegia": "D010264",
"central nervous system leukemia": "D002493",
"folate deficiency": "C536409",
"enlargement of pulse pressure": "D006973",
"tachycardia": "D013610",
"hyperglycemic": "D006943",
"hyperglycemia": "D006943",
"glucosuria": "D006030",
"fatty liver": "D005234",
"myeloencephalopathy": "D001927",
"leukaemia": "D007938",
"sensory and motor dysfunction": "D007049",
"arrhythmic": "D001145",
"thalassemia": "D013789",
"renal insufficiency": "D051437",
"amenorrhea": "D000568",
"oligomenorrhea": "D009839",
"psychiatric symptoms": "D011618",
"galactorrhea": "D005687",
"spasm": "D013035",
"asterixis": "D020820",
"hypotensive": "D007022",
"syndrome of inappropriate secretion of antidiuretic hormone": "D007177",
"weakness": "D018908",
"arrhythmias": "D001145",
"mi": "D009203",
"hemolysis": "D006461",
"leprosy": "D007918",
"aids": "D000163",
"thoracic hematomyelia": "D020758",
"mania": "D001714",
"depressed": "D003866",
"hypomania": "D001714",
"attention-deficit hyperactivity disorder": "D001289",
"psychotic": "D011618",
"hyperlipoproteinemias": "D006951",
"hyperlipidemia": "D006949",
"atherosclerotic vascular disease": "D002340",
"myoglobinuria": "D009212",
"fanconi's anemia": "D005199",
"septicemia": "D018805",
"hemorrhagic bronchopneumonia": "D001996",
"peliosis": "D010382",
"hepatic tumors": "D008113",
"hepatic neoplasms": "D008113",
"hypertensive": "D006973",
"damage of substantia nigra pars reticulata": "D001930",
"epileptic seizure": "D004827",
"prolonged status epilepticus": "D013226",
"metabolic derangement": "D008659",
"neuronal damage": "D009422",
"vasogenic edema": "D001929",
"lesioned snr": "D001930",
"damage of snr": "D001930",
"neurotransmitter dysfunction": "D001480",
"post-herpetic neuralgia": "D051474",
"phn": "D051474",
"mastitis": "D008413",
"sensory disturbance": "-1",
"paranoia": "D010259",
"paranoid": "D010259",
"depressive disorders": "D003866",
"loiasis": "D008118",
"filariasis": "D005368",
"microfilaremia": "-1",
"reperfusion injury": "D015427",
"liver dysfunction": "D017093",
"pulmonary tuberculosis": "D014397",
"neurologic toxicity": "D020258",
"axonal neuropathy": "D056768",
"neurologic toxicities": "D020258",
"hypothyroidism": "D007037",
"hypothyroid": "D007037",
"hypercalcemia": "D006934",
"hypercalcemic": "D006934",
"autoimmune hemolytic anemia": "D000744",
"emergency department": "D004630",
"trauma": "D014947",
"hereditary angioedema": "D054179",
"menstrual abnormalities": "D008599",
"weight gain": "D015430",
"muscle cramps": "D009120",
"myalgias": "D063806",
"burns": "D002056",
"burn": "D002056",
"tumors": "D009369",
"brain tumors": "D001932",
"neurological symptoms": "D009461",
"central nervous system disease": "D002493",
"hepatocellular necrosis": "D047508",
"lupus-like syndrome": "D008180",
"hyperthyroidism": "D006980",
"brain damage": "D001930",
"autonomic neuropathy": "D009422",
"diabetic": "D003920",
"atrial flutter": "D001282",
"renal carcinomas": "D002292",
"postzosteric": "D006562",
"trigeminal neuralgia": "D014277",
"dysphagia": "D003680",
"hepatic failure": "D017093",
"hepatocellular damage": "D056486",
"drug hypersensitivity": "D004342",
"pertussis": "D014917",
"ischaemic": "D007511",
"acne": "D000152",
"blepharoconjunctivitis": "D003231",
"dry eyes": "D014985",
"blurred vision": "D014786",
"photodermatitis": "D010787",
"papilledema": "D010211",
"pseudotumor cerebri": "D011559",
"corneal opacities": "D003318",
"congenital abnormalities": "D000013",
"microphthalmos": "D008850",
"hypertelorism": "D006972",
"optic nerve hypoplasia": "C563492",
"bronchial asthma": "D001249",
"tremorgenic": "D014202",
"heart hypertrophy": "D006332",
"hypertrophied hearts": "D006332",
"hypertrophied": "D006984",
"anaphylaxis": "D000707",
"dementia": "D003704",
"retention deficit": "D008569",
"lesions of the urinary bladder": "D001745",
"necroses": "D009336",
"muscle fasciculations": "D005207",
"diabetes": "D003920",
"acute neurologic dysfunction": "D009422",
"malignant glioma": "D005910",
"hematologic malignancies": "D019337",
"glioma": "D005910",
"neurologic deterioration": "D009422",
"somnolence": "D006970",
"motor deficits": "-1",
"bile duct injury": "D001649",
"jaundice": "D007565",
"drug-induced liver damage": "D056486",
"bile duct destruction": "D001649",
"angina": "D000787",
"nephrosis": "D009401",
"focal segmental glomerular sclerosis": "D005923",
"fsgs": "D005923",
"nephrotic syndrome": "D009404",
"hyperkalemia": "D006947",
"rheumatoid arthritis": "D001172",
"hypoaldosteronism": "D006994",
"type iv renal tubular acidosis": "D006994",
"colorectal carcinoma": "D015179",
"aplastic anemia": "D000741",
"bone marrow toxicity": "D001855",
"bradycardiac": "D001919",
"neuroblastoma": "D009447",
"behavior disorder": "D002653",
"oral candidiasis": "D002180",
"hoarseness": "D006685",
"thrush": "D002180",
"candidiasis": "D002177",
"wiskott-aldrich syndrome": "D014923",
"severe combined immunodeficiency syndrome": "D016511",
"immunodeficiencies": "D007153",
"glomerular or tubular dysfunction": "D007674",
"liver disease": "D008107",
"shock": "D012769",
"temporal lobe epilepsy": "D004833",
"anterior infarction": "D056988",
"ant-mi": "D056988",
"inferior infarction": "D056989",
"inf-mi": "D056989",
"heart disease": "D006331",
"downbeat nystagmus": "D009759",
"oscillopsia": "D015835",
"nystagmus": "D009759",
"cardiac failure": "D006333",
"infantile spasms": "D013036",
"drowsiness": "D006970",
"ataxia": "D001259",
"fatigue": "D005221",
"papillary necrosis": "D007681",
"pathological renal medullary lesions": "D058186",
"ventricular fibrillation": "D014693",
"cardiovascular dysfunction": "D002318",
"disturbances within the cardiovascular system": "D002318",
"metabolic disturbances": "D008659",
"cardiomyopathic disorder": "D009202",
"glomerular sclerosis": "D007674",
"mesangial dysfunction": "D007674",
"proteinuric": "D011507",
"sclerosis": "D012598",
"tubular necrosis": "D007683",
"renal tubular necrosis": "D007683",
"pseudomonas infections": "D011552",
"leukocytosis": "D007964",
"leukopenia": "D007970",
"sepsis": "D018805",
"loss of consciousness": "D014474",
"toxocity": "D064420",
"salivation": "D012798",
"muscle tremors": "D014202",
"mastodynia": "D059373",
"bacteremias": "D016470",
"abdominal infections": "D059413",
"urinary tract infections": "D014552",
"staphylococcal infections": "D013203",
"fungal infections": "D009181",
"cmv infection": "D003586",
"epstein barr virus infection": "D020031",
"diabetes insipidus": "D003919",
"toxic hepatitis": "D056486",
"toxic liver damage": "D056486",
"atrial thrombosis": "D003328",
"cardiac hypertrophy": "D006332",
"myocardial degeneration": "D009202",
"alternating sinus rhythm": "D001146",
"sinoatrial block": "D012848",
"sinoatrial (s-a) block": "D012848",
"alternating rhythm": "D001146",
"s-a block": "D012848",
"conduction disorder": "D019955",
"immunocytoma": "-1",
"albuminuria": "D000419",
"ascites": "D001201",
"hydrothorax": "D006876",
"apnoea": "D001049",
"neuromuscular blockade": "D020879",
"hyperactivity": "D006948",
"stereotypies": "D019956",
"accelerated junctional rhythms": "D001145",
"supraventricular tachyarrhythmias": "D013617",
"ovarian cancer": "D010051",
"hematologic toxicity": "D006402",
"gastrointestinal toxicity": "D005767",
"dissecting aneurysm": "D000784",
"locked-in syndrome": "D011782",
"chronic active hepatitis": "D006521",
"anginal attacks": "D000787",
"tachycardial": "D013610",
"renal tubular acidosis": "D000141",
"acute cholecystitis": "D041881",
"hepatic insufficiency syndrome": "D048550",
"thrombopenia": "D013921",
"hematological disorders": "D006402",
"pains": "D010146",
"hernias": "D006547",
"amblyopia": "D000550",
"tonic pupillary": "D015845",
"leg cramps": "D009120",
"jaw stiffness": "D014313",
"prolonged jaw rigidity": "D014313",
"gouty arthritis": "D015210",
"hyporeninemic hypoaidosteronism": "D006994",
"diabetes mellitus": "D003920",
"swelling": "D004487",
"respiratory upset": "D012140",
"psychoses": "D011605",
"motor disability": "D009069",
"dystonic": "D020821",
"choreic": "D002819",
"mid-dose dyskinesias": "D004409",
"drug toxicity": "D064420",
"liver disorders": "D008107",
"blood disorders": "D006402",
"skin disorders": "D012871",
"renal disorders": "D007674",
"erythema multiforme": "D004892",
"stevens-johnson syndrome": "D013262",
"toxic epidermal necrolysis": "D013262",
"bradydysrhythmias": "D001919",
"cardiovascular toxicity": "D002318",
"central nervous system toxicity": "D020258",
"non-small cell lung cancer": "D002289",
"arthralgia": "D018771",
"sensory neuropathy": "D012678",
"hematologic toxicities": "D006402",
"renal dysfunction": "D007674",
"cirrhosis": "D005355",
"cirrhotic": "D005355",
"angio-oedema": "D000799",
"urticaria": "D014581",
"circulatory failure": "D012769",
"myocardial dysfunction": "D009202",
"myocarditis": "D009205",
"eosinophilic": "D004802",
"cardiogenic shock": "D012770",
"dysrhythmias": "D001145",
"cardiac arrest": "D006323",
"atrioventricular (av) block": "D054537",
"hyperaldosteronism": "D006929",
"av block": "D054537",
"audiogenic tonic convulsions": "D020195",
"cluster headache": "D003027",
"ischaemic heart disease": "D017202",
"prinzmetal's angina": "D000788",
"tubular damage": "D007674",
"hypertrophic hearts": "D006332",
"hematoma": "D006406",
"coagulopathy": "D001778",
"idiopathic cardiomyopathy": "D002311",
"congenital heart disease": "D006331",
"hypertrophic cardiomyopathy": "D002312",
"valvular heart disease": "D006349",
"cytomegalovirus infections": "D003586",
"atherosclerosis": "D050197",
"lymphoproliferative disorder": "D008232",
"movement difficulties": "D020820",
"cognitive disorders": "D003072",
"hyperkinetic": "D006948",
"pulmonary edema": "D011654",
"lymphomas": "D008223",
"diarrhea": "D003967",
"metabolic acidosis": "D000138",
"adult respiratory distress syndrome": "D012128",
"cardiac injury": "D006331",
"ischemic lesions": "D007511",
"coronary artery spasm": "D003329",
"hypertrophic": "D006984",
"vt": "D017180",
"milk-alkali syndrome": "D006934",
"hypoparathyroidism": "D007011",
"peptic ulcer disease": "D010437",
"ulcer": "D014456",
"alkalosis": "D000471",
"hypercalcemic emergency": "D006934",
"serotonin syndrome": "D020230",
"unipolar depression": "D003866",
"nms": "D009459",
"ss": "D020230",
"pituitary tumors": "D010911",
"bladder irritation": "D001745",
"irritation of the urinary tract": "D014570",
"coronary stenosis": "D023921",
"coronary artery stenosis": "D023921",
"stenosis": "D003251",
"glomerular disease": "D007674",
"growth failure": "D006130",
"glomerular hypertrophy": "D007674",
"tubulointerstitial injury": "-1",
"tubulointerstitial damage": "D007674",
"learning and post-training consolidation deficits": "D007859",
"hypercapnic": "D006935",
"hepatitis b": "D006509",
"venous thromboembolism": "D054556",
"vte": "D054556",
"deep-vein thrombosis": "D020246",
"venous thrombosis": "D020246",
"tonic and clonic seizure": "D012640",
"neuronal death": "D009410",
"ventricular tachyarrhythmias": "D017180",
"dilated cardiomyopathy": "D002311",
"ventricular tachyarrhythmia": "D017180",
"cardiac disease": "D006331",
"loss of myelination": "D003711",
"occlusion of renal vessels": "-1",
"thromboembolic": "D013923",
"pupillary oscillation": "D011681",
"postoperative nausea and vomiting": "D020250",
"major depression disorder": "D003865",
"depressive episode": "D003866",
"depression disorder": "D003866",
"cardiac dysrhythmias": "D001145",
"hyperosmolar nonketotic coma": "D006944",
"manic depression": "D001714",
"hyperosmolar, nonketotic coma": "D006944",
"polydipsia": "D059606",
"hyperglycaemia": "-1",
"polyuric": "D011141",
"type 2 diabetes": "D003924",
"dehydration": "D003681",
"deterioration of left ventricular (lv) systolic function": "D018487",
"deterioration of lv systolic and diastolic performance": "D018487",
"thromboembolism": "D013923",
"osteoporosis": "D010024",
"eosinophilia": "D004802",
"skin reactions": "D003875",
"allergic reactions": "D004342",
"alopecia": "D000505",
"ureteric obstruction": "D014517",
"ureteric calculi": "D014514",
"hiv-infected": "D015658",
"urolithiasis": "D052878",
"ureteric stones": "D014514",
"hiv infection": "D015658",
"ischemic colitis": "D017091",
"coronary vasospasm": "D003329",
"migraine": "D008881",
"bradykinesia": "D018476",
"rigidity": "D009127",
"homonymous hemianopsia": "D006423",
"multicystic encephalomalacia": "D004678",
"hypoxic-ischemic encephalopathy": "D020925",
"intrahepatic cholestasis": "D002780",
"psoriasis": "D011565",
"psoriatic": "D011565",
"gingival hyperplasia": "D005885",
"ventricular arrhythmias": "D001145",
"dyspnea": "D004417",
"macro-papular rash": "D003875",
"allergy": "D004342",
"hypoactivity": "D009069",
"learning impairment": "D007859",
"impairment of learning": "D007859",
"impairment of memory": "D008569",
"liver toxicity": "D056486",
"myocardial ischaemia": "D017202",
"ischaemia": "D007511",
"stable angina": "D060050",
"myocardial stunning": "D017682",
"pulmonary hypertension": "D006976",
"primary pulmonary hypertension": "D006976",
"pph": "D006976",
"priapism": "D011317",
"ischemic optic neuropathies": "D018917",
"optic nerve toxicity": "D009901",
"bradyarrhythmia": "D001919",
"hypercalcemias": "D006934",
"acute nephrotoxicity": "D058186",
"toxoplasmosis": "D014123",
"oliguria": "D009846",
"renal calculi": "D007669",
"ureteral lithiasis": "D052878",
"downbeating nystagmus": "D009759",
"ovarian cancers": "D010051",
"fallopian tube cancers": "D005185",
"carcinoma of the peritoneum": "D010534",
"erythrodysesthesia": "D060831",
"hand-foot syndrome": "D060831",
"stomatitis": "D013280",
"peritoneal carcinoma": "D010534",
"bipolar mania": "D001714",
"extrapyramidal symptom": "D001480",
"eps": "D001480",
"epss": "D001480",
"pupil dilation": "D015878",
"pupillary dilation": "D015878",
"isotretinoin embryopathy": "C535670",
"anotia": "D065817",
"taussig-bing malformation": "D004310",
"stress incontinence": "D014550",
"urinary stress incontinence": "D014550",
"a significant rise in systolic blood pressure": "D006973",
"chf": "D006333",
"cardiotoxity": "D066126",
"cocaine abuse": "D019970",
"ethanol abuse": "D000437",
"abuse of cocaine": "D019970",
"abuse of ethanol": "D000437",
"decrease in cardiac output": "D002303",
"myocardial depression": "D009202",
"parkinsonism": "D010302",
"irregular heartbeat": "D001145",
"irregular heart beat": "D001145",
"graves' disease": "D006111",
"vasculitic disorders": "D014652",
"oral ulcers": "D019226",
"polyarthralgia": "D018771",
"abnormal left ventricular filling": "D018487",
"myocardial disease": "D009202",
"long-qt syndrome": "D008133",
"preterm infants": "D007235",
"cysts": "D003560",
"subependymal cysts": "D001927",
"premature (< 36 weeks of gestation) infants": "D007235",
"subependymal cyst": "D001927",
"cyst": "D003560",
"ototoxicity": "D006311",
"squamous cell carcinoma": "D002294",
"carcinoma of the cervix": "D002583",
"cervical carcinoma": "D002583",
"neutropenic": "D009503",
"emesis": "D014839",
"mucositis": "D052016",
"constipation": "D003248",
"antibiomania": "D001714",
"ocular dyskinesias": "D015835",
"choreatic dyskinesias": "D002819",
"dysmenorrhea": "D004412",
"pelvic pain": "D017699",
"low back pain": "D017116",
"glomerular injury": "D007674",
"nephrotic": "D009404",
"neprotic": "D009401",
"hypoxaemia": "D000860",
"patent ductus arteriosus": "D004374",
"syndrome of inappropriate secretion of anti-diuretic hormone": "D007177",
"interstitial cystitis": "D018856",
"gastric cancer": "D013274",
"agc": "D013274",
"vomitus": "D014839",
"hemolytic-uremic syndrome": "D006463",
"hus": "D006463",
"leukocyturia": "-1",
"human immunodeficiency virus type 1-infected": "D015658",
"impairment of the renal function": "D007674",
"nephrolithiasis": "D053040",
"myocardial necrosis": "D009202",
"cardiac death": "D003643",
"interstitial nephritis": "D009395",
"ain": "D009395",
"retinal vein occlusion": "D012170",
"chronic renal failure": "D007676",
"crf": "D007676",
"decreased the locomotor activity": "D004409",
"af": "D001281",
"mastocytosis": "D008415",
"haematological malignancies": "D019337",
"peripheral sensory neuropathy": "D010523",
"grand mal seizures": "D004830",
"myotonia congenita": "D009224",
"myotonic disorders": "D020967",
"mc": "D009224",
"sustained membrane depolarisation": "-1",
"muscle spasm": "D013035",
"muscle spasms": "D013035",
"myotonic condition": "D020967",
"myotonia": "D009222",
"ion channel disorders": "-1",
"malignant hyperthermia": "D008305",
"hyperventilation": "D006985",
"akinesia": "D004409",
"drug-induced dyskinesias": "D004409",
"retinoblastoma": "D012175",
"abnormal ocular motility": "D015835",
"rupture": "D012421",
"tuberculosis of the lung": "D014397",
"tuberculosis of lymph node": "D014388",
"visual impairment": "D014786",
"gustatory hyperhidrosis": "D013547",
"sweating": "D013547",
"hyperhidrosis": "D006945",
"dry mouth": "D014987",
"sore throat": "D010612",
"thrombophlebitis": "D013924",
"cardiac diseases": "D006331",
"cerebral anoxia": "D002534",
"visual field defects": "D014786",
"visual field defect": "D014786",
"deterioration in visual field": "D014786",
"dermatitis": "D003872",
"facial inflammatory dermatoses": "D005148",
"rosacea": "D012393",
"perioral dermatitis": "D019557",
"inflammatory facial dermatoses": "D005148",
"eczema": "D004485",
"periocular dermatitis": "D019557",
"atopic dermatitis": "D003876",
"papular rosacea": "D012393",
"structural deficits in the human brain": "D001930",
"metabolic abnormalities": "D008659",
"abnormalities in the cortex, hippocampus, white matter, and ventricles": "D001930",
"impaired memory performance": "D008569",
"gliosis": "D005911",
"brain injury": "D001930",
"hepatomegaly": "D006529",
"hepatoma": "D006528",
"hepatotoxic": "D056486",
"hypercholesterolemia": "D006937",
"dyslipidemia": "D050171",
"psychological disturbance": "D001008",
"neurogenic diabetes insipidus": "D018500",
"di": "D003919",
"traumatic brain injury": "D001930",
"nephrogenic di": "D018500",
"neurogenic di": "D018500",
"brain trauma": "D001930",
"chronic hepatitis c": "D019698",
"mitochondrial injury": "D028361",
"mitochondrial oxidative damage": "D028361",
"coronary spasm": "D003329",
"ventricular dysfunction": "D018754",
"oral stereotypies": "D009062",
"swallowing abnormalities": "D020447",
"gastrointestinal abnormalities": "D005767",
"tubulointerstitial nephritis": "D009395",
"schizoaffective disorder": "D011618",
"insulin sensitivity": "D007333",
"insulin resistance": "D007333",
"cerebral ischemia": "D002545",
"middle cerebral artery occlusion": "D020244",
"ischemic brain injury": "D001930",
"htn": "D006973",
"ischemic hemisphere": "D002545",
"strokes": "D020521",
"cutaneous reactions": "D003875",
"erythema": "D004890",
"angioedema": "D000799",
"esrd": "D007676",
"prolonged qt syndrome": "D008133",
"hypocalcaemia": "D006996",
"menopausal symptoms": "D008594",
"transient ischaemic attacks": "D002546",
"colorectal cancer": "D015179",
"endometrial cancer": "D016889",
"gallbladder disease": "D005705",
"fractures": "D050723",
"colon cancer": "D003110",
"chronic disease": "D002908",
"dili": "D056486",
"fulminant hepatic failure": "D017114",
"myocardiopathy": "D009202",
"renal tubular injury": "D007674",
"desquamation": "-1",
"kidney damage": "D007674",
"restless legs syndrome": "D012148",
"rls": "D012148",
"postprandial dyspepsia": "D004415",
"coronary arterial disease": "D003324",
"coronary heart disease": "D003327",
"coronary arteriosclerosis": "D003324",
"hyperglyceridemic effect": "D050171",
"precordial pain": "D002637",
"right bundle branch block": "D002037",
"cardiac symptoms": "D009461",
"cardiac damage": "D006331",
"cardiomyocyte damage": "D009202",
"afl": "D001282",
"lv hypertrophy": "D006332",
"cardiac inotropic": "D006331",
"long qt syndrome": "D008133",
"qt interval prolongation": "D008133",
"arterial dysfunction": "D018754",
"extrapyramidal syndrome": "D001480",
"schizophrenic": "D012559",
"impairs associative learning": "D007859",
"impaired novel word learning": "D007859",
"polyarteritis nodosa": "D010488",
"palmoplantar pustulosis": "D011565",
"testicular pain": "D013733",
"cirrhotic diseases": "D008103",
"autistic disorder": "D001321",
"autism": "D001321",
"headaches": "D006261",
"behavioral dyscontrol": "D002653",
"inattention": "D019958",
"schizophreniform disorder": "D011618",
"visual field loss": "D014786",
"visual field abnormalities": "D014786",
"atrioventricular block": "D054537",
"carcinoma of the breast": "D001943",
"bradycardic": "D001919",
"ami": "D009203",
"gi bleeding": "D006471",
"severe malaria": "D016778",
"premature ventricular contraction": "D018879",
"chill": "D023341",
"tinnitus": "D014012",
"loss of hearing": "D034381",
"pvc": "D018879",
"heart diseases": "D006331",
"electrolyte disorder": "D014883",
"lichenoid dermatitis": "D017512",
"wilson disease": "D006527",
"wilson's disease": "D006527",
"hepatic disorders": "D056486",
"neurologic disorders": "D009422",
"chronic liver disease": "D008107",
"skin lesion": "D012871",
"drop in blood pressure": "D007022",
"syncopal episode": "D013575",
"acute encephalopathy": "D020803",
"cerebral vasospasm": "D020301",
"all": "D054198",
"aphasia": "D001037",
"incontinence": "D014549",
"essential hypertension": "C562386",
"pb poisoning": "D007855",
"impairments in learning": "D007859",
"impairments in attention, inhibitory control, and arousal regulation": "D019958",
"cognitive dysfunction": "D003072",
"affective dysfunction": "D019964",
"major depression": "D003865",
"mental disorders": "D001523",
"md": "D003865",
"panic attack": "D016584",
"failing left ventricle": "D006333",
"mitral regurgitation": "D008944",
"mr": "D008944",
"tonic-clonic seizure": "D004830",
"gtcs": "D004830",
"bronchiectasis": "D001987",
"secondary infection": "D060085",
"organic brain lesions": "-1",
"uremic": "D006463",
"vocal cord paralysis": "D014826",
"vocal nerve palsies": "D014826",
"paralysis": "D010243",
"cortical dysplasia": "D054220",
"acute tubulo-interstitial nephritis": "C564356",
"atin": "C564356",
"tubulo-interstitial injury": "D009395",
"glomerulonephritis": "D005921",
"staphylococcal": "D013203",
"endocarditis": "D004696",
"tuberculosis": "D014376",
"infective endocarditis": "D004696",
"ie": "D004696",
"chronic hepatitis b virus infection": "D019694",
"chronic hepatitis b": "D019694",
"postoperative pain": "D010149",
"tissue injury": "D017695",
"urinary bladder inflammation": "D003556",
"bladder inflammation": "D003556",
"lactic acidosis": "D000140",
"immune reconstitution syndrome": "D054019",
"sensory neurotoxicity": "D010523",
"sensory axonal neuropathy": "D010523",
"cutaneous lupus erythematosus": "D008178",
"cle": "D008178",
"cramps": "D009120",
"pulmonary mass": "D055370",
"membranous glomerulonephritis": "D015433",
"lung mass": "D055370",
"hemosiderin": "D006486",
"hemosiderotic": "D006486",
"autoimmune diseases": "D001327",
"viral hepatitis": "D006525",
"neoplasms": "D009369",
"pulmonary lesion": "D055370",
"neoplasm": "D009369",
"cad": "D003324",
"type 2 diabetic": "D003924",
"uraemia": "D014511",
"focal segmental glomerulosclerosis": "D005923",
"myocardial infarctions": "D009203",
"cardiac arrhythmias": "D001145",
"glioblastoma": "D005909",
"glioblastoma multiforme": "D005909",
"gbm": "D005909",
"lymphopenia": "D008231",
"hyperparathyroidism": "D006961",
"primary hyperparathyroidism": "D049950",
"nonalcoholic fatty liver disease": "D065626",
"nafld": "D065626",
"obesity": "D009765",
"hyperinsulinemia": "D006946",
"anca positive vasculitis": "D056648",
"antineutrophil cytoplasmic antibody (anca)--associated vasculitis": "D056648",
"vasculitic": "D014657",
"arthritis": "D001168",
"pyrexia": "D005334",
"parotiditis": "D010309",
"cerebrovascular disease": "D002561",
"cvd": "D002318",
"gastrointestinal bleeding": "D006471",
"hepatic impairment": "D008107",
"heroin abuse": "D006556",
"unconsciousness": "D014474",
"cerebral ischemic infarction": "D002544",
"hyperlocomotion": "D009069",
"fulminant liver failure": "D017114",
"nsclc": "D002289",
"non-small-cell lung cancer": "D002289",
"pneumonitis": "D011014",
"posterior reversible encephalopathy syndrome": "D054038",
"posterior leukoencephalopathy": "D054038",
"alcoholic cirrhosis": "D008104",
"cerebral edema": "D001929",
"intracranial hypertension": "D019586",
"fhf": "D017114",
"binasal visual field defects": "D014786",
"visual defects": "D014786",
"bilateral visual field abnormalities": "D014786",
"retinal toxicity": "D012164",
"hiv seroconversion": "D006679",
"memory deficits": "D008569",
"hiv disease": "D015658",
"se": "D013226",
"deterioration of learning": "D007859",
"deterioration of short-term memory": "D008569",
"fulminant eosinophilic": "D004802",
"neuropsychiatric symptoms": "D001523",
"neuropsychiatric-like behaviors": "D001523",
"parkinsonian disability": "D020734",
"neuropsychiatric-like behavior": "D001523",
"neuropsychiatric disorders": "D001523",
"alf": "D017114",
"cld": "D008107",
"neuropathic pain": "D009437",
"diabetic neuropathy": "D003929",
"neuropathic": "D009422",
"adductor spasmodic dysphonia": "D014826",
"adsd": "D014826",
"throat pain": "D010612",
"spasmodic dysphonia": "D055154",
"mitochondrial impairment": "D028361",
"mitochondrial abnormalities": "D028361",
"mitochondrial defect": "D028361",
"acute leukemia": "D015470",
"hemoglobinuria": "D006456",
"type 1 diabetes": "D003922",
"diabetes complications": "D048909",
"mantle cell lymphoma": "D020522",
"mcl": "D020522",
"b-cell non-hodgkin's lymphoma": "D008228",
"renal disturbance": "D007674",
"musculoskeletal pain": "D059352",
"meningitis": "D008581",
"bacteremia": "D016470",
"alzheimer's disease": "D000544",
"memory deterioration": "D008569",
"glucose hypometabolism": "D018149",
"ad": "D000544"
}
}
|
BioGPT/data/BC5CDR/raw/test.entities.json/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/test.entities.json",
"repo_id": "BioGPT",
"token_count": 47007
}
| 144 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
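# Run a fine-tuned BioGPT checkpoint on the PubMedQA test split: generate answers,
# strip BPE, detokenize, post-process the generations, and score them against the
# ground-truth labels with hard-match evaluation.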
MODEL_DIR=../../checkpoints/QA-PubMedQA-BioGPT
MODEL=checkpoint.pt
DATA_DIR=${PWD}/../../data/PubMedQA/pqal_qcl_ansis-bin
BASE_DATA_DIR=${DATA_DIR%/*}
BIN_DATA_DIR=${DATA_DIR##*/}
DATA_PREFIX=${BIN_DATA_DIR%-*}
RAW_DATA_DIR=${BASE_DATA_DIR}/raw
OUTPUT_FILE=generate_${MODEL}
INPUT_FILE=${RAW_DATA_DIR}/${DATA_PREFIX}_test.tok.bpe.x
OUTPUT_FILE=${MODEL_DIR}/${OUTPUT_FILE}
GOLD_FILE=${RAW_DATA_DIR}/test_ground_truth.json
# inference
if [ ! -f "$OUTPUT_FILE" ]; then
echo "Begin inferencing ${INPUT_FILE} using ${MODEL_DIR}/${MODEL}"
python ../../inference.py --data_dir=${DATA_DIR} --model_dir=${MODEL_DIR} --model_file=${MODEL} --src_file=${INPUT_FILE} --output_file=${OUTPUT_FILE}
fi
# debpe
sed -i "s/@@ //g" ${OUTPUT_FILE}
# detok
perl ${MOSES}/scripts/tokenizer/detokenizer.perl -l en -a < ${OUTPUT_FILE} > ${OUTPUT_FILE}.detok
# postprocess
python postprocess.py ${OUTPUT_FILE}.detok
# eval
python hard_match_evaluation.py ${OUTPUT_FILE}.detok.extracted.txt ${GOLD_FILE}
|
BioGPT/examples/QA-PubMedQA/infer.sh/0
|
{
"file_path": "BioGPT/examples/QA-PubMedQA/infer.sh",
"repo_id": "BioGPT",
"token_count": 466
}
| 145 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import bitblas
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.base.roller.arch import CUDA
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
from bitblas.gpu import Matmul
from bitblas.utils import auto_detect_nvidia_target
from bitblas.base.utils import apply_and_build
from bitblas.ops.impl.matmul_dequantize_impl import (
matmul_nt_dequantize_b,
matmul_nt_dequantize_b_propagate_a_propagate_b,
)
import tvm
import time
import argparse
parser = argparse.ArgumentParser(description="Benchmark BitBLAS fp16 x nf4 dequantize-matmul kernels on a specific target.")
parser.add_argument(
"--target",
type=str,
default=auto_detect_nvidia_target(),
)
parser.add_argument(
"--batch_seq",
type=int,
default=1,
help="The batch size of the sequence",
)
parser.add_argument(
"--group_size",
type=int,
default=-1,
help="The group size of the sequence",
)
parser.add_argument(
"--benchmark_sets",
nargs="+",
default=["llm_shape_fp16xnf4"],
help="List of benchmark sets, e.g., llm_shape_fp16xnf4",
)
args = parser.parse_args()
group_size = args.group_size
# fmt:off
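# Each benchmark entry is (prim_func, args, default_schedule). The positional args are
# assumed to follow matmul_nt_dequantize_b's signature: (M, N, K, in_dtype, out_dtype,
# accum_dtype, bit, storage_dtype, source_format, with_scaling, with_zeros, group_size,
# fast_decoding, with_bias) -- here fp16 activations against 4-bit "nf" (NormalFloat) weights.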
llm_shape_fp16xnf4 = [
# square test
(matmul_nt_dequantize_b, (1, 16384, 16384, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
# BLOOM-176B
(matmul_nt_dequantize_b, (1, 43008, 14336, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 14336, 14336, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 57344, 14336, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 14336, 57344, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
    # OPT-65B
(matmul_nt_dequantize_b, (1, 9216, 9216, "float16", "float16", "float16", 4, "int8", "nf", True,
False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 36864, 9216, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 9216, 36864, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 22016, 8192, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
# LLAMA-70B/65B
(matmul_nt_dequantize_b, (1, 8192, 22016, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 8192, 8192, "float16", "float16", "float16", 4, "int8", "nf", True,
False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 28672, 8192, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
(matmul_nt_dequantize_b, (1, 8192, 28672, "float16", "float16", "float16", 4, "int8", "nf",
True, False, group_size, False, False), Matmul),
# square test
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(16384, 16384, 16384, "float16", "float16", "float16", 4, "int8", "nf", True, False,
group_size, False, False), Matmul),
# BLOOM-176B
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 43008, 14336, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 14336, 14336, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 57344, 14336, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 14336, 57344, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
# OPT-65B
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 9216, 9216, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 36864, 9216, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 9216, 36864, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 22016, 8192, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
# LLAMA-70B/65B
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 8192, 22016, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 8192, 8192, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 28672, 8192, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
(matmul_nt_dequantize_b_propagate_a_propagate_b,
(8192, 8192, 28672, "float16", "float16", "float16", 4, "int8", "nf", True, False, group_size,
False, False), Matmul),
]
target = tvm.target.Target(args.target)
benchmark_sets = []
for benchmark_set in args.benchmark_sets:
benchmark_sets.extend(eval(benchmark_set))
benchmark_results = {}
# fmt:on
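# For each workload: lower the TIR prim_func, fast-tune it with the roller policy
# (top-20 candidate configs built in parallel), then build the untuned default
# DLight-style schedule and compare the measured latencies of both.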
for get_prim_func, input_args, d_schedule in benchmark_sets:
ir_module = get_prim_func(*input_args)
func = ir_module["main"]
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(20)
tune_start = time.time()
cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)
fast_tune_time = time.time() - tune_start
print("[BitBLAS] The best latency of top 1 is {:.3f} ms".format(cpresults[0].latency))
print("[BitBLAS] The best latency of top 20 is {:.3f} ms".format(best.latency))
# evaluate the performance of the default schedule
rule = d_schedule()
default_tune_start = time.time()
with arch.target:
mod = bitblas.ApplyDefaultSchedule( # pylint: disable=not-callable
bitblas.gpu.Matmul(),
bitblas.gpu.GEMV(),
bitblas.gpu.Reduction(),
bitblas.gpu.GeneralReduction(),
bitblas.gpu.Fallback(),
)(
ir_module)
try:
with tvm.transform.PassContext(config={"tir.use_async_copy": True}):
mod_default = tvm.build(mod, target="cuda")
except Exception:
mod_default = None
default_tune_time = time.time() - default_tune_start
profile_tensors = best.profile_tensors
if mod_default is not None:
timer_cuda_mod = mod_default.time_evaluator(mod_default.entry_name, arch.device, number=5)
t = timer_cuda_mod(*profile_tensors).mean
else:
t = 1e4 - 1
print("Time cost of BitBLAS default schedule: {:.3f} ms".format(t * 1e3))
profile_config = {
f"{get_prim_func.__name__}-{'-'.join([str(i) for i in input_args])}": {
"fast_bitblas_top20_tune_time": fast_tune_time,
"fast_bitblas_top1_latency": cpresults[0].latency,
"fast_bitblas_top20_latency": best.latency,
"default_bitblas_tune_time": default_tune_time,
"default_bitblas_latency": t * 1e3 if t is not None else "Failed",
}
}
benchmark_results.update(profile_config)
headers = [
"PrimFunc",
"Input Arguments",
"BitBLAS Top20 Tune Time",
"BitBLAS Top1 Latency",
"BitBLAS Top20 Latency",
"DefaultDLight Tune Time",
"DefaultDLight Latency",
]
col_width = (max(len(word) for row in [headers] + list(profile_config.values()) for word in row) + 2
) # padding
print("".join(word.ljust(col_width) for word in headers))
print("-" * col_width * len(headers))
for config, values in benchmark_results.items():
args = config.split("-")
func_name = args[0]
input_args = "-".join(args[1:])
    row = [
        func_name,
        input_args,
        f"{values['fast_bitblas_top20_tune_time']:.3f} s",
        f"{values['fast_bitblas_top1_latency']:.3f} ms",
        f"{values['fast_bitblas_top20_latency']:.3f} ms",
        f"{values['default_bitblas_tune_time']:.3f} s",
        f"{values['default_bitblas_latency']:.3e} ms",
    ]
print("".join(word.ljust(col_width) for word in row))
|
BitBLAS/benchmark/dsl/matmul_dequantize_af.py/0
|
{
"file_path": "BitBLAS/benchmark/dsl/matmul_dequantize_af.py",
"repo_id": "BitBLAS",
"token_count": 4474
}
| 146 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include <assert.h>
#include "ladder_kernel.h"
#include "mma.h"
// nvcc ladder_kernel.cu -gencode arch=compute_80,code=sm_80
__global__ void __launch_bounds__(128) bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt(half* __restrict__ A, half* __restrict__ QB, half* __restrict__ D) {
signed char* B = ((int8_t *)QB);
half* Scale = (half *)((int8_t *)QB + 19660800);
half* Zeros = (half *)((int8_t *)QB + 20889600);
// const dim3 GridDim(15360, 1, 1);
// const dim3 BlockDim(128, 1, 1);
// bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
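  // GEMV path (M == 1): each thread block computes one element of D. Every thread
  // decodes its slice of the 2-bit weights to fp16 and accumulates a partial dot
  // product over K; partial sums are then reduced with warp shuffles and a small
  // shared-memory staging buffer before thread 0 writes the final value.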
half in_thread_C_local[1];
char2 B_local[1];
half B_decode_local[8];
half A_local[8];
__shared__ half red_result[1];
in_thread_C_local[0] = __float2half_rn(0.000000e+00f);
for (int ax1_0 = 0; ax1_0 < 5; ++ax1_0) {
B_local[0] = *(char2*)(B + (((((int)blockIdx.x) * 1280) + (ax1_0 * 256)) + (((int)threadIdx.x) * 2)));
decode_i2u_to_f16_scale_zeros_original(B_local, B_decode_local, (&(Scale[(((((int)blockIdx.x) * 40) + (ax1_0 * 8)) + (((int)threadIdx.x) >> 4))])), (&(Zeros[(((((int)blockIdx.x) * 40) + (ax1_0 * 8)) + (((int)threadIdx.x) >> 4))])), 8);
*(uint4*)(A_local + 0) = *(uint4*)(A + ((ax1_0 * 1024) + (((int)threadIdx.x) * 8)));
for (int ax1_2_0 = 0; ax1_2_0 < 4; ++ax1_2_0) {
for (int ax1_2_1 = 0; ax1_2_1 < 2; ++ax1_2_1) {
in_thread_C_local[0] = (in_thread_C_local[0] + (A_local[((ax1_2_0 * 2) + ax1_2_1)] * B_decode_local[((ax1_2_0 * 2) + ax1_2_1)]));
}
}
}
half red_buf0[1];
uint mask[1];
half t0[1];
half red_buf0_1[1];
uint mask_1[1];
half t0_1[1];
__shared__ half red_buf_staging[4];
red_buf0_1[0] = in_thread_C_local[0];
mask_1[0] = __activemask();
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 16, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 8, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 4, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 2, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 1, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
if ((((int)threadIdx.x) % 32) == 0) {
red_buf_staging[(((int)threadIdx.x) >> 5)] = red_buf0_1[0];
}
__syncthreads();
if (((int)threadIdx.x) < 4) {
red_buf0[0] = red_buf_staging[((int)threadIdx.x)];
}
mask[0] = (__activemask() & (uint)15);
t0[0] = __shfl_down_sync(mask[0], red_buf0[0], 2, 32);
red_buf0[0] = (red_buf0[0] + t0[0]);
t0[0] = __shfl_down_sync(mask[0], red_buf0[0], 1, 32);
red_buf0[0] = (red_buf0[0] + t0[0]);
if (((int)threadIdx.x) == 0) {
((volatile half*)red_result)[0] = red_buf0[0];
}
__syncthreads();
if (((int)threadIdx.x) == 0) {
D[((int)blockIdx.x)] = (half)(((volatile half*)red_result)[0]);
}
}
__global__ void __launch_bounds__(128) bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt(half* __restrict__ A, half* __restrict__ QB, half* __restrict__ D) {
signed char* B = ((int8_t *)QB);
half* Scale = (half *)((int8_t *)QB + 19660800);
half* Zeros = (half *)((int8_t *)QB + 20889600);
// const dim3 GridDim(480, 1, 1);
// const dim3 BlockDim(32, 4, 1);
// bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
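  // Tiled GEMM path (M == 128): tiles of A and the packed weights are staged to shared
  // memory with cp.async (double buffered), the 2-bit weights are decoded to fp16 per
  // tile, fragments are loaded via ldmatrix, and mma.sync m16n8k16 tensor-core
  // instructions accumulate the result before it is written back to D through shared memory.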
const int MAX_BLOCK_N = 10;
const auto baseBlockIdx = blockIdx.x + gridDim.x *blockIdx.y;
const auto totalPanel = (gridDim.x * gridDim.y +MAX_BLOCK_N * gridDim.x - 1) / (MAX_BLOCK_N * gridDim.x);
const auto totalBlock = gridDim.x * gridDim.y;
const auto panelIdx = baseBlockIdx / (MAX_BLOCK_N *gridDim.x);
const auto strideLd = panelIdx + 1 < totalPanel ?MAX_BLOCK_N : (totalBlock - panelIdx * (MAX_BLOCK_N *gridDim.x)) / gridDim.x;
const auto bx = (panelIdx & 1) ? gridDim.x -(baseBlockIdx - panelIdx * MAX_BLOCK_N * gridDim.x) /strideLd - 1 : (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) / strideLd;
const auto by = (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) % strideLd + panelIdx * MAX_BLOCK_N;
const auto bz = blockIdx.z;
const dim3 blockIdx(bx, by, bz);
half C_reindex_shared_warp[32];
__shared__ half A_reindex_shared[16384];
__shared__ signed char B_shared[1024];
__shared__ half B_decode_reindex_shared[2048];
char2 B_local[1];
uint4 B_decode_reindex_local[1];
half A_reindex_shared_warp[16];
half B_decode_reindex_shared_warp[16];
char2 B_local_1[1];
uint4 B_decode_reindex_local_1[1];
half A_reindex_shared_warp_1[16];
half B_decode_reindex_shared_warp_1[16];
for (int var = 0; var < 1; ++var) {
for (int ax1_0_3_init = 0; ax1_0_3_init < 2; ++ax1_0_3_init) {
for (int ax2_0_3_init = 0; ax2_0_3_init < 2; ++ax2_0_3_init) {
for (int i = 0; i < 8; ++i) {
C_reindex_shared_warp[((ax1_0_3_init * 16) + (ax2_0_3_init * 8)) + i] = 0.0;}
;
}
}
#pragma unroll
for (int ax0_ax1_ax2_fused_0 = 0; ax0_ax1_ax2_fused_0 < 8; ++ax0_ax1_ax2_fused_0) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(A_reindex_shared + ((((ax0_ax1_ax2_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_reindex_shared + ((((ax0_ax1_ax2_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + ((((ax0_ax1_ax2_fused_0 * 81920) + (((int)threadIdx.y) * 20480)) + ((((int)threadIdx.x) >> 3) * 5120)) + ((((int)threadIdx.x) & 7) * 8)))), "n"(16)
);
}
}
#pragma unroll
for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 1; ++ax0_ax1_fused_0) {
if (((int)threadIdx.y) < 1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(B_shared + (((int)threadIdx.x) * 16))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + (((int)threadIdx.x) * 16)))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + (((((int)blockIdx.x) * 40960) + (((int)threadIdx.y) * 40960)) + (((int)threadIdx.x) * 1280)))), "n"(16)
);
}
}
}
__asm__ __volatile__("cp.async.commit_group;");
for (int ax3_0_0 = 0; ax3_0_0 < 79; ++ax3_0_0) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_ax2_fused_0_1 = 0; ax0_ax1_ax2_fused_0_1 < 8; ++ax0_ax1_ax2_fused_0_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(A_reindex_shared + (((((((ax3_0_0 + 1) & 1) * 8192) + (ax0_ax1_ax2_fused_0_1 * 1024)) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_reindex_shared + (((((((ax3_0_0 + 1) & 1) * 8192) + (ax0_ax1_ax2_fused_0_1 * 1024)) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + ((((((ax0_ax1_ax2_fused_0_1 * 81920) + (((int)threadIdx.y) * 20480)) + ((((int)threadIdx.x) >> 3) * 5120)) + (ax3_0_0 * 64)) + ((((int)threadIdx.x) & 7) * 8)) + 64))), "n"(16)
);
}
}
#pragma unroll
for (int ax0_ax1_fused_0_1 = 0; ax0_ax1_fused_0_1 < 1; ++ax0_ax1_fused_0_1) {
if (((int)threadIdx.y) < 1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(B_shared + ((((ax3_0_0 + 1) & 1) * 512) + (((int)threadIdx.x) * 16)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + ((((ax3_0_0 + 1) & 1) * 512) + (((int)threadIdx.x) * 16))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + (((((((int)blockIdx.x) * 40960) + (((int)threadIdx.y) * 40960)) + (((int)threadIdx.x) * 1280)) + (ax3_0_0 * 16)) + 16))), "n"(16)
);
}
}
}
__asm__ __volatile__("cp.async.commit_group;");
__asm__ __volatile__("cp.async.wait_group 1;");
__syncthreads();
for (int ax1_ax2_0_fused_0 = 0; ax1_ax2_0_fused_0 < 2; ++ax1_ax2_0_fused_0) {
B_local[0] = *(char2*)(B_shared + (((((ax3_0_0 & 1) * 512) + (ax1_ax2_0_fused_0 * 256)) + (((int)threadIdx.y) * 64)) + (((int)threadIdx.x) * 2)));
decode_i2u_to_f16_scale_zeros_original(B_local, B_decode_reindex_local, (&(Scale[(((((((int)blockIdx.x) * 1280) + (ax1_ax2_0_fused_0 * 640)) + (((int)threadIdx.y) * 160)) + ((((int)threadIdx.x) >> 3) * 40)) + (ax3_0_0 >> 1))])), (&(Zeros[(((((((int)blockIdx.x) * 1280) + (ax1_ax2_0_fused_0 * 640)) + (((int)threadIdx.y) * 160)) + ((((int)threadIdx.x) >> 3) * 40)) + (ax3_0_0 >> 1))])), 8);
*(uint4*)(B_decode_reindex_shared + ((((ax1_ax2_0_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8))) = B_decode_reindex_local[0];
}
__syncthreads();
for (int ax3_0_1 = 0; ax3_0_1 < 4; ++ax3_0_1) {
for (int ax1_0 = 0; ax1_0 < 2; ++ax1_0) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(A_reindex_shared[((((((ax3_0_0 & 1) * 8192) + (((int)threadIdx.y) * 2048)) + (ax1_0 * 1024)) + ((((int)threadIdx.x) & 15) * 64)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(A_reindex_shared[((((((ax3_0_0 & 1) * 8192) + (((int)threadIdx.y) * 2048)) + (ax1_0 * 1024)) + ((((int)threadIdx.x) & 15) * 64)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(A_reindex_shared_warp + (ax1_0 * 8)))[0]), "=r"(((unsigned *)(A_reindex_shared_warp + (ax1_0 * 8)))[1]), "=r"(((unsigned *)(A_reindex_shared_warp + (ax1_0 * 8)))[2]), "=r"(((unsigned *)(A_reindex_shared_warp + (ax1_0 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_1 = 0; ax1_0_1 < 2; ++ax1_0_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(B_decode_reindex_shared[((((ax1_0_1 * 1024) + ((((int)threadIdx.x) >> 4) * 512)) + ((((int)threadIdx.x) & 7) * 64)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(B_decode_reindex_shared[((((ax1_0_1 * 1024) + ((((int)threadIdx.x) >> 4) * 512)) + ((((int)threadIdx.x) & 7) * 64)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0_1 * 8)))[0]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0_1 * 8)))[1]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0_1 * 8)))[2]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0_1 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_3 = 0; ax1_0_3 < 2; ++ax1_0_3) {
for (int ax2_0_3 = 0; ax2_0_3 < 2; ++ax2_0_3) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3 * 16) + (ax2_0_3 * 8))))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3 * 16) + (ax2_0_3 * 8))))[1])
: "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[0]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[1]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[2]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp + (ax2_0_3 * 8)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp + (ax2_0_3 * 8)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3 * 16) + (ax2_0_3 * 8))))[0]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3 * 16) + (ax2_0_3 * 8))))[1]));
}
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3 * 16) + (ax2_0_3 * 8)) + 4)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3 * 16) + (ax2_0_3 * 8)) + 4)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[0]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[1]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[2]), "r"(((unsigned *)(A_reindex_shared_warp + (ax1_0_3 * 8)))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3 * 16) + (ax2_0_3 * 8)) + 4)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3 * 16) + (ax2_0_3 * 8)) + 4)))[1]));
}
}
}
}
}
__asm__ __volatile__("cp.async.wait_group 0;");
__syncthreads();
for (int ax1_ax2_0_fused_0_1 = 0; ax1_ax2_0_fused_0_1 < 2; ++ax1_ax2_0_fused_0_1) {
B_local_1[0] = *(char2*)(B_shared + ((((ax1_ax2_0_fused_0_1 * 256) + (((int)threadIdx.y) * 64)) + (((int)threadIdx.x) * 2)) + 512));
decode_i2u_to_f16_scale_zeros_original(B_local_1, B_decode_reindex_local_1, (&(Scale[(((((((int)blockIdx.x) * 1280) + (ax1_ax2_0_fused_0_1 * 640)) + (((int)threadIdx.y) * 160)) + ((((int)threadIdx.x) >> 3) * 40)) + 39)])), (&(Zeros[(((((((int)blockIdx.x) * 1280) + (ax1_ax2_0_fused_0_1 * 640)) + (((int)threadIdx.y) * 160)) + ((((int)threadIdx.x) >> 3) * 40)) + 39)])), 8);
*(uint4*)(B_decode_reindex_shared + ((((ax1_ax2_0_fused_0_1 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 3) * 64)) + (((((int)threadIdx.x) & 7) ^ (((((int)threadIdx.y) & 1) * 4) + (((int)threadIdx.x) >> 3))) * 8))) = B_decode_reindex_local_1[0];
}
__syncthreads();
for (int ax3_0_1_1 = 0; ax3_0_1_1 < 4; ++ax3_0_1_1) {
for (int ax1_0_2 = 0; ax1_0_2 < 2; ++ax1_0_2) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(A_reindex_shared[(((((((int)threadIdx.y) * 2048) + (ax1_0_2 * 1024)) + ((((int)threadIdx.x) & 15) * 64)) + ((((ax3_0_1_1 * 2) + (((int)threadIdx.x) >> 4)) ^ (((int)threadIdx.x) & 7)) * 8)) + 8192)])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(A_reindex_shared[(((((((int)threadIdx.y) * 2048) + (ax1_0_2 * 1024)) + ((((int)threadIdx.x) & 15) * 64)) + ((((ax3_0_1_1 * 2) + (((int)threadIdx.x) >> 4)) ^ (((int)threadIdx.x) & 7)) * 8)) + 8192)])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_2 * 8)))[0]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_2 * 8)))[1]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_2 * 8)))[2]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_2 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_4 = 0; ax1_0_4 < 2; ++ax1_0_4) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(B_decode_reindex_shared[((((ax1_0_4 * 1024) + ((((int)threadIdx.x) >> 4) * 512)) + ((((int)threadIdx.x) & 7) * 64)) + ((((ax3_0_1_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(B_decode_reindex_shared[((((ax1_0_4 * 1024) + ((((int)threadIdx.x) >> 4) * 512)) + ((((int)threadIdx.x) & 7) * 64)) + ((((ax3_0_1_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_4 * 8)))[0]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_4 * 8)))[1]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_4 * 8)))[2]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_4 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_3_1 = 0; ax1_0_3_1 < 2; ++ax1_0_3_1) {
for (int ax2_0_3_1 = 0; ax2_0_3_1 < 2; ++ax2_0_3_1) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8))))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8))))[1])
: "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[0]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[1]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[2]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax2_0_3_1 * 8)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax2_0_3_1 * 8)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8))))[0]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8))))[1]));
}
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8)) + 4)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8)) + 4)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[0]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[1]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[2]), "r"(((unsigned *)(A_reindex_shared_warp_1 + (ax1_0_3_1 * 8)))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + ((ax2_0_3_1 * 8) + 4)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + ((ax2_0_3_1 * 8) + 4)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8)) + 4)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + (((ax1_0_3_1 * 16) + (ax2_0_3_1 * 8)) + 4)))[1]));
}
}
}
}
for (int ax0 = 0; ax0 < 2; ++ax0) {
for (int ax1 = 0; ax1 < 2; ++ax1) {
__syncthreads();
for (int local_id = 0; local_id < 8; local_id+=2) {
*((uint *)&(&(B_decode_reindex_shared[(((int)threadIdx.y) * 512)]))[((((((local_id % 4) / 2) * 8) + (threadIdx.x / 4)) * 16) + ((((local_id / 4) * 8) + ((threadIdx.x % 4) * 2)) + (local_id % 2)))]) = *((uint *)&C_reindex_shared_warp[((ax0 * 16) + (ax1 * 8)) + local_id]);
}
;
__syncthreads();
#pragma unroll
for (int ax0_ax1_ax2_ax3_ax4_fused_0 = 0; ax0_ax1_ax2_ax3_ax4_fused_0 < 1; ++ax0_ax1_ax2_ax3_ax4_fused_0) {
*(uint4*)(D + ((((((((int)threadIdx.y) * 491520) + (ax0 * 245760)) + ((((int)threadIdx.x) >> 1) * 15360)) + (((int)blockIdx.x) * 32)) + (ax1 * 16)) + ((((int)threadIdx.x) & 1) * 8))) = *(uint4*)(B_decode_reindex_shared + ((((int)threadIdx.y) * 512) + (((int)threadIdx.x) * 8)));
}
}
}
}
}
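// Host-side entry point: dispatches to a pre-generated kernel specialization based on
// the problem shape (M, N, K); returns 0 on success and -1 for unsupported shapes.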
int ladder_gemm_fp16xint2_fp16(half *input_0, half *input_1, half *output, const int M, const int N, const int K, const int trans_a, const int trans_b, half *workspace_ptr)
{
assert(trans_a == 0 && trans_b == 1);
if (M == 1 && N == 15360 && K == 5120){
const dim3 GridDim(15360, 1, 1);
const dim3 BlockDim(128, 1, 1);
bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
return 0;
}
if (M == 128 && N == 15360 && K == 5120){
const dim3 GridDim(480, 1, 1);
const dim3 BlockDim(32, 4, 1);
bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
return 0;
}
return -1;
}
|
BitBLAS/integration/bitdistiller/kenrel_output/ladder_kernel.cu/0
|
{
"file_path": "BitBLAS/integration/bitdistiller/kenrel_output/ladder_kernel.cu",
"repo_id": "BitBLAS",
"token_count": 12003
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import bitblas
import pytest
import time
import numpy as np
from bitblas_quant_linear import QuantLinear
import torch
import torch.nn as nn
# !pip install auto-gptq
from auto_gptq.nn_modules.qlinear.qlinear_cuda_old import (
QuantLinear as CudaOldQuantLinear,)
torch.manual_seed(0)
def gen_quant4(k, n, groupsize=-1):
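    """Generate a random fp16 weight matrix and a 4-bit quantized version of it.

    Returns the original weights, an nn.Linear holding the dequantized reference
    weights, the per-group scales, and the signed quantized weights used for packing.
    """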
maxq = 2**4
w = torch.randn((k, n), dtype=torch.half, device="cpu")
original_w = w.clone()
if groupsize == -1:
groupsize = k
if groupsize != -1:
w = w.reshape((-1, groupsize, n))
w = w.permute(1, 0, 2)
w = w.reshape((groupsize, -1))
s = torch.max(torch.abs(w), 0, keepdim=True)[0]
s *= 2 / maxq
# Quantize.
w = torch.round(w / s).int()
# Unsigned storage.
w += (maxq) // 2
w = torch.clamp(w, 0, maxq)
# Dequantize.
ref = (w - (maxq) // 2).half() * s
if groupsize != -1:
def reshape(w):
w = w.reshape((groupsize, -1, n))
w = w.permute(1, 0, 2)
w = w.reshape((k, n)).contiguous()
return w
ref = reshape(ref)
w = reshape(w)
s = s.reshape((-1, n)).contiguous()
linear = nn.Linear(k, n, bias=False)
linear.weight.data = ref.t()
return original_w, linear, s, (w - (maxq) // 2)
@pytest.mark.parametrize(
"m, in_features, out_features, bits, group_size, bias",
[
(1, 1024, 4096, 4, -1, False),
(1, 1024, 4096, 4, 128, False),
(1, 1024, 4096, 4, 128, True),
],
)
def test_quantization_accuracy(m, in_features, out_features, bits, group_size, bias):
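    """Check that AutoGPTQ's CUDA kernel and BitBLAS QuantLinear both reproduce the
    fp16 reference output computed from the dequantized weights."""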
original_w, linear, s, qw = gen_quant4(in_features, out_features, group_size)
if group_size == -1:
group_size = in_features
zeros = torch.full((in_features // group_size, out_features), 8, dtype=torch.int32)
bitblas_zeros = zeros.clone().T
cuda_old_linear = CudaOldQuantLinear(
bits=bits,
group_size=group_size,
in_features=in_features,
out_features=out_features,
bias=bias,
)
cuda_old_linear.pack(linear, s.T, zeros.T, g_idx=None)
linear_module = torch.nn.Linear(
in_features=in_features,
out_features=out_features,
bias=bias,
dtype=torch.float16,
device="cuda",
)
linear_module.weight.data.copy_(linear.weight.data)
scales = s.to("cuda")
bitblas_qlinear = QuantLinear(
bits, group_size, in_features, out_features, bias, opt_M=m, enable_tuning=True)
bitblas_qlinear.pack(
linear_module.to("cuda"),
scales=scales.T.contiguous().to("cuda"),
zeros=bitblas_zeros.contiguous().to("cuda"),
)
inp = torch.rand(m, in_features, dtype=torch.float16, device="cuda")
cuda_old_linear = cuda_old_linear.to("cuda")
bitblas_qlinear = bitblas_qlinear.to("cuda")
with torch.no_grad():
res_original = linear_module(inp)
res_cuda_old = cuda_old_linear(inp)
res_bitblas = bitblas_qlinear(inp)
# Verify the accuracy of the quantized outputs against the original
torch.testing.assert_close(res_cuda_old, res_original, rtol=1e9, atol=1e-2)
torch.testing.assert_close(res_bitblas, res_original, rtol=1e9, atol=1e-2)
def profile(model, input_data):
model = model.cuda()
model.eval()
output = torch.empty(
input_data.shape[:-1] + (model.out_features,),
dtype=input_data.dtype,
device=input_data.device,
)
def get_runtime(num_repeats=1):
tic = time.time()
for _ in range(num_repeats):
_ = model(input_data, output)
torch.cuda.synchronize()
return (time.time() - tic) * 1000 / num_repeats
with torch.no_grad():
# print("Warming up ...")
st = time.time()
while time.time() - st < 1.0:
get_runtime() # warmup
warmup_runtime = get_runtime()
num_repeats = max(1, int(1000 / warmup_runtime))
times = get_runtime(num_repeats)
return np.mean(times)
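# Worked example of the timing policy above (illustration, not part of the
# original test): if one warm-up call takes ~2 ms, num_repeats becomes
# max(1, int(1000 / 2)) = 500, so the measured loop runs for roughly one second
# and the returned value is the mean latency per call in milliseconds.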
@pytest.mark.parametrize(
"m, in_features, out_features, bits, group_size, bias",
[
(1, 16384, 16384, 4, -1, False),
],
)
def test_profile_performance(m, in_features, out_features, bits, group_size, bias):
bitblas_qlinear = QuantLinear(
bits,
group_size,
in_features,
out_features,
bias,
opt_M=m,
enable_tuning=True,
).cuda()
with torch.no_grad():
input_data = torch.randn(m, in_features, dtype=torch.float16).cuda()
torch_latency = profile(bitblas_qlinear, input_data)
bitblas_latency = bitblas_qlinear.bitblas_matmul.profile_latency()
assert abs(
torch_latency - bitblas_latency
) / torch_latency < 0.1, f"torch_latency: {torch_latency}, bitblas_latency: {bitblas_latency}"
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/integration/pytorch/test_bitblas_quant_linear.py/0
|
{
"file_path": "BitBLAS/integration/pytorch/test_bitblas_quant_linear.py",
"repo_id": "BitBLAS",
"token_count": 2310
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import List
class TileDevice:
"""
Represents the architecture of a computing device, capturing various hardware specifications.
"""
def __init__(self) -> None:
self.reg_cap: int = 0 # Register capacity: The amount of register memory available
self.smem_cap: int = 0 # Shared memory capacity: The amount of shared memory available
self.compute_max_core: int = 0 # The maximum number of computing cores
self.warp_size: int = (
0 # The size of a warp, a group of threads that execute instructions in lockstep
)
self.sm_partition: int = 0 # The number of streaming multiprocessor partitions
self.transaction_size: List[int] = [
0,
0,
] # The size of memory transactions, typically in bytes
self.max_smem_usage: int = 0 # The maximum shared memory usage allowed
self.bandwidth: List[int] = [
0,
0,
] # Bandwidth specifications, possibly including peak and sustained rates
self.platform: str = "unknown" # The platform or manufacturer of the device
self.compute_capability: str = (
"unknown" # The compute capability, indicating the feature set and performance level
)
self.l2_cache_size_bytes: int = 0
        # the size of memory transactions in bytes
        self.transaction_size: List[int] = [0, 0]  # in bytes
        # bandwidth in MB/s, used to recommend the basic tile size
self.bandwidth: List[int] = [0, 0]
def get_avaliable_tensorintrin_shapes(self):
raise NotImplementedError()
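# --- Illustrative sketch below (not part of the original module) ---
# A hypothetical concrete device showing how the TileDevice fields are typically
# populated. The numbers are rough, A100-like values chosen purely for
# illustration; real subclasses (e.g. the CUDA arch) derive them from the target
# and device query rather than hard-coding them.
class _ExampleTileDevice(TileDevice):

    def __init__(self) -> None:
        super().__init__()
        self.reg_cap = 65536  # 32-bit registers per SM
        self.smem_cap = 163 * 1024  # usable shared memory per SM in bytes
        self.compute_max_core = 108  # number of SMs
        self.warp_size = 32
        self.sm_partition = 4  # warp scheduler partitions per SM
        self.transaction_size = [32, 128]  # bytes
        self.max_smem_usage = 164 * 1024
        self.bandwidth = [750, 12080]  # MB/s-scale figures used by the roller
        self.platform = "CUDA"
        self.compute_capability = "80"
        self.l2_cache_size_bytes = 40 * 1024 * 1024

    def get_avaliable_tensorintrin_shapes(self):
        # e.g. the (m, n, k) micro-shape of mma.sync for fp16 on sm_80
        return [(16, 8, 16)]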
|
BitBLAS/python/bitblas/base/roller/arch/arch_base.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/arch/arch_base.py",
"repo_id": "BitBLAS",
"token_count": 639
}
| 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
import os
from tvm.contrib.popen_pool import PopenPoolExecutor, StatusKind
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
from typing import List, Tuple, Optional, Dict, Union, Literal
from tvm import tir, IRModule
from tvm.runtime import Module
from tvm.tir import Schedule
from tvm.relax.expr import Function
import bitblas
from .analysis import get_root_block, get_reduction_blocks, find_var_from_func
from bitblas.base.roller.arch import CUDA
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
import tempfile
import itertools
from tvm.ir.supply import GlobalVarSupply
from bitblas.utils import tensor_replace_dp4a, tensor_remove_make_int4
import logging
logger = logging.getLogger(__name__)
def get_rasterization_code(pannel_width: int = 8) -> str:
return f"""
const int MAX_BLOCK_N = {pannel_width};
const auto baseBlockIdx = blockIdx.x + gridDim.x *blockIdx.y;
const auto totalPanel = (gridDim.x * gridDim.y +MAX_BLOCK_N * gridDim.x - 1) / (MAX_BLOCK_N * gridDim.x);
const auto totalBlock = gridDim.x * gridDim.y;
const auto panelIdx = baseBlockIdx / (MAX_BLOCK_N *gridDim.x);
const auto strideLd = panelIdx + 1 < totalPanel ?MAX_BLOCK_N : (totalBlock - panelIdx * (MAX_BLOCK_N *gridDim.x)) / gridDim.x;
const auto bx = (panelIdx & 1) ? gridDim.x -(baseBlockIdx - panelIdx * MAX_BLOCK_N * gridDim.x) /strideLd - 1 : (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) / strideLd;
const auto by = (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) % strideLd + panelIdx * MAX_BLOCK_N;
const auto bz = blockIdx.z;
const dim3 blockIdx(bx, by, bz);
"""
class CompileResult:
"""
Class to store the result of compilation
"""
def __init__(self, config, sch, mod: Module):
self.config = config
self.sch = sch
self.mod = mod
self.code = mod.imported_modules[0].get_source() if mod else None
self.latency = 1e9
self.profile_tensors = []
self.time_evaluator = None
def profile(self):
profile_tensors = self.profile_tensors
return self.time_evaluator(*profile_tensors).mean * 1e3
def _apply_config(
func: tir.PrimFunc,
config=None, # todo(lei): update typing
) -> Optional[tir.Schedule]:
"""
find rules:
case 1. if the main block has no reduce op, then use the Elementwise rule.
case 2. if the config enabled tensorcore, then use the TensorCore rule.
case 3. if any([t > 1 for t in config.reduce_thread]), we should use the InnerThread Reduction Rule.
case 4. else we should use general reduction rule.
"""
logger.debug("Apply config {}".format(config))
sch = tir.Schedule(func)
root_block = get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
reduction_blocks = get_reduction_blocks(sch, blocks)
if not reduction_blocks:
return bitblas.gpu.ElementWise().apply_config(func, config)
elif config.use_tc:
if config.arch.sm_version >= 80:
# For A100(sm_80) or more advanced gpu, use MMA tensorization.
return bitblas.gpu.MatmulTensorizationMMA().apply_config(func, config)
else:
# For other GPUs, use WMMA tensorization.
return bitblas.gpu.MatmulTensorizationWMMA().apply_config(func, config)
else:
_reduction_rules = []
_reduction_rules.append(bitblas.gpu.GEMV())
if not any([t > 1 for t in config.reduce_thread]):
# Matrix multiplication template doesn't support inner thread reduction
_reduction_rules.append(bitblas.gpu.Matmul())
_reduction_rules.append(bitblas.gpu.GeneralReduction())
for rule in _reduction_rules:
            try:
                sch = rule.apply_config(func, config)
            except Exception as e_msg:
                logger.debug("Apply config failed: {}".format(e_msg))
continue
if sch is not None:
return sch
return None
def get_dummy_input_arrays(
func: Union[tir.PrimFunc, Function],
device: tvm.runtime.Device,
distribution: Literal["uniform", "onefill"] = "uniform",
):
def var_wrapper(v):
if isinstance(v, tvm.tir.Var):
assert "opt_shapes" in func.attrs
assert v.name in func.attrs["opt_shapes"]
return func.attrs["opt_shapes"][v.name].value
elif isinstance(v, tvm.tir.IntImm):
return v.value
else:
raise RuntimeError("Not supported type: ", type(v))
profile_tensors = []
for param in func.params:
if isinstance(func, tir.PrimFunc):
if param not in func.buffer_map:
                # dynamic symbolic vars may appear in params without a buffer entry
continue
arg = func.buffer_map[param]
elif isinstance(func, Function):
arg = param.struct_info
else:
raise ValueError("Not supported type: ", type(func))
def map_numpy_type(intype):
typemap = {
'e4m3_float8': 'float8_e4m3fn',
'e5m2_float8': 'float8_e5m2',
}
if intype in typemap:
return typemap[intype]
else:
return intype
numpy_dtype = map_numpy_type(arg.dtype)
if distribution == "uniform":
profile_tensors.append(
tvm.nd.array(
np.random.rand(*[var_wrapper(i) for i in arg.shape]).astype(numpy_dtype),
device=device,
))
elif distribution == "onefill":
profile_tensors.append(
tvm.nd.array(
np.ones([var_wrapper(i) for i in arg.shape]).astype(numpy_dtype),
device=device,
))
else:
raise ValueError("Not supported distribution: ", distribution)
return profile_tensors
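# Illustrative example (assumption): for a PrimFunc whose buffer_map describes
# A: (128, 64) float16 and B: (64, 32) float16, this returns two tvm.nd.arrays
# of those shapes on `device`, filled uniformly at random or with ones depending
# on `distribution`; dynamic extents are resolved through the function's
# "opt_shapes" attribute by var_wrapper above.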
def apply_and_build_parallel(func,
configs,
arch,
num_repeats=3,
max_workers=10,
data_distribution="uniform") -> CompileResult:
cpresults = []
profile_tensors = get_dummy_input_arrays(func, arch.device, distribution=data_distribution)
max_workers = min(len(configs), os.cpu_count(), max_workers)
# apply config in thread parallel
_sched: List[Schedule] = []
def _apply_schedule(f, c):
try:
sch = _apply_config(f, c)
except Exception as apply_schedule_error:
logger.debug("Apply schedule failed: {}".format(apply_schedule_error))
sch = None
return sch
with ThreadPoolExecutor(max_workers=4) as scheduler:
futures = {scheduler.submit(_apply_schedule, func, config) for config in configs}
for future in as_completed(futures):
_sched.append(future.result())
builder = PopenPoolExecutor(max_workers=max_workers)
# build in process parallel
def _build(context) -> str:
idx, mod, arch = context
if mod is None:
return idx, None, None
# TODO(lei):
        # this is a trick to implement rasterization, will be removed in the future
config = configs[idx]
@tvm.register_func(func_name="tvm_callback_cuda_postproc", override=True)
def tvm_callback_cuda_postproc(code, _):
code = tensor_replace_dp4a(code)
code = tensor_remove_make_int4(code)
return code
with tvm.transform.PassContext(config={"tir.use_async_copy": True, **config.pass_context}):
rt_mod = tvm.build(mod, target=arch.target)
from tvm.contrib.tar import tar # pylint: disable=import-outside-toplevel
artifact_path = os.path.join(tempfile.mkdtemp(), "tvm_tmp_mod." + tar.output_format)
code = rt_mod.imported_modules[0].get_source()
rt_mod.export_library(artifact_path, fcompile=tar)
return idx, code, artifact_path
_mods = [sch.mod if sch is not None else None for sch in _sched]
for map_result in builder.map_with_error_catching(
_build,
[(i, mod, arch) for i, mod in enumerate(_mods)],
):
if map_result.status == StatusKind.TIMEOUT:
logger.debug("LocalBuilder: Timeout")
elif map_result.status == StatusKind.EXCEPTION:
# TODO(lei): redirect the exception to file if needed
logger.debug("LocalBuilder: An exception occurred {}".format(map_result.value))
continue
elif map_result.status == StatusKind.COMPLETE:
idx, code, artifact_path = map_result.value
if artifact_path is None:
logger.debug("Artifact path is None")
continue
sch = _sched[idx]
config = configs[idx]
rt_mod = tvm.runtime.load_module(artifact_path)
cpresult = CompileResult(config, sch, rt_mod)
timer_cuda_mod = rt_mod.time_evaluator(
rt_mod.entry_name, arch.device, number=num_repeats)
cpresult.profile_tensors = profile_tensors
cpresult.time_evaluator = timer_cuda_mod
cpresult.code = code
cpresults.append(cpresult)
else:
raise ValueError(f"Unreachable: unexpected result: {map_result}")
del builder
best = None
best_latency = 1e9
for cpresult in cpresults:
config = cpresult.config
try:
latency = cpresult.profile()
except Exception as e_mesg:
logger.debug(f"Evaluation with config failed {e_mesg}")
continue
logger.info("Evaluation with config {}".format(config))
logger.info("Time cost of this config: {:.3f} ms".format(latency))
cpresult.latency = latency
if latency < best_latency:
best_latency = latency
best = cpresult
return cpresults, best
def apply_and_build(
func,
configs,
arch,
parallel_build=False,
data_distribution="uniform",
) -> Tuple[List[CompileResult], CompileResult]:
max_workers = 10 if parallel_build else 1
return apply_and_build_parallel(
func, configs, arch, max_workers=max_workers, data_distribution=data_distribution)
def fast_tune(
func: tir.PrimFunc,
target: tvm.target.Target,
topk: int = 10,
parallel_build: bool = True,
data_distribution: Literal["uniform", "onefill"] = "uniform",
):
# check the function is a primfunc
if not isinstance(func, tir.PrimFunc):
raise ValueError("Only support func is PrimFunc") # pragma: no cover
if target.kind.name != "cuda":
logger.error("Only support CUDA target")
return None, None
specilized_func = func
if func.attrs is not None and "opt_shapes" in func.attrs:
opt_shapes = func.attrs["opt_shapes"]
# should be int value
if not all([isinstance(v.value, int) for v in opt_shapes.values()]):
logger.error("The opt_shapes should be int value")
return None, None
# currently only support one dynamic range
if len(opt_shapes) > 1:
logger.error("Currently only support one dynamic range")
return None, None
for buffer in func.buffer_map.values():
for axis in buffer.shape:
if isinstance(axis, tvm.tir.Var) and axis.name not in opt_shapes:
                    raise NotImplementedError(
                        "Currently do not support fast tune with a non-dynamic range set")
if opt_shapes:
for name, shape in opt_shapes.items():
var = find_var_from_func(func, name)
specilized_func = func.specialize({
var: shape.astype(var.dtype)
}).with_attr("is_specialized")
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
specilized_func, tags = get_tensorized_func_and_tags(specilized_func, arch.target)
except Exception as e_msg:
logger.debug("Get tensorized func and tags failed: ", e_msg)
tags = None
if tags:
policy = TensorCorePolicy(func=specilized_func, arch=arch, tags=tags)
configs = policy.emit_config(topk)
if len(configs) == 0:
raise ValueError("No valid config generated")
cpresults, best = apply_and_build(
func,
configs,
arch,
parallel_build=parallel_build,
data_distribution=data_distribution,
)
return cpresults, best
# always use the first function as the base
def collect_buffers_to_declare(func):
params = []
# collect dynamic symbolic
dyn_symbolic: List[tvm.tir.Var] = []
buffers_to_declare = []
for param in func.params:
if param not in func.buffer_map:
continue
buffer = func.buffer_map[param]
for axis in buffer.shape:
if isinstance(axis, tvm.tir.Var) and axis not in dyn_symbolic:
dyn_symbolic.append(axis)
buffers_to_declare.append(buffer)
params.append(buffer.data)
# the args should be buffers + dynamic symbolic
params += list(dyn_symbolic)
return params, buffers_to_declare
def refactor_specialized_func(g_var, func, params, buffers_to_declare):
body = func.body
attrs = func.attrs
global_symbol = g_var
if "opt_shapes" in func.attrs:
opt_shapes = func.attrs["opt_shapes"]
def serialize_name(opt_shapes: Dict):
return "_opt_" + "_".join([f"{k}_{v}" for k, v in opt_shapes.items()])
global_symbol += serialize_name(opt_shapes)
ret_type = func.ret_type
for buf in buffers_to_declare:
body = tvm.tir.DeclBuffer(buf, body=body)
# device func must be private
device_func = tvm.tir.PrimFunc(
params, body, ret_type, attrs=attrs).without_attr("global_symbol")
return global_symbol, device_func
def create_dispatch_func(g_var: str, func: tir.PrimFunc, refactored_funcs: List[str]):
global_symbol = g_var
attrs = func.attrs
buffer_map = func.buffer_map
params = func.params
ret_type = func.ret_type
# collect dynamic symbolic
dyn_symbolic: List[tvm.tir.Var] = []
_invoke_params = []
for param in func.params:
if param not in func.buffer_map:
continue
buffer = func.buffer_map[param]
for axis in buffer.shape:
if isinstance(axis, tvm.tir.Var) and axis not in dyn_symbolic:
dyn_symbolic.append(axis)
_invoke_params.append(buffer.data)
_invoke_params += list(dyn_symbolic)
func_range: List[int] = []
global_symbols = []
for g_var, refactor_func in refactored_funcs:
opt_shapes = refactor_func.attrs["opt_shapes"]
func_range.append(list(opt_shapes.values())[0])
global_symbols.append(g_var)
    # TODO(lei): generalize the dispatch function to support multiple dynamic symbolics
    assert len(dyn_symbolic) == 1, "Only one dynamic symbolic is supported currently"
ib = tvm.tir.ir_builder.create()
syb = list(dyn_symbolic)[-1]
last_range = 0
for i, (_range, g_var) in enumerate(zip(func_range, global_symbols)):
if i == 0:
with ib.if_scope(syb <= _range):
ib.emit(tvm.tir.Call(None, g_var, _invoke_params))
else:
with ib.if_scope(tvm.tir.all(syb > last_range, syb <= _range)):
ib.emit(tvm.tir.Call(None, g_var, _invoke_params))
last_range = _range
with ib.if_scope(syb > last_range):
ib.emit(tvm.tir.Call(None, g_var, _invoke_params))
stmt = ib.get()
dispatch_func = tvm.tir.PrimFunc(params, stmt, ret_type, buffer_map, attrs).with_attrs({
"tir.is_global_func": True,
"global_symbol": global_symbol
})
return dispatch_func
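# Worked example of the dispatch structure emitted above (illustration only):
# with one dynamic symbol m and two specialized kernels tuned for m == 16 and
# m == 64, the generated entry function behaves roughly like
#     if m <= 16:          call matmul_opt_m_16(...)
#     elif 16 < m <= 64:   call matmul_opt_m_64(...)
#     else:                call matmul_opt_m_64(...)  # largest kernel as fallback
# i.e. each tuned kernel covers the range up to its opt shape and the last one
# also serves shapes beyond the tuned range.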
def create_dispatch_mod(g_var: str, original_func: tir.PrimFunc,
specialized_funcs: List[tir.PrimFunc]) -> IRModule:
dispatch_mod: IRModule = tvm.IRModule()
g_var_supply = GlobalVarSupply(dispatch_mod)
refactored_funcs = []
for func in specialized_funcs:
params, buffers_to_declare = collect_buffers_to_declare(func)
global_symbol, device_func = refactor_specialized_func(g_var, func, params,
buffers_to_declare)
global_symbol = g_var_supply.fresh_global(global_symbol, add_prefix=False)
dispatch_mod[global_symbol] = device_func
refactored_funcs.append((global_symbol, device_func))
dispatch_func = create_dispatch_func(g_var, original_func, refactored_funcs=refactored_funcs)
dispatch_mod.update(tvm.IRModule.from_expr(dispatch_func))
return dispatch_mod
def fast_tune_with_dynamic_range(
func: tir.PrimFunc,
target: tvm.target.Target,
topk: int = 10,
parallel_build: bool = True,
global_symbol: Optional[str] = None,
dynamic_range: Optional[Dict[str, List[int]]] = None,
) -> IRModule:
if dynamic_range is None:
dynamic_range = {}
if target.kind.name != "cuda":
logger.error("Only support CUDA target")
return None
if not global_symbol:
global_symbol = func.attrs["global_symbol"]
# set opt_shapes for the primfunc with dynamic symbolic
opt_shapes: Dict[str, List[int]] = {}
for buffer in func.buffer_map.values():
for axis in buffer.shape:
if isinstance(axis, tvm.tir.Var):
if axis.name in dynamic_range:
opt_shapes[axis.name] = dynamic_range[axis.name]
else:
raise ValueError(f"[BitBLAS] The axis {axis.name} is not in dynamic_range")
func = func.with_attr("opt_shapes", opt_shapes)
if "opt_shapes" not in func.attrs:
logger.error(
"[BitBLAS] The primfunc has no opt_shapes, please set opt_shapes for the primfunc")
return None
else:
# should be list value
if not all([isinstance(v, tvm.ir.Array) for v in func.attrs["opt_shapes"].values()]):
logger.error("The opt_shapes should be list value")
return None
logger.info("Start fast tuning with dynamic range")
opt_shapes = func.attrs["opt_shapes"]
# Step 1.Calculate the Cartesian product using itertools.product
product_list = list(itertools.product(*(opt_shapes[key] for key in opt_shapes)))
# Convert the Cartesian product to a list of dictionaries
specialize_items: List[Dict] = [dict(zip(opt_shapes.keys(), values)) for values in product_list]
specilized_tuned_funcs: List[tir.PrimFunc] = []
for item in specialize_items:
func = func.with_attr("opt_shapes", item)
_, best = fast_tune(func, target, topk, parallel_build)
if best is None:
return None
specilized_tuned_funcs.append(best.sch.mod["main"])
return create_dispatch_mod(global_symbol, func, specilized_tuned_funcs)
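# Illustrative usage sketch (an assumption, not part of the original module;
# `prim_func` must contain a dynamic dim named "m" for this to apply):
#     target = tvm.target.Target("cuda")
#     dispatch_mod = fast_tune_with_dynamic_range(
#         prim_func, target, topk=10, parallel_build=True,
#         dynamic_range={"m": [16, 64]})
#     # dispatch_mod holds one tuned kernel per candidate value of m plus a
#     # dispatcher that selects between them at runtime.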
|
BitBLAS/python/bitblas/base/utils.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/utils.py",
"repo_id": "BitBLAS",
"token_count": 8587
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""A GEMM schedule rule for GPU operators."""
from typing import Optional, List
from contextlib import suppress
from tvm import tir, DataType
from ..base.roller.hint import Hint, IntrinInfo
from tvm.target import Target
from ..base.roller.rasterization import NoRasterization
from ..base import analysis
from .base import GPUScheduleRule
from ..base.analysis import get_coalesced_veclen
from .matmul_analysis import (
auto_inline_consumer_chain,
auto_inline_producers,
get_reduction_blocks,
normalize_to_matmul,
get_propagate_map,
layout_propagate_chain,
find_last_producer_from_buffer,
_collect_producers,
get_in_out_dtypes,
)
def get_index_map_3d(index_map, l=16, r=16): # noqa: E741
def index_map_3d(b, i, j):
return (
b,
i // l,
j // r,
*index_map(i % l, j % r),
)
return index_map_3d
def get_index_map_5d(index_map):
"""
for layout transformed gemm, the index map should be 5d
"""
def index_map_5d(b, i, j, ii, jj):
return (
b,
i,
j,
*index_map(ii, jj),
)
return index_map_5d
def get_index_map(index_map, l=16, r=16, is_5d=False): # noqa: E741
if is_5d:
return get_index_map_5d(index_map)
return get_index_map_3d(index_map, l, r)
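# Illustrative note (assumption): for a 16x16 warp fragment index map, the 3-D
# variant rewrites global coordinates (b, i, j) into
# (b, i // 16, j // 16, *index_map(i % 16, j % 16)), i.e. tile coordinates
# followed by the permuted intra-tile coordinates, while the 5-D variant is used
# once the (b, i, j) -> (b, i0, j0, ii, jj) layout split has already been
# applied upstream.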
class MatmulTensorizationMMAWithDequantizeInfo(GPUScheduleRule):
"""
The schedule rule for float16 tensor core matmul computation.
func with attr 'dlight.do_not_tensorize' will not be tensorized.
"""
def apply(
self,
func: tir.PrimFunc,
target: Target,
_: bool,
):
"""
For devices without async copy, we can use a simple dequantize schedule without shared memory prefetch.
quantized weight
|
V
dequantized in register
|
V
save into shared memory
|
V
compute
"""
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group,)
from .intrin import get_lop3_intrin_group
import_source: List[str] = []
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
# always enable shared memory rewrite
cache_write_required = True
# Check Dequantize Info
dequantize_info = func.attrs["dequantize_info"]
def check_dequantize_info(dequantize_info):
conditions = []
# currently only support weight only dequantization
conditions.append(len(dequantize_info) == 1)
# TODO(@lei) check if the dequantize value name is weight
return all(conditions)
assert check_dequantize_info(dequantize_info)
(weight_decode_info,) = list(dequantize_info.values())
def check_weight_decode_info(weight_decode_info):
conditions = []
# check source format in ["int", "fp", "nf"]
conditions.append("source_format" in weight_decode_info)
conditions.append(
weight_decode_info["source_format"]["format"] in ["uint", "int", "fp", "nf"])
# check source bits in [1, 2, 4, 8]
conditions.append(weight_decode_info["source_format"]["bits"] in [1, 2, 4, 8])
# check target format in ["float16", "int8"]
conditions.append("target_format" in weight_decode_info)
conditions.append(weight_decode_info["target_format"] in ["float16", "int8"])
return all(conditions)
assert check_weight_decode_info(weight_decode_info), "Invalid Weight Decode Info"
# Start Schedule
# Step 1. Get default schedule config.
# tensor core intrinsic size
in_dtype, out_dtype = get_in_out_dtypes(sch.get(main_block))
intrin_info = IntrinInfo(
in_dtype=in_dtype,
out_dtype=out_dtype,
trans_b=True,
)
if "weight_transform_kind" in func.attrs:
intrin_info.weight_transform_kind = int(func.attrs["weight_transform_kind"])
if "input_transform_kind" in func.attrs:
intrin_info.input_transform_kind = int(func.attrs["input_transform_kind"])
# default Hint
config = Hint().from_dict({
"block": [128, 128],
"warp": [64, 64],
"rstep": [32],
"pipeline_stage": 1,
"use_async": False,
"intrin_info": intrin_info,
"shared_scope": "shared.dyn",
})
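        # Worked example for the default hint above (illustration): block
        # [128, 128] with warp [64, 64] gives 2 x 2 warps per thread block (the
        # threadIdx.y / threadIdx.z extents bound below), and rstep [32] means
        # each k0 iteration consumes a 32-wide slice of K, i.e. two 16-wide
        # tensor-core steps.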
shared_scope = config.shared_scope
intrin_group = get_mma_intrin_group(
load_scope=shared_scope,
store_scope=shared_scope if cache_write_required else "global",
a_dtype=intrin_info.in_dtype,
b_dtype=intrin_info.in_dtype,
out_dtype=intrin_info.out_dtype,
trans_a=intrin_info.trans_a,
trans_b=intrin_info.trans_b,
smooth_a=intrin_info.smooth_a,
smooth_b=intrin_info.smooth_b,
not_use_mma_store_intrinic=False,
)
warp_row_tiles = config.warp[0]
warp_col_tiles = config.warp[1]
block_row_warps = config.block[0] // warp_row_tiles
block_col_warps = config.block[1] // warp_col_tiles
stage = config.pipeline_stage
use_async = config.use_async
chunk = config.rstep[0]
micro_size_x, micro_size_y, micro_size_k = intrin_group["micro_kernel"]
# get the axis for layout transform
def get_axis(l, r, trans): # noqa: E741
return (r, l) if trans else (l, r) # noqa: E741
a_lr = get_axis(micro_size_x, micro_size_k, intrin_info.trans_a)
b_lr = get_axis(micro_size_k, micro_size_y, intrin_info.trans_b)
def can_enable_swizzle(dtype: str, smooth: bool):
            # inject_permuted_layout only supports float16 currently
if dtype == "float16" or dtype == "int8":
if chunk * DataType(dtype).bits != 512:
# currently the swizzle rule only support 512 bit.
return False
# if we use smooth layout, we don't need to do swizzling
return not smooth
return False
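        # Worked example for the 512-bit rule above (illustration): with float16
        # (16-bit) elements the swizzled shared layout needs chunk * 16 == 512,
        # i.e. a reduction chunk of 32; with int8 it needs chunk == 64. Other
        # chunk sizes fall back to the storage_align padding applied later.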
can_swizzle_a = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_a)
can_swizzle_b = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_b)
# rewrite global smooth layout, for dequantize, currently only support weight only recover.
def smooth_gmem_layout_rewrite(sch, main_block, enable=True, trans=False, matrix_name="A"):
if not enable:
return
            # the normalized block may have three read buffers, where the first one is the write buffer.
buffer_offset = (1 if sch.get(main_block).reads[0].buffer
== sch.get(main_block).writes[0].buffer else 0)
buffer_idx = 0 if matrix_name == "A" else 1
source_buffer = sch.get(main_block).reads[buffer_offset + buffer_idx].buffer
# step1: find the first producer block
# Notes: we assume the layout propagate happens in the first producer block
# otherwise, the layout transform will have no effect as it will transform both
# read and write buffer
propagate_block: tir.Block = find_last_producer_from_buffer(
sch, main_block, source_buffer)
# some trick impl may not have reindex block
(weight_dequantize_info,) = dequantize_info.values()
if (sch.get(propagate_block).name_hint == weight_dequantize_info["decode_block"]):
return
# step2: transform the layout with inverse permutation
intra_indexmap, _ = get_propagate_map(
trans=trans, dtype=intrin_info.in_dtype, matrix_name=matrix_name)
# step3: propagate the matmul layout to the first reindex block
intra_indexmap = layout_propagate_chain(
sch,
start_block=main_block,
start_buffer=source_buffer,
end_block=propagate_block,
index_map=intra_indexmap,
)
def inverse_permutation(i, j, ii, jj):
return (i, j, *intra_indexmap.map_indices([ii, jj]))
sch.transform_layout(propagate_block, ("read", 0), inverse_permutation)
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_a, intrin_info.trans_a, matrix_name="A")
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_b, intrin_info.trans_b, matrix_name="B")
warp_size = 32
i_factors, j_factors, k_factors = (
[None, 1, block_row_warps, warp_row_tiles // micro_size_x],
[1, None, block_col_warps, warp_col_tiles // micro_size_y],
[None, chunk // micro_size_k],
)
num_ty = i_factors[2]
num_tz = j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]/B[S, K, J]
if not (func.attrs is not None and "dlight.tensorcore_prenormlized" in func.attrs.keys()):
sch = normalize_to_matmul(sch, main_block, ["n", "t", "n"])
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3)
block_idy = sch.fuse(i0, j0)
block_idx = sch.fuse(i1, j1)
thread_idy = i2
thread_idz = j2
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
sch.bind(thread_idz, "threadIdx.z")
def smooth_layout_recover(block, scope, l=16, r=16, enable=True): # noqa: E741
if not enable:
return
sch.transform_layout(
block,
scope,
lambda b, i, j: (
b,
i // l,
j // r,
i % l,
j % r,
),
)
smooth_layout_recover(block_outer, ("read", 0), *a_lr, enable=intrin_info.inter_transform_a)
smooth_layout_recover(
block_outer,
("read", 1),
*b_lr,
enable=intrin_info.inter_transform_b,
)
smooth_layout_recover(block_outer, ("write", 0), enable=True)
def fetch_to_shared(block, idx, vec_len, can_swizzle=False, is_smooth=False):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0, preserve_unit_loops=True)
ndim = len(sch.get(block_read).iter_vars)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
f_0, f_1, f_2, f_3, f_4 = sch.split(
fused, factors=[None, num_ty, num_tz, warp_size, vec_len])
sch.bind(f_3, "threadIdx.x")
sch.bind(f_2, "threadIdx.z")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_4)
sch.unroll(f_0)
# Apply Swizzling
sch.annotate(block_read, ann_key="permuted_layout", ann_val=can_swizzle)
# if not, apply padding to alleviate bank conflict
if not (can_swizzle or is_smooth):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=pad_offset)
sch.annotate(f_2, "pragma_unroll_explicit", False)
return block_read
a_g2s = fetch_to_shared(
block_outer,
0,
vec_len=4,
can_swizzle=can_swizzle_a,
is_smooth=intrin_info.smooth_a,
)
auto_inline_producers(sch, a_g2s)
def decode_fetch_to_shared(block, idx):
# step1. create memory hierarchy
# global -> local -> shared
block_shared = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_shared, k0, preserve_unit_loops=True)
decode_factor = get_coalesced_veclen(sch.get(block_shared))
_, B_shared_vi, _ = sch.split(
sch.get_loops(block_shared)[-1], factors=[None, 1, decode_factor])
block_shared_local = sch.cache_read(block_shared, 0, "local")
            # global -> dequantized_local -> shared
# step2. inline to local block
weight_dequantize_block = sch.get_block(weight_decode_info["decode_block"])
weight_producers = _collect_producers(sch, weight_dequantize_block)
auto_inline_producers(sch, block_shared_local, weight_producers)
# get target dequantize buffer's idx
def get_idx():
# for LUT dequantize, the expr is LUT(w), the idx is 1
                # maybe we can use a more general, structure-based way
                # to analyze the idx
if weight_decode_info["source_format"]["format"] == "nf":
return 1
return 0
b_idx = get_idx()
            # global -> prefetch_local -> dequantized_local -> shared
block_shared_local_local = sch.cache_read(block_shared_local, b_idx, "local")
sch.compute_at(block_shared_local, B_shared_vi, preserve_unit_loops=True)
sch.compute_at(block_shared_local_local, B_shared_vi, preserve_unit_loops=True)
dequantize_block_local = block_shared_local
if ("zeros_mode" in weight_decode_info and
weight_decode_info["zeros_mode"] == "quantized"):
if ("with_scaling" in weight_decode_info and weight_decode_info["with_scaling"]):
block_local_scales = sch.cache_read(dequantize_block_local, b_idx + 1, "local")
sch.compute_at(block_local_scales, B_shared_vi, preserve_unit_loops=True)
# pop the scale block
auto_inline_producers(sch, block_local_scales)
if ("with_zeros" in weight_decode_info and weight_decode_info["with_zeros"]):
block_local_zeros = sch.cache_read(dequantize_block_local, b_idx + 2, "local")
sch.compute_at(block_local_zeros, B_shared_vi, preserve_unit_loops=True)
auto_inline_producers(sch, block_local_zeros)
for producer in weight_producers:
with suppress(Exception):
auto_inline_producers(sch, producer)
sch.compute_inline(producer)
# fast type conversion
if ("fast_decoding" in weight_decode_info and weight_decode_info["fast_decoding"]):
source_bit = weight_decode_info["source_format"]["bits"]
out_dtype = weight_decode_info["target_format"]
lop3_intrin_info = get_lop3_intrin_group(
out_dtype=out_dtype,
storage_dtype=weight_decode_info["storage_dtype"],
source_format=weight_decode_info["source_format"]["format"],
source_bit=source_bit,
with_scaling=weight_decode_info["with_scaling"],
with_zeros=weight_decode_info["with_zeros"],
zeros_mode=weight_decode_info["zeros_mode"],
)
sch.tensorize(
sch.get_loops(dequantize_block_local)[-1],
lop3_intrin_info["compute"],
)
import_source.append(lop3_intrin_info["c_source"])
sch.annotate(block_shared, ann_key="permuted_layout", ann_val=can_swizzle_b)
union_len = (2 + 4) if intrin_info.smooth_b else (2 + 2)
B_shared_fused = sch.fuse(*sch.get_loops(block_shared)[-union_len:-2])
_, B_shared_ty, B_shared_tz, B_shared_tx = sch.split(
B_shared_fused, factors=[None, num_ty, num_tz, warp_size])
if not (can_swizzle_b or intrin_info.smooth_b):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_shared, 0, axis=-2, factor=16, offset=pad_offset)
sch.bind(B_shared_tx, "threadIdx.x")
sch.bind(B_shared_ty, "threadIdx.y")
sch.bind(B_shared_tz, "threadIdx.z")
sch.vectorize(sch.get_loops(block_shared)[-1])
sch.vectorize(sch.get_loops(block_shared_local_local)[-1])
# cache small tensors, e.g. LUT
if b_idx:
block_shared_lut = sch.cache_read(dequantize_block_local, 0, shared_scope)
sch.reverse_compute_at(block_shared_lut, j2)
_, B_shared_tx = sch.split(
sch.get_loops(block_shared_lut)[-1], factors=[None, warp_size])
sch.bind(B_shared_tx, "threadIdx.x")
return block_shared_local
_ = decode_fetch_to_shared(block_outer, 1)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "warp")
B_mat = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_mat, k1, preserve_unit_loops=True)
sch.compute_at(B_mat, k1, preserve_unit_loops=True)
# create write cache to store matrix from wmma fragments to shared memory and global memory
if cache_write_required:
accumulator_shared_to_global = sch.cache_write(block_outer, 0, shared_scope)
store = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(store, j2)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, micro_size_x])
j0, j1 = sch.split(j, factors=[None, micro_size_y])
sch.reorder(i0, j0, i1, j1)
if cache_write_required:
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
sch.reverse_compute_at(
accumulator_shared_to_global,
sch.get_loops(store)[-5],
preserve_unit_loops=True,
)
vec_len = get_coalesced_veclen(sch.get(accumulator_shared_to_global))
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-5:])
f0, f1, f2 = sch.split(fused, factors=[None, warp_size, vec_len])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
sch.unroll(f0)
sch.annotate(f0, "pragma_unroll_explicit", False)
else:
auto_inline_consumer_chain(sch, store)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
index_map_a, index_map_b, index_map_c = intrin_group["index_map"]
sch.transform_layout(
A_mat,
("write", 0),
get_index_map(index_map_a, *a_lr, intrin_info.inter_transform_a),
)
sch.transform_layout(
B_mat,
("write", 0),
get_index_map(index_map_b, *b_lr, intrin_info.inter_transform_b),
)
sch.transform_layout(
store,
("read", 0),
get_index_map(index_map_c, is_5d=True),
)
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, a_lr[0]])
j0, j1 = sch.split(j, factors=[None, a_lr[1]])
sch.reorder(i0, j0, i1, j1)
ba = sch.blockize(i1)
sch.annotate(ba, ann_key="permuted_layout", ann_val=can_swizzle_a)
sch.tensorize(ba, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, b_lr[0]])
j0, j1 = sch.split(j, factors=[None, b_lr[1]])
sch.reorder(i0, j0, i1, j1)
bb = sch.blockize(i1)
sch.annotate(bb, ann_key="permuted_layout", ann_val=can_swizzle_b)
sch.tensorize(bb, intrin_group["load_b"])
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
tensorize_init_store_compute()
if stage > 1:
sch.annotate(
k0,
ann_key="software_pipeline_stage",
ann_val=[0, 0, stage - 1],
)
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
if use_async:
sch.annotate(k0, "software_pipeline_async_stages", [0])
        # plan rasterization
if not isinstance(config.rasterization_plan, NoRasterization):
device_func, invoke_func = config.rasterization_plan.get_code()
import_source.append(device_func)
sch.annotate(
sch.get_loops(block_init_c)[-2],
ann_key="inject_customized_code_prepend",
ann_val=invoke_func,
)
# plan import source
if len(import_source) > 0:
sch.annotate(
thread_idz,
ann_key="pragma_import_c",
ann_val=("\n").join(import_source),
)
return sch
def sch_dequantize_in_register_with_config(
self,
func: tir.PrimFunc,
config,
):
"""
For devices without async copy, we can use a simple dequantize schedule without shared memory prefetch.
quantized weight
|
V
dequantized in register
|
V
save into shared memory
|
V
compute
"""
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group,)
from .intrin import get_lop3_intrin_group
import_source: List[str] = []
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
# always enable shared memory rewrite
cache_write_required = True
# Check Dequantize Info
dequantize_info = func.attrs["dequantize_info"]
def check_dequantize_info(dequantize_info):
conditions = []
# currently only support weight only dequantization
conditions.append(len(dequantize_info) == 1)
# TODO(@lei) check if the dequantize value name is weight
return all(conditions)
assert check_dequantize_info(dequantize_info)
(weight_decode_info,) = list(dequantize_info.values())
def check_weight_decode_info(weight_decode_info):
conditions = []
# check source format in ["int", "fp", "nf"]
conditions.append("source_format" in weight_decode_info)
conditions.append(
weight_decode_info["source_format"]["format"] in ["uint", "int", "fp", "nf"])
# check source bits in [1, 2, 4, 8]
conditions.append(weight_decode_info["source_format"]["bits"] in [1, 2, 4, 8])
# check target format in ["float16", "int8"]
conditions.append("target_format" in weight_decode_info)
conditions.append(weight_decode_info["target_format"] in ["float16", "int8"])
return all(conditions)
assert check_weight_decode_info(weight_decode_info), "Invalid Weight Decode Info"
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# tensor core intrinsic size
intrin_info = config.intrin_info
shared_scope = config.shared_scope
intrin_group = get_mma_intrin_group(
load_scope=shared_scope,
store_scope=shared_scope if cache_write_required else "global",
a_dtype=intrin_info.in_dtype,
b_dtype=intrin_info.in_dtype,
out_dtype=intrin_info.out_dtype,
trans_a=intrin_info.trans_a,
trans_b=intrin_info.trans_b,
smooth_a=intrin_info.smooth_a,
smooth_b=intrin_info.smooth_b,
not_use_mma_store_intrinic=False,
)
warp_row_tiles = config.warp[0]
warp_col_tiles = config.warp[1]
block_row_warps = config.block[0] // warp_row_tiles
block_col_warps = config.block[1] // warp_col_tiles
stage = config.pipeline_stage
use_async = config.use_async
chunk = config.rstep[0]
micro_size_x, micro_size_y, micro_size_k = intrin_group["micro_kernel"]
# get the axis for layout transform
def get_axis(l, r, trans): # noqa: E741
return (r, l) if trans else (l, r) # noqa: E741
a_lr = get_axis(micro_size_x, micro_size_k, intrin_info.trans_a)
b_lr = get_axis(micro_size_k, micro_size_y, intrin_info.trans_b)
def can_enable_swizzle(dtype: str, smooth: bool):
            # inject_permuted_layout only supports float16 currently
if dtype == "float16" or dtype == "int8":
if chunk * DataType(dtype).bits != 512:
# currently the swizzle rule only support 512 bit.
return False
# if we use smooth layout, we don't need to do swizzling
return not smooth
return False
can_swizzle_a = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_a)
can_swizzle_b = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_b)
# rewrite global smooth layout, for dequantize, currently only support weight only recover.
def smooth_gmem_layout_rewrite(sch, main_block, enable=True, trans=False, matrix_name="A"):
if not enable:
return
            # the normalized block may have three read buffers, where the first one is the write buffer.
buffer_offset = (1 if sch.get(main_block).reads[0].buffer
== sch.get(main_block).writes[0].buffer else 0)
buffer_idx = 0 if matrix_name == "A" else 1
source_buffer = sch.get(main_block).reads[buffer_offset + buffer_idx].buffer
# step1: find the first producer block
# Notes: we assume the layout propagate happens in the first producer block
# otherwise, the layout transform will have no effect as it will transform both
# read and write buffer
propagate_block: tir.Block = find_last_producer_from_buffer(
sch, main_block, source_buffer)
# some trick impl may not have reindex block
(weight_dequantize_info,) = dequantize_info.values()
if (sch.get(propagate_block).name_hint == weight_dequantize_info["decode_block"]):
return
# step2: transform the layout with inverse permutation
intra_indexmap, _ = get_propagate_map(
trans=trans, dtype=intrin_info.in_dtype, matrix_name=matrix_name)
# step3: propagate the matmul layout to the first reindex block
intra_indexmap = layout_propagate_chain(
sch,
start_block=main_block,
start_buffer=source_buffer,
end_block=propagate_block,
index_map=intra_indexmap,
)
def inverse_permutation(i, j, ii, jj):
return (i, j, *intra_indexmap.map_indices([ii, jj]))
sch.transform_layout(propagate_block, ("read", 0), inverse_permutation)
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_a, intrin_info.trans_a, matrix_name="A")
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_b, intrin_info.trans_b, matrix_name="B")
warp_size = 32
i_factors, j_factors, k_factors = (
[None, 1, block_row_warps, warp_row_tiles // micro_size_x],
[1, None, block_col_warps, warp_col_tiles // micro_size_y],
[None, chunk // micro_size_k],
)
num_ty = i_factors[2]
num_tz = j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]/B[S, K, J]
if not (func.attrs is not None and "dlight.tensorcore_prenormlized" in func.attrs.keys()):
sch = normalize_to_matmul(sch, main_block, ["a", "a", "a"])
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3)
block_idy = sch.fuse(i0, j0)
block_idx = sch.fuse(i1, j1)
thread_idy = i2
thread_idz = j2
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
sch.bind(thread_idz, "threadIdx.z")
def smooth_layout_recover(block, scope, l=16, r=16, enable=True): # noqa: E741
if not enable:
return
sch.transform_layout(
block,
scope,
lambda b, i, j: (
b,
i // l,
j // r,
i % l,
j % r,
),
)
smooth_layout_recover(block_outer, ("read", 0), *a_lr, enable=intrin_info.inter_transform_a)
smooth_layout_recover(
block_outer,
("read", 1),
*b_lr,
enable=intrin_info.inter_transform_b,
)
smooth_layout_recover(block_outer, ("write", 0), enable=True)
def fetch_to_shared(block, idx, vec_len, can_swizzle=False, is_smooth=False):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0, preserve_unit_loops=True)
ndim = len(sch.get(block_read).iter_vars)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
f_0, f_1, f_2, f_3, f_4 = sch.split(
fused, factors=[None, num_ty, num_tz, warp_size, vec_len])
sch.bind(f_3, "threadIdx.x")
sch.bind(f_2, "threadIdx.z")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_4)
sch.unroll(f_0)
# Apply Swizzling
sch.annotate(block_read, ann_key="permuted_layout", ann_val=can_swizzle)
# if not, apply padding to alleviate bank conflict
if not (can_swizzle or is_smooth):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=pad_offset)
sch.annotate(f_2, "pragma_unroll_explicit", False)
return block_read
a_g2s = fetch_to_shared(
block_outer,
0,
vec_len=list(config.vectorize.values())[0],
can_swizzle=can_swizzle_a,
is_smooth=intrin_info.smooth_a,
)
auto_inline_producers(sch, a_g2s)
def decode_fetch_to_shared(block, idx):
# step1. create memory hierarchy
# global -> local -> shared
block_shared = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_shared, k0, preserve_unit_loops=True)
decode_factor = get_coalesced_veclen(sch.get(block_shared))
_, B_shared_vi, _ = sch.split(
sch.get_loops(block_shared)[-1], factors=[None, 1, decode_factor])
block_shared_local = sch.cache_read(block_shared, 0, "local")
            # global -> dequantized_local -> shared
# step2. inline to local block
weight_dequantize_block = sch.get_block(weight_decode_info["decode_block"])
weight_producers = _collect_producers(sch, weight_dequantize_block)
auto_inline_producers(sch, block_shared_local, weight_producers)
# get target dequantize buffer's idx
def get_idx():
# for LUT dequantize, the expr is LUT(w), the idx is 1
                # maybe we can use a more general, structure-based way
                # to analyze the idx
if weight_decode_info["source_format"]["format"] == "nf":
return 1
return 0
b_idx = get_idx()
            # global -> prefetch_local -> dequantized_local -> shared
block_shared_local_local = sch.cache_read(block_shared_local, b_idx, "local")
sch.compute_at(block_shared_local, B_shared_vi, preserve_unit_loops=True)
sch.compute_at(block_shared_local_local, B_shared_vi, preserve_unit_loops=True)
dequantize_block_local = block_shared_local
if ("zeros_mode" in weight_decode_info and
weight_decode_info["zeros_mode"] == "quantized"):
if ("with_scaling" in weight_decode_info and weight_decode_info["with_scaling"]):
block_local_scales = sch.cache_read(dequantize_block_local, b_idx + 1, "local")
sch.compute_at(block_local_scales, B_shared_vi, preserve_unit_loops=True)
# pop the scale block
auto_inline_producers(sch, block_local_scales)
if ("with_zeros" in weight_decode_info and weight_decode_info["with_zeros"]):
block_local_zeros = sch.cache_read(dequantize_block_local, b_idx + 2, "local")
sch.compute_at(block_local_zeros, B_shared_vi, preserve_unit_loops=True)
auto_inline_producers(sch, block_local_zeros)
for producer in weight_producers:
with suppress(Exception):
auto_inline_producers(sch, producer)
sch.compute_inline(producer)
# fast type conversion
if ("fast_decoding" in weight_decode_info and weight_decode_info["fast_decoding"]):
source_bit = weight_decode_info["source_format"]["bits"]
out_dtype = weight_decode_info["target_format"]
lop3_intrin_info = get_lop3_intrin_group(
out_dtype=out_dtype,
storage_dtype=weight_decode_info["storage_dtype"],
source_format=weight_decode_info["source_format"]["format"],
source_bit=source_bit,
with_scaling=weight_decode_info["with_scaling"],
with_zeros=weight_decode_info["with_zeros"],
zeros_mode=weight_decode_info["zeros_mode"],
)
sch.tensorize(
sch.get_loops(dequantize_block_local)[-1],
lop3_intrin_info["compute"],
)
import_source.append(lop3_intrin_info["c_source"])
sch.annotate(block_shared, ann_key="permuted_layout", ann_val=can_swizzle_b)
union_len = (2 + 4) if intrin_info.smooth_b else (2 + 2)
B_shared_fused = sch.fuse(*sch.get_loops(block_shared)[-union_len:-2])
_, B_shared_ty, B_shared_tz, B_shared_tx = sch.split(
B_shared_fused, factors=[None, num_ty, num_tz, warp_size])
if not (can_swizzle_b or intrin_info.smooth_b):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_shared, 0, axis=-2, factor=16, offset=pad_offset)
sch.bind(B_shared_tx, "threadIdx.x")
sch.bind(B_shared_ty, "threadIdx.y")
sch.bind(B_shared_tz, "threadIdx.z")
sch.vectorize(sch.get_loops(block_shared)[-1])
sch.vectorize(sch.get_loops(block_shared_local_local)[-1])
# cache small tensors, e.g. LUT
if b_idx:
block_shared_lut = sch.cache_read(dequantize_block_local, 0, shared_scope)
sch.reverse_compute_at(block_shared_lut, j2)
_, B_shared_tx = sch.split(
sch.get_loops(block_shared_lut)[-1], factors=[None, warp_size])
sch.bind(B_shared_tx, "threadIdx.x")
return block_shared_local
_ = decode_fetch_to_shared(block_outer, 1)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "warp")
B_mat = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_mat, k1, preserve_unit_loops=True)
sch.compute_at(B_mat, k1, preserve_unit_loops=True)
# create write cache to store matrix from wmma fragments to shared memory and global memory
if cache_write_required:
accumulator_shared_to_global = sch.cache_write(block_outer, 0, shared_scope)
store = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(store, j2)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, micro_size_x])
j0, j1 = sch.split(j, factors=[None, micro_size_y])
sch.reorder(i0, j0, i1, j1)
if cache_write_required:
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
sch.reverse_compute_at(
accumulator_shared_to_global,
sch.get_loops(store)[-5],
preserve_unit_loops=True,
)
vec_len = get_coalesced_veclen(sch.get(accumulator_shared_to_global))
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-5:])
f0, f1, f2 = sch.split(fused, factors=[None, warp_size, vec_len])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
sch.unroll(f0)
sch.annotate(f0, "pragma_unroll_explicit", False)
else:
auto_inline_consumer_chain(sch, store)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
index_map_a, index_map_b, index_map_c = intrin_group["index_map"]
sch.transform_layout(
A_mat,
("write", 0),
get_index_map(index_map_a, *a_lr, intrin_info.inter_transform_a),
)
sch.transform_layout(
B_mat,
("write", 0),
get_index_map(index_map_b, *b_lr, intrin_info.inter_transform_b),
)
sch.transform_layout(
store,
("read", 0),
get_index_map(index_map_c, is_5d=True),
)
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, a_lr[0]])
j0, j1 = sch.split(j, factors=[None, a_lr[1]])
sch.reorder(i0, j0, i1, j1)
ba = sch.blockize(i1)
sch.annotate(ba, ann_key="permuted_layout", ann_val=can_swizzle_a)
sch.tensorize(ba, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, b_lr[0]])
j0, j1 = sch.split(j, factors=[None, b_lr[1]])
sch.reorder(i0, j0, i1, j1)
bb = sch.blockize(i1)
sch.annotate(bb, ann_key="permuted_layout", ann_val=can_swizzle_b)
sch.tensorize(bb, intrin_group["load_b"])
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
tensorize_init_store_compute()
if stage > 1:
sch.annotate(
k0,
ann_key="software_pipeline_stage",
ann_val=[0, 0, stage - 1],
)
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
if use_async:
sch.annotate(k0, "software_pipeline_async_stages", [0])
        # plan rasterization
if not isinstance(config.rasterization_plan, NoRasterization):
device_func, invoke_func = config.rasterization_plan.get_code()
import_source.append(device_func)
sch.annotate(
sch.get_loops(block_init_c)[-2],
ann_key="inject_customized_code_prepend",
ann_val=invoke_func,
)
# plan import source
if len(import_source) > 0:
sch.annotate(
thread_idz,
ann_key="pragma_import_c",
ann_val=("\n").join(import_source),
)
return sch
def sch_shared_memory_prefetch_with_config(
self,
func: tir.PrimFunc,
config,
):
"""
For A100 Like devices, the shared memory prefetch(async) is required
to achieve optimal performance.
quantized weight
|
V
shared memory prefetch (with async copy)
|
V
dequantized into shared memory
|
V
compute
"""
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group,)
from .intrin import get_lop3_intrin_group
import_source: List[str] = []
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
# always enable shared memory rewrite
cache_write_required = True
# Check Dequantize Info
        # TODO(leiwang): this is a hack to get the configuration, can be improved by writing a pass to analyze the dequantize block.
dequantize_info = func.attrs["dequantize_info"]
def check_dequantize_info(dequantize_info):
conditions = []
# currently only support weight only dequantization
conditions.append(len(dequantize_info) == 1)
# TODO(@lei) check if the dequantize value name is weight
return all(conditions)
assert check_dequantize_info(dequantize_info)
(weight_decode_info,) = list(dequantize_info.values())
def check_weight_decode_info(weight_decode_info):
conditions = []
# check source format in ["int", "fp", "nf"]
conditions.append("source_format" in weight_decode_info)
conditions.append(
weight_decode_info["source_format"]["format"] in ["uint", "int", "fp", "nf"])
# check source bits in [1, 2, 4, 8]
conditions.append(weight_decode_info["source_format"]["bits"] in [1, 2, 4, 8])
# check target format in ["float16", "int8"]
conditions.append("target_format" in weight_decode_info)
conditions.append(weight_decode_info["target_format"] in ["float16", "int8"])
return all(conditions)
assert check_weight_decode_info(weight_decode_info), "Invalid B_decode_info"
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# tensor core intrinsic size
shared_scope = config.shared_scope
intrin_info = config.intrin_info
intrin_group = get_mma_intrin_group(
load_scope=shared_scope,
store_scope=shared_scope if cache_write_required else "global",
a_dtype=intrin_info.in_dtype,
b_dtype=intrin_info.in_dtype,
out_dtype=intrin_info.out_dtype,
trans_a=intrin_info.trans_a,
trans_b=intrin_info.trans_b,
smooth_a=intrin_info.smooth_a,
smooth_b=intrin_info.smooth_b,
not_use_mma_store_intrinic=False,
)
warp_row_tiles = config.warp[0]
warp_col_tiles = config.warp[1]
block_row_warps = config.block[0] // warp_row_tiles
block_col_warps = config.block[1] // warp_col_tiles
stage = config.pipeline_stage
use_async = config.use_async
chunk = config.rstep[0]
micro_size_x, micro_size_y, micro_size_k = intrin_group["micro_kernel"]
# get the axis for layout transform
def get_axis(l, r, trans): # noqa: E741
return (r, l) if trans else (l, r) # noqa: E741
a_lr = get_axis(micro_size_x, micro_size_k, intrin_info.trans_a)
b_lr = get_axis(micro_size_k, micro_size_y, intrin_info.trans_b)
def can_enable_swizzle(dtype: str, smooth: bool):
            # inject_permuted_layout only supports float16 currently
if dtype == "float16" or dtype == "int8":
if chunk * DataType(dtype).bits != 512:
# currently the swizzle rule only support 512 bit.
return False
# if we use smooth layout, we don't need to do swizzling
return not smooth
return False
can_swizzle_a = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_a)
can_swizzle_b = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_b)
# rewrite global smooth layout, for dequantize, currently only support weight only recover.
def smooth_gmem_layout_rewrite(
sch,
main_block,
enable=True,
trans=False,
matrix_name="A",
intrin_group=intrin_group,
):
if not enable:
return
# the normalized block may have three read buffers; the first read can be the write buffer itself, so skip over it in that case.
buffer_offset = (1 if sch.get(main_block).reads[0].buffer
== sch.get(main_block).writes[0].buffer else 0)
buffer_idx = 0 if matrix_name == "A" else 1
source_buffer = sch.get(main_block).reads[buffer_offset + buffer_idx].buffer
# step1: find the first producer block
# Notes: we assume the layout propagate happens in the first producer block
# otherwise, the layout transform will have no effect as it will transform both
# read and write buffer
propagate_block: tir.Block = find_last_producer_from_buffer(
sch, main_block, source_buffer)
# some trick impl may not have reindex block
(weight_dequantize_info,) = dequantize_info.values()
if (sch.get(propagate_block).name_hint == weight_dequantize_info["decode_block"]):
return
# step2: transform the layout with inverse permutation
intra_indexmap, _ = get_propagate_map(
trans=trans, dtype=intrin_info.in_dtype, matrix_name=matrix_name)
# step3: propagate the matmul layout to the first reindex block
intra_indexmap = layout_propagate_chain(
sch,
start_block=main_block,
start_buffer=source_buffer,
end_block=propagate_block,
index_map=intra_indexmap,
)
def inverse_permutation(i, j, ii, jj):
return (i, j, *intra_indexmap.map_indices([ii, jj]))
sch.transform_layout(propagate_block, ("read", 0), inverse_permutation)
intra_index_map, _ = get_propagate_map(
trans=trans, dtype=intrin_info.in_dtype, matrix_name=matrix_name)
# get target dequantize buffer's offset
def get_offset():
# for LUT dequantize, the expr is LUT(w), the idx is 1
# maybe we can use a more general, structure-based way
# to analyze the idx
if weight_dequantize_info["source_format"]["format"] == "nf":
return 1
return 0
offset = get_offset()
dequantize_block = sch.get_block(weight_dequantize_info["decode_block"])
group_size = weight_dequantize_info["group_size"]
_, mn, mk = intrin_group["micro_kernel"]
def get_param_indices(
indexmap,
l=mn,
r=mk,
group_size=group_size # noqa: E741
): # noqa: E741
# assume the param layout is n, k
rl, rr = [x.var for x in sch.get(dequantize_block).iter_vars]
warp_i, warp_j = rl % l, rr % r
spatial_i, spatial_j = rl // l, rr // r
warp_i, warp_j = indexmap.map_indices([warp_i, warp_j])
new_indices = (
spatial_i * l + warp_i,
(spatial_j * r + warp_j) // group_size,
)
return new_indices
with_scaling = bool(weight_dequantize_info["with_scaling"])
if with_scaling:
sch.unsafe_rewrite_buffer_region(
dequantize_block,
("read", offset + 1),
get_param_indices(intra_index_map),
)
with_zeros = bool(weight_dequantize_info["with_zeros"])
if with_zeros:
sch.unsafe_rewrite_buffer_region(
dequantize_block,
("read", offset + 2),
get_param_indices(intra_index_map),
)
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_a, intrin_info.trans_a, matrix_name="A")
smooth_gmem_layout_rewrite(
sch, main_block, intrin_info.smooth_b, intrin_info.trans_b, matrix_name="B")
warp_size = 32
i_factors, j_factors, k_factors = (
[None, 1, block_row_warps, warp_row_tiles // micro_size_x],
[1, None, block_col_warps, warp_col_tiles // micro_size_y],
[None, chunk // micro_size_k],
)
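# How these factors map onto the GPU hierarchy (matches the loop bindings below):
# i0/j0 -> blockIdx.y, i1/j1 -> blockIdx.x, i2 -> threadIdx.y, j2 -> threadIdx.z,
# i3/j3 -> per-warp tile counts; k0 is the (optionally pipelined) outer reduction
# loop and k1 steps through one chunk in micro_size_k-sized pieces.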
num_ty = i_factors[2]
num_tz = j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]/B[S, K, J]
if not (func.attrs is not None and "dlight.tensorcore_prenormlized" in func.attrs.keys()):
sch = normalize_to_matmul(sch, main_block, ["a", "a", "a"])
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3)
block_idy = sch.fuse(i0, j0)
block_idx = sch.fuse(i1, j1)
thread_idy = i2
thread_idz = j2
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
sch.bind(thread_idz, "threadIdx.z")
def smooth_layout_recover(block, scope, l=16, r=16, enable=True): # noqa: E741
if not enable:
return
sch.transform_layout(
block,
scope,
lambda b, i, j: (
b,
i // l,
j // r,
i % l,
j % r,
),
)
smooth_layout_recover(block_outer, ("read", 0), *a_lr, enable=intrin_info.inter_transform_a)
smooth_layout_recover(
block_outer,
("read", 1),
*b_lr,
enable=intrin_info.inter_transform_b,
)
smooth_layout_recover(block_outer, ("write", 0), enable=True)
def fetch_to_shared(block, idx, vec_len, can_swizzle=False, is_smooth=False):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0, preserve_unit_loops=True)
ndim = len(sch.get(block_read).iter_vars)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
f_0, f_1, f_2, f_3, f_4 = sch.split(
fused, factors=[None, num_ty, num_tz, warp_size, vec_len])
sch.bind(f_3, "threadIdx.x")
sch.bind(f_2, "threadIdx.z")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_4)
sch.unroll(f_0)
# Apply Swizzling
sch.annotate(block_read, ann_key="permuted_layout", ann_val=can_swizzle)
# if not, apply padding to alleviate bank conflict
if not (can_swizzle or is_smooth):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=pad_offset)
sch.annotate(f_2, "pragma_unroll_explicit", False)
return block_read
a_g2s = fetch_to_shared(
block_outer,
0,
vec_len=list(config.vectorize.values())[0],
can_swizzle=can_swizzle_a,
is_smooth=intrin_info.smooth_a,
)
auto_inline_producers(sch, a_g2s)
def decode_fetch_to_shared(block, idx):
# step1. create memory hierarchy
# global -> local -> shared
block_shared = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_shared, k0, preserve_unit_loops=True)
# TODO(lei): the factor should be analyzed in more depth.
decode_factor = get_coalesced_veclen(sch.get(block_shared))
_, B_shared_vi, _ = sch.split(
sch.get_loops(block_shared)[-1], factors=[None, 1, decode_factor])
block_shared_local = sch.cache_read(block_shared, 0, "local")
# global -> dequantized_local -> shared
# step2. inline to local block, should skip qzeros
is_qzeros = ("with_zeros" in weight_decode_info and weight_decode_info["with_zeros"] and
weight_decode_info["zeros_mode"] == "quantized")
weight_dequantize_block = sch.get_block(weight_decode_info["decode_block"])
weight_producers = (
_collect_producers(sch, weight_dequantize_block) if is_qzeros else [])
auto_inline_producers(sch, block_shared_local, weight_producers)
# get target dequantize buffer's idx
def get_idx():
# for LUT dequantize, the expr is LUT(w), the idx is 1
# maybe we can use a more general, structure-based way
# to analyze the idx
if weight_decode_info["source_format"]["format"] == "nf":
return 1
return 0
b_idx = get_idx()
# global -> prefetch_local -> dequantized_local -> shared
block_shared_local_local = sch.cache_read(block_shared_local, b_idx, "local")
# global -> prefetch_shared -> vector load -> dequantized_local -> shared
block_shared_local_local_shared = sch.cache_read(block_shared_local_local, 0,
shared_scope)
sch.compute_at(block_shared_local, B_shared_vi, preserve_unit_loops=True)
sch.compute_at(block_shared_local_local, B_shared_vi, preserve_unit_loops=True)
dequantize_block_local = block_shared_local
if is_qzeros:
if ("with_scaling" in weight_decode_info and weight_decode_info["with_scaling"]):
block_local_scales = sch.cache_read(dequantize_block_local, b_idx + 1, "local")
sch.compute_at(block_local_scales, B_shared_vi, preserve_unit_loops=True)
auto_inline_producers(sch, block_local_scales)
if ("with_zeros" in weight_decode_info and weight_decode_info["with_zeros"]):
block_local_zeros = sch.cache_read(dequantize_block_local, b_idx + 2, "local")
sch.compute_at(block_local_zeros, B_shared_vi, preserve_unit_loops=True)
auto_inline_producers(sch, block_local_zeros)
for producer in weight_producers:
with suppress(Exception):
auto_inline_producers(sch, producer)
sch.compute_inline(producer)
# fast type conversion
if ("fast_decoding" in weight_decode_info and weight_decode_info["fast_decoding"]):
source_bit = weight_decode_info["source_format"]["bits"]
out_dtype = weight_decode_info["target_format"]
lop3_intrin_info = get_lop3_intrin_group(
out_dtype=out_dtype,
storage_dtype=weight_decode_info["storage_dtype"],
source_format=weight_decode_info["source_format"]["format"],
source_bit=source_bit,
with_scaling=weight_decode_info["with_scaling"],
with_zeros=weight_decode_info["with_zeros"],
zeros_mode=weight_decode_info["zeros_mode"],
)
sch.tensorize(
sch.get_loops(dequantize_block_local)[-1],
lop3_intrin_info["compute"],
)
import_source.append(lop3_intrin_info["c_source"])
sch.annotate(block_shared, ann_key="permuted_layout", ann_val=can_swizzle_b)
union_len = (2 + 4) if intrin_info.smooth_b else (2 + 2)
B_shared_fused = sch.fuse(*sch.get_loops(block_shared)[-union_len:-2])
_, B_shared_ty, B_shared_tz, B_shared_tx = sch.split(
B_shared_fused, factors=[None, num_ty, num_tz, warp_size])
if not (can_swizzle_b or intrin_info.smooth_b):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_shared, 0, axis=-2, factor=16, offset=pad_offset)
sch.bind(B_shared_tx, "threadIdx.x")
sch.bind(B_shared_ty, "threadIdx.y")
sch.bind(B_shared_tz, "threadIdx.z")
sch.vectorize(sch.get_loops(block_shared)[-1])
sch.vectorize(sch.get_loops(block_shared_local_local)[-1])
sch.compute_at(block_shared_local_local_shared, k0, preserve_unit_loops=True)
ndim = len(sch.get(block_shared_local_local_shared).iter_vars)
fused = sch.fuse(*sch.get_loops(block_shared_local_local_shared)[-ndim:])
f_0, f_1, f_2, f_3, f_4 = sch.split(
fused,
factors=[
None,
num_tz,
num_ty,
warp_size,
get_coalesced_veclen(sch.get(block_shared_local_local_shared)),
],
)
sch.bind(f_3, "threadIdx.x")
sch.bind(f_2, "threadIdx.y")
sch.bind(f_1, "threadIdx.z")
sch.vectorize(f_4)
sch.unroll(f_0)
sch.annotate(f_0, "pragma_unroll_explicit", False)
# cache small tensors, e.g. LUT
if b_idx:
block_shared_lut = sch.cache_read(dequantize_block_local, 0, shared_scope)
sch.reverse_compute_at(block_shared_lut, j2)
_, B_shared_tx = sch.split(
sch.get_loops(block_shared_lut)[-1], factors=[None, warp_size])
sch.bind(B_shared_tx, "threadIdx.x")
return block_shared_local
_ = decode_fetch_to_shared(block_outer, 1)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "warp")
B_mat = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_mat, k1, preserve_unit_loops=True)
sch.compute_at(B_mat, k1, preserve_unit_loops=True)
# create write cache to store matrix from wmma fragments to shared memory and global memory
if cache_write_required:
accumulator_shared_to_global = sch.cache_write(block_outer, 0, shared_scope)
store = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(store, j2)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, micro_size_x])
j0, j1 = sch.split(j, factors=[None, micro_size_y])
sch.reorder(i0, j0, i1, j1)
if cache_write_required:
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
sch.reverse_compute_at(
accumulator_shared_to_global,
sch.get_loops(store)[-5],
preserve_unit_loops=True,
)
vec_len = get_coalesced_veclen(sch.get(accumulator_shared_to_global))
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-5:])
f0, f1, f2 = sch.split(fused, factors=[None, warp_size, vec_len])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
sch.unroll(f0)
sch.annotate(f0, "pragma_unroll_explicit", False)
else:
auto_inline_consumer_chain(sch, store)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
index_map_a, index_map_b, index_map_c = intrin_group["index_map"]
sch.transform_layout(
A_mat,
("write", 0),
get_index_map(index_map_a, *a_lr, intrin_info.inter_transform_a),
)
sch.transform_layout(
B_mat,
("write", 0),
get_index_map(index_map_b, *b_lr, intrin_info.inter_transform_b),
)
sch.transform_layout(
store,
("read", 0),
get_index_map(index_map_c, is_5d=True),
)
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, a_lr[0]])
j0, j1 = sch.split(j, factors=[None, a_lr[1]])
sch.reorder(i0, j0, i1, j1)
ba = sch.blockize(i1)
sch.annotate(ba, ann_key="permuted_layout", ann_val=can_swizzle_a)
sch.tensorize(ba, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, b_lr[0]])
j0, j1 = sch.split(j, factors=[None, b_lr[1]])
sch.reorder(i0, j0, i1, j1)
bb = sch.blockize(i1)
sch.annotate(bb, ann_key="permuted_layout", ann_val=can_swizzle_b)
sch.tensorize(bb, intrin_group["load_b"])
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
tensorize_init_store_compute()
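# Software pipelining (when stage > 1): annotating the outer reduction loop k0 lets the
# TIR software-pipeline pass overlap the global->shared prefetch stages with compute;
# with use_async the copies are additionally issued asynchronously (typically lowered
# to cp.async on sm_80+).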
if stage > 1:
sch.annotate(
k0,
ann_key="software_pipeline_stage",
ann_val=[0, 0, stage - 1, stage - 1],
)
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2, 3])
if use_async:
sch.annotate(k0, "software_pipeline_async_stages", [0])
# plan rasterization
if not isinstance(config.rasterization_plan, NoRasterization):
device_func, invoke_func = config.rasterization_plan.get_code()
import_source.append(device_func)
sch.annotate(
sch.get_loops(block_init_c)[-2],
ann_key="inject_customized_code_prepend",
ann_val=invoke_func,
)
# plan import source
if len(import_source) > 0:
sch.annotate(
thread_idz,
ann_key="pragma_import_c",
ann_val=("\n").join(import_source),
)
return sch
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> Optional[tir.Schedule]:
def check_sm_version(arch: str) -> int:
sm_version = arch.replace("sm_", "")
return int(sm_version) if sm_version.isdigit() else -1
if check_sm_version(config.arch.target.arch) < 80:
"""MMA Template only support sm_80 and above"""
return None
if (config.arch.target.kind.name == "cuda" and
check_sm_version(config.arch.target.arch) == 80):
return self.sch_shared_memory_prefetch_with_config(func, config)
else:
return self.sch_dequantize_in_register_with_config(func, config)
|
BitBLAS/python/bitblas/gpu/matmul_mma_dequantize.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/matmul_mma_dequantize.py",
"repo_id": "BitBLAS",
"token_count": 35225
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from tvm.target import Target
from typing import Literal, Union
from .operator import Operator
from .impl.ladder_permutate_impl import select_implementation
from dataclasses import dataclass
@dataclass(frozen=True)
class LadderPermutateConfig:
M: int
N: int
datatype: Literal["int8", "e4m3_float8", "e5m2_float8"] = "float16"
dequantize_bits: int = -1
storage_dtype: Literal["float16", "int8", "uint8", "int32", "uint32"] = "float16"
propagate_kind: Literal["A", "B"] = "B" # "A" or "B"
transpose_matrix: bool = False
transform_kind: int = 2  # 0: none, 1: inter_warp, 2: intra_warp
target_instruction: Literal["nvidia-mma"] = (
"nvidia-mma" # maybe extend to "cdna-mfma" in future.
)
class LadderPermutate(Operator):
def __init__(
self,
config: LadderPermutateConfig,
name: str = "permutate",
target: Union[str, Target] = "llvm",  # the permutation is assumed to run on the CPU.
enable_tuning: bool = False,
from_database: bool = False,
):
# consider wrapping the arguments into a MatmulConfig
super().__init__(name, config, target)
target = self.target
if target.kind.name == "cuda":
self.optimized_func = self.apply_default_schedule(self.prim_func_mod, target)
if enable_tuning:
self.hardware_aware_finetune()
if not from_database:
self._build_runtime_module(target)
# select implementation based on the Operator config
def _select_implementation(self):
return select_implementation(
M=self.M,
N=self.N,
datatype=self.datatype,
dequantize_bits=self.dequantize_bits,
storage_dtype=self.storage_dtype,
propagate_kind=self.propagate_kind,
transpose_matrix=self.transpose_matrix,
transform_kind=self.transform_kind,
target_instruction=self.target_instruction,
)
@property
def M(self):
return self.config.M
@property
def N(self):
return self.config.N
@property
def datatype(self):
return self.config.datatype
@property
def dequantize_bits(self):
return self.config.dequantize_bits
@property
def storage_dtype(self):
return self.config.storage_dtype
@property
def propagate_kind(self):
return self.config.propagate_kind
@property
def transpose_matrix(self):
return self.config.transpose_matrix
@property
def transform_kind(self):
return self.config.transform_kind
@property
def target_instruction(self):
return self.config.target_instruction
__all__ = ["LadderPermutate", "LadderPermutateConfig"]
|
BitBLAS/python/bitblas/ops/ladder_permutate.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/ladder_permutate.py",
"repo_id": "BitBLAS",
"token_count": 1211
}
| 152 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import subprocess
from thefuzz import process
from tvm.target import Target
from tvm.target.tag import list_tags
import logging
logger = logging.getLogger(__name__)
def get_gpu_model_from_nvidia_smi():
"""
Executes the 'nvidia-smi' command to fetch the name of the first available NVIDIA GPU.
Returns:
str: The name of the GPU, or None if 'nvidia-smi' command fails.
"""
try:
# Execute nvidia-smi command to get the GPU name
output = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"],
encoding="utf-8",
).strip()
except subprocess.CalledProcessError as e:
logger.info("nvidia-smi failed with error: %s", e)
return None
# Return the name of the first GPU if multiple are present
return output.split("\n")[0]
def find_best_match(tags, query):
"""
Finds the best match for a query within a list of tags using fuzzy string matching.
"""
MATCH_THRESHOLD = 25
best_match, score = process.extractOne(query, tags)
def check_target(best, default):
return best if Target(best).arch == Target(default).arch else default
if check_target(best_match, "cuda") == best_match:
return best_match if score >= MATCH_THRESHOLD else "cuda"
else:
logger.info(f"Best match '{best_match}' is not a valid CUDA target, falling back to 'cuda'")
return "cuda"
def auto_detect_nvidia_target() -> str:
"""
Automatically detects the NVIDIA GPU architecture to set the appropriate TVM target.
Returns:
str: The detected TVM target architecture.
"""
# Return a predefined target if specified in the environment variable
# if "TVM_TARGET" in os.environ:
# return os.environ["TVM_TARGET"]
# Fetch all available tags and filter for NVIDIA tags
all_tags = list_tags()
nvidia_tags = [tag for tag in all_tags if "nvidia" in tag]
# Get the current GPU model and find the best matching target
gpu_model = get_gpu_model_from_nvidia_smi()
target = find_best_match(nvidia_tags, gpu_model) if gpu_model else "cuda"
return target
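if __name__ == "__main__":
    # Quick manual check added for illustration: prints the detected TVM target tag.
    # Assumes nvidia-smi is available on PATH; otherwise the generic "cuda" tag is returned.
    print(auto_detect_nvidia_target())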
|
BitBLAS/python/bitblas/utils/target_detector.py/0
|
{
"file_path": "BitBLAS/python/bitblas/utils/target_detector.py",
"repo_id": "BitBLAS",
"token_count": 820
}
| 153 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import bitblas
from bitblas import Linear as BitBLASLinear
import torch
import time
import numpy as np
import torch.nn as nn
import pytest
torch.manual_seed(0)
@pytest.mark.parametrize(
"m, in_features, out_features, bias",
[
(1, 1024, 1024, False),
(1, 1024, 1024, True),
(1024, 1024, 1024, True),
([1, 1024], 1024, 1024, True),
],
)
def test_correctness_consistent(m, in_features, out_features, bias):
linear_torch = (nn.Linear(in_features, out_features, bias=bias).to(torch.float16).cuda())
linear_bitblas = BitBLASLinear(
in_features,
out_features,
bias=bias,
A_dtype="float16",
W_dtype="float16",
accum_dtype="float16",
out_dtype="float16",
opt_M=m,
).cuda()
with torch.no_grad():
linear_bitblas.load_and_transform_weight(linear_torch.weight.clone())
if bias:
linear_bitblas.bias = nn.Parameter(linear_torch.bias.clone())
with torch.no_grad():
if not isinstance(m, int):
# average m
m = sum(m) // len(m)
input_data = torch.randn(m, in_features, dtype=torch.float16).cuda()
output_torch = linear_torch(input_data)
output_bitblas = linear_bitblas(input_data)
torch.testing.assert_close(output_torch, output_bitblas, rtol=1e-1, atol=1e-2)
@pytest.mark.parametrize(
"m, in_features, out_features, bias, W_dtype, group_size, with_scaling, with_zeros, zeros_mode",
[
(1, 1024, 1024, False, "uint4", -1, False, False, None),
(1, 1024, 1024, False, "uint4", -1, False, False, None),
(1024, 1024, 1024, True, "uint4", -1, False, False, None),
(1, 1024, 1024, True, "uint2", -1, True, False, None),
(1, 1024, 1024, True, "uint2", 128, True, True, "original"),
(1024, 1024, 1024, True, "uint2", 128, True, True, "original"),
(1, 1024, 1024, True, "uint2", 128, True, True, "rescale"),
],
)
def test_correctness_weight_only_dequantize(
m,
in_features,
out_features,
bias,
W_dtype,
group_size,
with_scaling,
with_zeros,
zeros_mode,
):
import numpy as np
from bitblas.quantization.utils import general_compress
linear_bitblas = BitBLASLinear(
in_features,
out_features,
bias=bias,
A_dtype="float16",
W_dtype=W_dtype,
accum_dtype="float16",
out_dtype="float16",
group_size=group_size,
with_scaling=with_scaling,
with_zeros=with_zeros,
opt_M=m,
).cuda()
if not isinstance(m, int):
# average m
m = sum(m) // len(m)
input_shape = (m, in_features)
weight_shape = (out_features, in_features)
output_shape = (m, out_features)
inputs = []
inputs.append(torch.rand(input_shape, dtype=torch.float16).cuda() - 0.5)
source_format, bit = (
linear_bitblas.bitblas_matmul.source_format,
linear_bitblas.bitblas_matmul.bit,
)
maxq = 2**(bit - 1)
zeros = maxq
if source_format == "uint":
inputs.append(torch.randint(0, maxq, weight_shape, dtype=torch.int8).cuda())
elif source_format == "int":
inputs.append(torch.randint(-maxq, maxq, weight_shape, dtype=torch.int8).cuda())
else:
raise NotImplementedError
inputs.append(torch.rand(output_shape, dtype=torch.float16).cuda())
intweight = inputs[1]
intweight = intweight.cpu().numpy().astype(np.int8)
if source_format == "int":
intweight = intweight + maxq
if with_zeros:
inputs[1] = inputs[1] - zeros
bias_tensor = torch.rand((output_shape[-1],), dtype=torch.float16).cuda()
ref_result = torch.matmul(inputs[0], (inputs[1].t()).to(torch.float16))
if bias:
ref_result = ref_result + bias_tensor
with torch.no_grad():
qw_np = general_compress(intweight, source_bits=bit, storage_dtype=np.int8)
qw_torch = torch.from_numpy(qw_np).cuda()
permuted_inputs = []
permuted_inputs.append(inputs[0])
if linear_bitblas.bitblas_matmul.weight_transform is not None:
permuted_inputs.append(
linear_bitblas.bitblas_matmul.weight_transform(qw_torch.cpu()).cuda())
else:
permuted_inputs.append(qw_torch)
linear_bitblas.qweight.data = permuted_inputs[-1].clone()
if with_scaling:
if group_size == -1:
group_size = in_features
permuted_inputs.append(
torch.ones([out_features, in_features // group_size], dtype=torch.float16).cuda())
linear_bitblas.scales.data = permuted_inputs[-1].clone()
if with_zeros:
if zeros_mode == "original":
permuted_inputs.append(
torch.ones([out_features, in_features // group_size],
dtype=torch.float16).cuda() * zeros)
elif zeros_mode == "rescale":
original_zeros = (
torch.ones([out_features, in_features // group_size],
dtype=torch.float16).cuda() * zeros)
scaled_zeros = original_zeros * permuted_inputs[-1]
permuted_inputs.append(scaled_zeros)
elif zeros_mode == "quantized":
original_zeros = (
torch.ones([in_features // group_size, out_features], dtype=torch.int8).cuda() *
zeros)
qzeros = general_compress(
original_zeros.cpu().numpy(), source_bits=bit, storage_dtype=np.int8)
permuted_inputs.append(torch.from_numpy(qzeros).cuda())
else:
raise NotImplementedError
linear_bitblas.zeros.data = permuted_inputs[-1].clone()
if bias:
permuted_inputs.append(bias_tensor)
linear_bitblas.bias.data = bias_tensor.clone()
with torch.no_grad():
output_bitblas = linear_bitblas(inputs[0])
torch.testing.assert_close(output_bitblas, ref_result, rtol=1e0, atol=1e0)
def profile(model, input_data):
model = model.cuda()
model.eval()
def get_runtime(num_repeats=1):
tic = time.time()
for _ in range(num_repeats):
_ = model(input_data)
torch.cuda.synchronize()
return (time.time() - tic) * 1000 / num_repeats
with torch.no_grad():
# print("Warming up ...")
st = time.time()
while time.time() - st < 1.0:
get_runtime() # warmup
warmup_runtime = get_runtime()
num_repeats = max(1, int(1000 / warmup_runtime))
times = get_runtime(num_repeats)
return np.mean(times)
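# Example of how `profile` could be used for a quick latency check (illustrative only,
# not executed by the test suite; the constructor arguments mirror the tests above):
#
#   model = BitBLASLinear(1024, 1024, A_dtype="float16", W_dtype="float16",
#                         accum_dtype="float16", out_dtype="float16", opt_M=1)
#   x = torch.rand(1, 1024, dtype=torch.float16).cuda()
#   print(f"mean latency: {profile(model, x):.3f} ms")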
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/testing/python/module/test_bitblas_linear.py/0
|
{
"file_path": "BitBLAS/testing/python/module/test_bitblas_linear.py",
"repo_id": "BitBLAS",
"token_count": 3327
}
| 154 |
date ; hostname ; pwd
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
EXP_LF=True
EXP_RB=288
EXP_LR_ARRAY=(1e-5 2e-5 1e-5 2e-5)
EXP_GN_ARRAY=(cifar10 cifar10 cifar100 cifar100)
for i in {0..3}
do
EXP_LR=${EXP_LR_ARRAY[$i]}
EXP_GN=${EXP_GN_ARRAY[$i]}
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_NODES, $EXP_LR, $EXP_LF, $EXP_GN, $EXP_RB
RUN_NAME=""$EXP_LR"_"$EXP_LF"_10"
python run_cifar.py with run_name=$RUN_NAME learning_rate=$EXP_LR load_flag=$EXP_LF group_name=$EXP_GN resolution_before=$EXP_RB exp_name=METER-Uni-Modal load_path=~/BT/METER_checkpoints/meter_clip16_288_roberta_pretrain.ckpt vit_remove_last=True
RUN_NAME=""$EXP_LR"_"$EXP_LF"_100"
python run_cifar.py with run_name=$RUN_NAME learning_rate=$EXP_LR load_flag=$EXP_LF group_name=$EXP_GN resolution_before=$EXP_RB max_epoch=100 exp_name=METER-Uni-Modal load_path=~/BT/METER_checkpoints/meter_clip16_288_roberta_pretrain.ckpt vit_remove_last=True
done
date
|
BridgeTower/scripts/ftfpt_cifar_meter.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_cifar_meter.sh",
"repo_id": "BridgeTower",
"token_count": 458
}
| 155 |
import functools
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.dataset import ConcatDataset
from . import _datamodules
class MTDataModule(LightningDataModule):
def __init__(self, _config):
datamodule_keys = _config["datasets"]
assert len(datamodule_keys) > 0
super().__init__()
self.prepare_data_per_node = False
self.dm_keys = datamodule_keys
self.dm_dicts = {key: _datamodules[key](_config) for key in datamodule_keys}
self.dms = [v for k, v in self.dm_dicts.items()]
self.batch_size = self.dms[0].batch_size
self.eval_batch_size = self.dms[0].eval_batch_size
self.vocab_size = self.dms[0].vocab_size
self.num_workers = self.dms[0].num_workers
self.seed = _config["seed"]
def prepare_data(self):
for dm in self.dms:
dm.prepare_data()
def setup(self, stage):
for dm in self.dms:
dm.setup(stage)
self.train_dataset = ConcatDataset([dm.train_dataset for dm in self.dms])
self.val_dataset = ConcatDataset([dm.val_dataset for dm in self.dms])
self.test_dataset = ConcatDataset([dm.test_dataset for dm in self.dms])
self.tokenizer = self.dms[0].tokenizer
self.collate = functools.partial(
self.dms[0].train_dataset.collate, mlm_collator=self.dms[0].mlm_collator,
)
if torch.distributed.is_initialized():
# set_epoch for distributed training will be done by pytorch-lightning
self.train_sampler = DistributedSampler(self.train_dataset, shuffle=True)
self.val_sampler = DistributedSampler(self.val_dataset, shuffle=False)
self.test_sampler = DistributedSampler(self.test_dataset, shuffle=False)
else:
self.train_sampler = None
self.val_sampler = None
self.test_sampler = None
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True if not torch.distributed.is_initialized() else None,
sampler=self.train_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
def val_dataloader(self, batch_size=None):
loader = DataLoader(
self.val_dataset,
batch_size=batch_size if batch_size is not None else self.eval_batch_size,
sampler=self.val_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.eval_batch_size,
sampler=self.test_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
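# Illustrative wiring with PyTorch Lightning (assumptions: `_config` is the project's
# config dict, `model` is a LightningModule, and `import pytorch_lightning as pl`):
#
#   dm = MTDataModule(_config)
#   trainer = pl.Trainer(accelerator="gpu", devices=1)
#   trainer.fit(model, datamodule=dm)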
|
BridgeTower/src/datamodules/multitask_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/multitask_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 1416
}
| 156 |
# from https://github.com/salesforce/BLIP/blob/main/transform/randaugment.py
import cv2
import numpy as np
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
'''
same output as PIL.ImageOps.autocontrast
'''
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def equalize_func(img):
'''
same output as PIL.ImageOps.equalize
PIL's implementation is different from cv2.equalize
'''
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0: return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def rotate_func(img, degree, fill=(0, 0, 0)):
'''
like PIL, rotate by degrees, not radians
'''
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out
def solarize_func(img, thresh=128):
'''
same output as PIL.ImageOps.solarize
'''
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out
def color_func(img, factor):
'''
same output as PIL.ImageEnhance.Color
'''
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = (
np.float32([
[0.886, -0.114, -0.114],
[-0.587, 0.413, -0.587],
[-0.299, -0.299, 0.701]]) * factor
+ np.float32([[0.114], [0.587], [0.299]])
)
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = np.array([(
el - mean) * factor + mean
for el in range(256)
]).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def brightness_func(img, factor):
'''
same output as PIL.ImageEnhance.Brightness
'''
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def sharpness_func(img, factor):
'''
The differences between this result and PIL are all on the 4 boundaries; the center
areas are the same
'''
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def posterize_func(img, bits):
'''
same output as PIL.ImageOps.posterize
'''
out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
return out
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
def level_to_args(level):
return ((level / MAX_LEVEL) * 1.8 + 0.1,)
return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 0.3
if np.random.random() > 0.5: level = -level
return (level, replace_value)
return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * float(translate_const)
if np.random.random() > 0.5: level = -level
return (level, replace_value)
return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = int((level / MAX_LEVEL) * cutout_const)
return (level, replace_value)
return level_to_args
def solarize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 256)
return (level, )
return level_to_args
def none_level_to_args(level):
return ()
def posterize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 4)
return (level, )
return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 30
if np.random.random() < 0.5:
level = -level
return (level, replace_value)
return level_to_args
func_dict = {
'Identity': identity_func,
'AutoContrast': autocontrast_func,
'Equalize': equalize_func,
'Rotate': rotate_func,
'Solarize': solarize_func,
'Color': color_func,
'Contrast': contrast_func,
'Brightness': brightness_func,
'Sharpness': sharpness_func,
'ShearX': shear_x_func,
'TranslateX': translate_x_func,
'TranslateY': translate_y_func,
'Posterize': posterize_func,
'ShearY': shear_y_func,
}
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
'Identity': none_level_to_args,
'AutoContrast': none_level_to_args,
'Equalize': none_level_to_args,
'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value),
'Solarize': solarize_level_to_args(MAX_LEVEL),
'Color': enhance_level_to_args(MAX_LEVEL),
'Contrast': enhance_level_to_args(MAX_LEVEL),
'Brightness': enhance_level_to_args(MAX_LEVEL),
'Sharpness': enhance_level_to_args(MAX_LEVEL),
'ShearX': shear_level_to_args(MAX_LEVEL, replace_value),
'TranslateX': translate_level_to_args(
translate_const, MAX_LEVEL, replace_value
),
'TranslateY': translate_level_to_args(
translate_const, MAX_LEVEL, replace_value
),
'Posterize': posterize_level_to_args(MAX_LEVEL),
'ShearY': shear_level_to_args(MAX_LEVEL, replace_value),
}
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
if __name__ == '__main__':
a = RandomAugment()
img = np.random.randn(32, 32, 3)
a(img)
|
BridgeTower/src/transforms/randaugment.py/0
|
{
"file_path": "BridgeTower/src/transforms/randaugment.py",
"repo_id": "BridgeTower",
"token_count": 4710
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
from data.face_dataset import FaceTestDataset
def create_dataloader(opt):
instance = FaceTestDataset()
instance.initialize(opt)
print("dataset [%s] of size %d was created" % (type(instance).__name__, len(instance)))
dataloader = torch.utils.data.DataLoader(
instance,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
drop_last=opt.isTrain,
)
return dataloader
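# Typical use (illustrative; assumes the options object is produced by the project's
# TestOptions parser):
#
#   opt = TestOptions().parse()  # hypothetical parse call from options/test_options.py
#   dataloader = create_dataloader(opt)
#   for i, data in enumerate(dataloader):
#       ...  # run face enhancement on each batch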
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/__init__.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/__init__.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 241
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self, parser):
BaseOptions.initialize(self, parser)
parser.add_argument("--results_dir", type=str, default="./results/", help="saves results here.")
parser.add_argument(
"--which_epoch",
type=str,
default="latest",
help="which epoch to load? set to latest to use latest cached model",
)
parser.add_argument("--how_many", type=int, default=float("inf"), help="how many test images to run")
parser.set_defaults(
preprocess_mode="scale_width_and_crop", crop_size=256, load_size=256, display_winsize=256
)
parser.set_defaults(serial_batches=True)
parser.set_defaults(no_flip=True)
parser.set_defaults(phase="test")
self.isTrain = False
return parser
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/options/test_options.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/options/test_options.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 397
}
| 159 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os.path
import io
import zipfile
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
from data.Load_Bigfile import BigFileMemoryLoader
import random
import cv2
from io import BytesIO
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def synthesize_salt_pepper(image,amount,salt_vs_pepper):
## Give PIL, return the noisy PIL
img_pil=pil_to_np(image)
out = img_pil.copy()
p = amount
q = salt_vs_pepper
flipped = np.random.choice([True, False], size=img_pil.shape,
p=[p, 1 - p])
salted = np.random.choice([True, False], size=img_pil.shape,
p=[q, 1 - q])
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = 0.
noisy = np.clip(out, 0, 1).astype(np.float32)
return np_to_pil(noisy)
def synthesize_gaussian(image,std_l,std_r):
## Give PIL, return the noisy PIL
img_pil=pil_to_np(image)
mean=0
std=random.uniform(std_l/255.,std_r/255.)
gauss=np.random.normal(loc=mean,scale=std,size=img_pil.shape)
noisy=img_pil+gauss
noisy=np.clip(noisy,0,1).astype(np.float32)
return np_to_pil(noisy)
def synthesize_speckle(image,std_l,std_r):
## Give PIL, return the noisy PIL
img_pil=pil_to_np(image)
mean=0
std=random.uniform(std_l/255.,std_r/255.)
gauss=np.random.normal(loc=mean,scale=std,size=img_pil.shape)
noisy=img_pil+gauss*img_pil
noisy=np.clip(noisy,0,1).astype(np.float32)
return np_to_pil(noisy)
def synthesize_low_resolution(img):
w,h=img.size
new_w=random.randint(int(w/2),w)
new_h=random.randint(int(h/2),h)
img=img.resize((new_w,new_h),Image.BICUBIC)
if random.uniform(0,1)<0.5:
img=img.resize((w,h),Image.NEAREST)
else:
img = img.resize((w, h), Image.BILINEAR)
return img
def convertToJpeg(im,quality):
with BytesIO() as f:
im.save(f, format='JPEG',quality=quality)
f.seek(0)
return Image.open(f).convert('RGB')
def blur_image_v2(img):
x=np.array(img)
kernel_size_candidate=[(3,3),(5,5),(7,7)]
kernel_size=random.sample(kernel_size_candidate,1)[0]
std=random.uniform(1.,5.)
#print("The gaussian kernel size: (%d,%d) std: %.2f"%(kernel_size[0],kernel_size[1],std))
blur=cv2.GaussianBlur(x,kernel_size,std)
return Image.fromarray(blur.astype(np.uint8))
def online_add_degradation_v2(img):
task_id=np.random.permutation(4)
for x in task_id:
if x==0 and random.uniform(0,1)<0.7:
img = blur_image_v2(img)
if x==1 and random.uniform(0,1)<0.7:
flag = random.choice([1, 2, 3])
if flag == 1:
img = synthesize_gaussian(img, 5, 50)
if flag == 2:
img = synthesize_speckle(img, 5, 50)
if flag == 3:
img = synthesize_salt_pepper(img, random.uniform(0, 0.01), random.uniform(0.3, 0.8))
if x==2 and random.uniform(0,1)<0.7:
img=synthesize_low_resolution(img)
if x==3 and random.uniform(0,1)<0.7:
img=convertToJpeg(img,random.randint(40,100))
return img
def irregular_hole_synthesize(img,mask):
img_np=np.array(img).astype('uint8')
mask_np=np.array(mask).astype('uint8')
mask_np=mask_np/255
img_new=img_np*(1-mask_np)+mask_np*255
hole_img=Image.fromarray(img_new.astype('uint8')).convert("RGB")
return hole_img,mask.convert("L")
def zero_mask(size):
x=np.zeros((size,size,3)).astype('uint8')
mask=Image.fromarray(x).convert("RGB")
return mask
class UnPairOldPhotos_SR(BaseDataset): ## Synthetic + Real Old
def initialize(self, opt):
self.opt = opt
self.isImage = 'domainA' in opt.name
self.task = 'old_photo_restoration_training_vae'
self.dir_AB = opt.dataroot
if self.isImage:
self.load_img_dir_L_old=os.path.join(self.dir_AB,"Real_L_old.bigfile")
self.load_img_dir_RGB_old=os.path.join(self.dir_AB,"Real_RGB_old.bigfile")
self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile")
self.loaded_imgs_L_old=BigFileMemoryLoader(self.load_img_dir_L_old)
self.loaded_imgs_RGB_old=BigFileMemoryLoader(self.load_img_dir_RGB_old)
self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean)
else:
# self.load_img_dir_clean=os.path.join(self.dir_AB,self.opt.test_dataset)
self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile")
self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean)
####
print("-------------Filter the imgs whose size <256 in VOC-------------")
self.filtered_imgs_clean=[]
for i in range(len(self.loaded_imgs_clean)):
img_name,img=self.loaded_imgs_clean[i]
h,w=img.size
if h<256 or w<256:
continue
self.filtered_imgs_clean.append((img_name,img))
print("--------Origin image num is [%d], filtered result is [%d]--------" % (
len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
## Filter these images whose size is less than 256
# self.img_list=os.listdir(load_img_dir)
self.pid = os.getpid()
def __getitem__(self, index):
is_real_old=0
sampled_dataset=None
degradation=None
if self.isImage: ## domain A , contains 2 kinds of data: synthetic + real_old
P=random.uniform(0,2)
if P>=0 and P<1:
if random.uniform(0,1)<0.5:
sampled_dataset=self.loaded_imgs_L_old
self.load_img_dir=self.load_img_dir_L_old
else:
sampled_dataset=self.loaded_imgs_RGB_old
self.load_img_dir=self.load_img_dir_RGB_old
is_real_old=1
if P>=1 and P<2:
sampled_dataset=self.filtered_imgs_clean
self.load_img_dir=self.load_img_dir_clean
degradation=1
else:
sampled_dataset=self.filtered_imgs_clean
self.load_img_dir=self.load_img_dir_clean
sampled_dataset_len=len(sampled_dataset)
index=random.randint(0,sampled_dataset_len-1)
img_name,img = sampled_dataset[index]
if degradation is not None:
img=online_add_degradation_v2(img)
path=os.path.join(self.load_img_dir,img_name)
# AB = Image.open(path).convert('RGB')
# split AB image into A and B
# apply the same transform to both A and B
if random.uniform(0,1) <0.1:
img=img.convert("L")
img=img.convert("RGB")
## with probability P (0.1 here), the RGB image is converted to grayscale (L) and back to RGB
A=img
w,h=A.size
if w<256 or h<256:
A=transforms.Scale(256,Image.BICUBIC)(A)
## Since we want to only crop the images (256*256), for those old photos whose size is smaller than 256, we first resize them.
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params)
B_tensor = inst_tensor = feat_tensor = 0
A_tensor = A_transform(A)
input_dict = {'label': A_tensor, 'inst': is_real_old, 'image': A_tensor,
'feat': feat_tensor, 'path': path}
return input_dict
def __len__(self):
return len(self.loaded_imgs_clean) ## actually, this is useless, since the selected index is just a random number
def name(self):
return 'UnPairOldPhotos_SR'
class PairOldPhotos(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.isImage = 'imagegan' in opt.name
self.task = 'old_photo_restoration_training_mapping'
self.dir_AB = opt.dataroot
if opt.isTrain:
self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile")
self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean)
print("-------------Filter the imgs whose size <256 in VOC-------------")
self.filtered_imgs_clean = []
for i in range(len(self.loaded_imgs_clean)):
img_name, img = self.loaded_imgs_clean[i]
h, w = img.size
if h < 256 or w < 256:
continue
self.filtered_imgs_clean.append((img_name, img))
print("--------Origin image num is [%d], filtered result is [%d]--------" % (
len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
else:
self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset)
self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir)
self.pid = os.getpid()
def __getitem__(self, index):
if self.opt.isTrain:
img_name_clean,B = self.filtered_imgs_clean[index]
path = os.path.join(self.load_img_dir_clean, img_name_clean)
if self.opt.use_v2_degradation:
A=online_add_degradation_v2(B)
### Remind: A is the input and B is corresponding GT
else:
if self.opt.test_on_synthetic:
img_name_B,B=self.loaded_imgs[index]
A=online_add_degradation_v2(B)
img_name_A=img_name_B
path = os.path.join(self.load_img_dir, img_name_A)
else:
img_name_A,A=self.loaded_imgs[index]
img_name_B,B=self.loaded_imgs[index]
path = os.path.join(self.load_img_dir, img_name_A)
if random.uniform(0,1)<0.1 and self.opt.isTrain:
A=A.convert("L")
B=B.convert("L")
A=A.convert("RGB")
B=B.convert("RGB")
## with probability P, the RGB image is converted to grayscale (L) and back to RGB
##test on L
# split AB image into A and B
# w, h = img.size
# w2 = int(w / 2)
# A = img.crop((0, 0, w2, h))
# B = img.crop((w2, 0, w, h))
w,h=A.size
if w<256 or h<256:
A=transforms.Scale(256,Image.BICUBIC)(A)
B=transforms.Scale(256, Image.BICUBIC)(B)
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params)
B_transform = get_transform(self.opt, transform_params)
B_tensor = inst_tensor = feat_tensor = 0
A_tensor = A_transform(A)
B_tensor = B_transform(B)
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': path}
return input_dict
def __len__(self):
if self.opt.isTrain:
return len(self.filtered_imgs_clean)
else:
return len(self.loaded_imgs)
def name(self):
return 'PairOldPhotos'
class PairOldPhotos_with_hole(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.isImage = 'imagegan' in opt.name
self.task = 'old_photo_restoration_training_mapping'
self.dir_AB = opt.dataroot
if opt.isTrain:
self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile")
self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean)
print("-------------Filter the imgs whose size <256 in VOC-------------")
self.filtered_imgs_clean = []
for i in range(len(self.loaded_imgs_clean)):
img_name, img = self.loaded_imgs_clean[i]
h, w = img.size
if h < 256 or w < 256:
continue
self.filtered_imgs_clean.append((img_name, img))
print("--------Origin image num is [%d], filtered result is [%d]--------" % (
len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
else:
self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset)
self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir)
self.loaded_masks = BigFileMemoryLoader(opt.irregular_mask)
self.pid = os.getpid()
def __getitem__(self, index):
if self.opt.isTrain:
img_name_clean,B = self.filtered_imgs_clean[index]
path = os.path.join(self.load_img_dir_clean, img_name_clean)
B=transforms.RandomCrop(256)(B)
A=online_add_degradation_v2(B)
### Remind: A is the input and B is corresponding GT
else:
img_name_A,A=self.loaded_imgs[index]
img_name_B,B=self.loaded_imgs[index]
path = os.path.join(self.load_img_dir, img_name_A)
#A=A.resize((256,256))
A=transforms.CenterCrop(256)(A)
B=A
if random.uniform(0,1)<0.1 and self.opt.isTrain:
A=A.convert("L")
B=B.convert("L")
A=A.convert("RGB")
B=B.convert("RGB")
## with probability P, the RGB image is converted to grayscale (L) and back to RGB
if self.opt.isTrain:
mask_name,mask=self.loaded_masks[random.randint(0,len(self.loaded_masks)-1)]
else:
mask_name, mask = self.loaded_masks[index%100]
mask = mask.resize((self.opt.loadSize, self.opt.loadSize), Image.NEAREST)
if self.opt.random_hole and random.uniform(0,1)>0.5 and self.opt.isTrain:
mask=zero_mask(256)
if self.opt.no_hole:
mask=zero_mask(256)
A,_=irregular_hole_synthesize(A,mask)
if not self.opt.isTrain and self.opt.hole_image_no_mask:
mask=zero_mask(256)
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params)
B_transform = get_transform(self.opt, transform_params)
if transform_params['flip'] and self.opt.isTrain:
mask=mask.transpose(Image.FLIP_LEFT_RIGHT)
mask_tensor = transforms.ToTensor()(mask)
B_tensor = inst_tensor = feat_tensor = 0
A_tensor = A_transform(A)
B_tensor = B_transform(B)
input_dict = {'label': A_tensor, 'inst': mask_tensor[:1], 'image': B_tensor,
'feat': feat_tensor, 'path': path}
return input_dict
def __len__(self):
if self.opt.isTrain:
return len(self.filtered_imgs_clean)
else:
return len(self.loaded_imgs)
def name(self):
return 'PairOldPhotos_with_hole'
|
Bringing-Old-Photos-Back-to-Life/Global/data/online_dataset_for_old_photos.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/data/online_dataset_for_old_photos.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 7642
}
| 160 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument("--ntest", type=int, default=float("inf"), help="# of test examples.")
self.parser.add_argument("--results_dir", type=str, default="./results/", help="saves results here.")
self.parser.add_argument(
"--aspect_ratio", type=float, default=1.0, help="aspect ratio of result images"
)
self.parser.add_argument("--phase", type=str, default="test", help="train, val, test, etc")
self.parser.add_argument(
"--which_epoch",
type=str,
default="latest",
help="which epoch to load? set to latest to use latest cached model",
)
self.parser.add_argument("--how_many", type=int, default=50, help="how many test images to run")
self.parser.add_argument(
"--cluster_path",
type=str,
default="features_clustered_010.npy",
help="the path for clustered results of encoded features",
)
self.parser.add_argument(
"--use_encoded_image",
action="store_true",
help="if specified, encode the real image to get the feature map",
)
self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file")
self.parser.add_argument("--engine", type=str, help="run serialized TRT engine")
self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT")
self.parser.add_argument(
"--start_epoch",
type=int,
default=-1,
help="write the start_epoch of iter.txt into this parameter",
)
self.parser.add_argument("--test_dataset", type=str, default="Real_RGB_old.bigfile")
self.parser.add_argument(
"--no_degradation",
action="store_true",
help="when train the mapping, enable this parameter --> no degradation will be added into clean image",
)
self.parser.add_argument(
"--no_load_VAE",
action="store_true",
help="when train the mapping, enable this parameter --> random initialize the encoder an decoder",
)
self.parser.add_argument(
"--use_v2_degradation",
action="store_true",
help="enable this parameter --> 4 kinds of degradations will be used to synthesize corruption",
)
self.parser.add_argument("--use_vae_which_epoch", type=str, default="latest")
self.isTrain = False
self.parser.add_argument("--generate_pair", action="store_true")
self.parser.add_argument("--multi_scale_test", type=float, default=0.5)
self.parser.add_argument("--multi_scale_threshold", type=float, default=0.5)
self.parser.add_argument(
"--mask_need_scale",
action="store_true",
help="enable this param meas that the pixel range of mask is 0-255",
)
self.parser.add_argument("--scale_num", type=int, default=1)
self.parser.add_argument(
"--save_feature_url", type=str, default="", help="While extracting the features, where to put"
)
self.parser.add_argument(
"--test_input", type=str, default="", help="A directory or a root of bigfile"
)
self.parser.add_argument("--test_mask", type=str, default="", help="A directory or a root of bigfile")
self.parser.add_argument("--test_gt", type=str, default="", help="A directory or a root of bigfile")
self.parser.add_argument(
"--scale_input", action="store_true", help="While testing, choose to scale the input firstly"
)
self.parser.add_argument(
"--save_feature_name", type=str, default="features.json", help="The name of saved features"
)
self.parser.add_argument(
"--test_rgb_old_wo_scratch", action="store_true", help="Same setting with origin test"
)
self.parser.add_argument("--test_mode", type=str, default="Crop", help="Scale|Full|Crop")
self.parser.add_argument("--Quality_restore", action="store_true", help="For RGB images")
self.parser.add_argument(
"--Scratch_and_Quality_restore", action="store_true", help="For scratched images"
)
self.parser.add_argument("--HR", action='store_true',help='Large input size with scratches')
|
Bringing-Old-Photos-Back-to-Life/Global/options/test_options.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/options/test_options.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1939
}
| 161 |
"""
This is an example using CLAP for zero-shot inference.
"""
from msclap import CLAP
import torch.nn.functional as F
# Define classes for zero-shot
# Should be in lower case and can be more than one word
classes = ['coughing','sneezing','drinking sipping', 'breathing', 'brushing teeth']
ground_truth = ['coughing']
# Add prompt
prompt = 'this is a sound of '
class_prompts = [prompt + x for x in classes]
#Load audio files
audio_files = ['audio_file']
# Load and initialize CLAP
# Setting use_cuda = True will load the model on a GPU using CUDA
clap_model = CLAP(version = '2023', use_cuda=False)
# compute text embeddings from natural text
text_embeddings = clap_model.get_text_embeddings(class_prompts)
# compute the audio embeddings from an audio file
audio_embeddings = clap_model.get_audio_embeddings(audio_files, resample=True)
# compute the similarity between audio_embeddings and text_embeddings
similarity = clap_model.compute_similarity(audio_embeddings, text_embeddings)
similarity = F.softmax(similarity, dim=1)
values, indices = similarity[0].topk(5)
# Print the results
print("Ground Truth: {}".format(ground_truth))
print("Top predictions:\n")
for value, index in zip(values, indices):
print(f"{classes[index]:>16s}: {100 * value.item():.2f}%")
"""
The output (the exact numbers may vary):
Ground Truth: coughing
Top predictions:
coughing: 98.55%
sneezing: 1.24%
drinking sipping: 0.15%
breathing: 0.02%
brushing teeth: 0.01%
"""
|
CLAP/examples/zero_shot_predictions.py/0
|
{
"file_path": "CLAP/examples/zero_shot_predictions.py",
"repo_id": "CLAP",
"token_count": 505
}
| 162 |
# COCO-LM (Fairseq)
This directory contains the Fairseq version of scripts for fine-tuning COCO-LM pretrained models on GLUE and SQuAD benchmarks. The scripts are based on the [Fairseq Library](https://github.com/pytorch/fairseq).
Paper: [COCO-LM: Correcting and Contrasting Text Sequences for Language Model Pretraining](https://arxiv.org/abs/2102.08473)
## Requirements
The scripts require Python 3.6+ and Pytorch 1.5.0+. In addition, you need to install the codebase by running:
```
bash install.sh
```
## Pretrained Models
We release two COCO-LM pretrained models, [`cocolm-base`](https://github.com/microsoft/COCO-LM/releases/download/v0.1.0/cocolm-base.tar.gz) and [`cocolm-large`](https://github.com/microsoft/COCO-LM/releases/download/v0.1.0/cocolm-large.tar.gz) (**Note: Please follow the links here to download them; do not use the huggingface version of pretrained models as they are not compatible with Fairseq**), which correspond to the `base++` and `large++` models mentioned in the paper, respectively. You need to extract the models from the archives.
## GLUE Fine-tuning
The [General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) benchmark is a collection of sentence- or sentence-pair language understanding tasks for evaluating and analyzing natural language understanding systems.
**Download GLUE Data**: You can download the [GLUE data](https://gluebenchmark.com/tasks) by running [this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e) and unpack it to some directory.
**Process GLUE Data**: Since Fairseq training takes binary input files, you need to first preprocess the GLUE data to generate binary files by running the following:
```
cd preprocess/glue
bash process.sh <glue_data_folder> <task_name> <dict_dir> <output>
```
where `<glue_data_folder>` is the path of the raw GLUE data; `<task_name>` is one of the following: `{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}`; use `ALL` for preprocessing all the GLUE tasks; `<dict_dir>` is the directory containing two dictionary files `sp.model` and `dict.txt` which can be downloaded [here](https://github.com/microsoft/COCO-LM/releases/download/v0.1.0/dict.tar.gz); `<output>` is the output directory for processed GLUE data.
**Fine-Tuning**: After preprocessing the GLUE data, you can run the [`run_glue.sh`](run_glue.sh) script for fine-tuning on each GLUE task. An example for using the script for fine-tuning on MNLI is shown below:
```
ARCH=cocolm_base
TASK=MNLI
PRETRAINED_MODEL_PATH=/path/to/cocolm_base/model.pt
GLUE_DATA_DIR=/path/to/processed/glue_data
OUT_PATH=./glue_finetune/cocolm_base
BSZ=32
LR=2e-5
EPOCH=2
WARMUP=16
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_glue.sh $TASK $PRETRAINED_MODEL_PATH $GLUE_DATA_DIR $OUT_PATH $ARCH $EPOCH $WARMUP $BSZ $LR $SEED
```
**Note: The `WARMUP` argument is the reciprocal of the warmup ratio (e.g., `WARMUP=16` means that we are using a 6.25% warmup ratio)**
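As a quick sanity check of this convention, here is a minimal Python sketch (the helper name is ours, not part of the released scripts) for converting a desired warmup ratio into the `WARMUP` argument:
```python
# Hypothetical helper: WARMUP is the reciprocal of the warmup ratio
# used by run_glue.sh / run_squad.sh.
def warmup_arg(warmup_ratio: float) -> int:
    return round(1.0 / warmup_ratio)

assert warmup_arg(0.0625) == 16  # 6.25% warmup ratio -> WARMUP=16
assert warmup_arg(0.10) == 10    # 10% warmup ratio   -> WARMUP=10
```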
**Optimal Hyperparameters**: The fine-tuning hyperparameters leading to the best dev set performance in our experiments are shown below (please note that the results and optimal hyperparameters might slightly differ in your runs due to different computation environments):
* COCO-LM base++
| | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| BSZ | 32/32 | 32 | 32 | 32 | 16 | 16 | 16 | 16 |
| LR | 2e-5/1e-5 | 3e-5 | 1e-5 | 2e-5 | 2e-5 | 2e-5 | 4e-5 | 4e-5 |
| EPOCH | 2/2 | 5 | 5 | 5 | 10 | 10 | 10 | 10 |
| WARMUP | 16/16 | 16 | 16 | 16 | 16 | 10 | 16 | 16 |
| Result | 90.2/90.0 | 92.2 | 94.2 | 94.6 | 67.3 | 87.4 | 91.2 | 91.8 |
* COCO-LM large++
| | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| BSZ | 32/32 | 32 | 32 | 32 | 16 | 32 | 16 | 16 |
| LR | 5e-6/5e-6 | 1e-5 | 7e-6 | 5e-6 | 1e-5 | 2e-5 | 2e-5 | 2e-5 |
| EPOCH | 3/2 | 5 | 2 | 2 | 10 | 5 | 5 | 10 |
| WARMUP | 16/16 | 16 | 16 | 16 | 10 | 16 | 16 | 16 |
| Result | 91.4/91.6 | 92.8 | 95.7 | 96.9 | 73.9 | 91.0 | 92.2 | 92.7 |
## SQuAD 2.0 Fine-tuning
[Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
**Download & Process SQuAD Data**: Since Fairseq training takes binary input files, you need to first preprocess the SQuAD data to generate pickle files by running the following (SQuAD raw data will be automatically downloaded):
```
cd preprocess/squad
bash process.sh <squad_data_folder> <dict_dir>
```
where `<squad_data_folder>` is the path where raw and processed SQuAD data will be stored to; `<dict_dir>` is the directory containing two dictionary files `sp.model` and `dict.txt` which can be downloaded [here](https://github.com/microsoft/COCO-LM/releases/download/v0.1.0/dict.tar.gz).
**Fine-Tuning**: After preprocessing the SQuAD data, you can run the [`run_squad.sh`](run_squad.sh) script for fine-tuning on SQuAD 2.0. An example for using the script is shown below:
```
ARCH=cocolm_base
PRETRAINED_MODEL_PATH=/path/to/cocolm_base/model.pt
DATA_DIR=/path/to/processed/squad2_data
OUT_PATH=./squad2_finetune/cocolm_base
BSZ=32
LR=2e-5
EPOCH=3
WARMUP=10
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_squad.sh $PRETRAINED_MODEL_PATH $DATA_DIR $OUT_PATH $ARCH $EPOCH $WARMUP $BSZ $LR $SEED
```
**Note: The `WARMUP` argument is the reciprocal of the warmup ratio (e.g., `WARMUP=16` means that we are using a 6.25% warmup ratio)**
**Optimal Hyperparameters**: The fine-tuning hyperparameters leading to the best dev set performance in our experiments are shown below (please note that the results and optimal hyperparameters might slightly differ in your runs due to different computation environments):
* COCO-LM base++
| | EM | F1 |
| ------ | ------ | ------ |
| BSZ | 32 | 32 |
| LR | 2e-5 | 2e-5 |
| EPOCH | 3 | 3 |
| WARMUP | 10 | 10 |
| Result | 85.4 | 88.1 |
* COCO-LM large++
| | EM | F1 |
| ------ | ------ | ------ |
| BSZ | 32 | 32 |
| LR | 2e-5 | 2e-5 |
| EPOCH | 2 | 3 |
| WARMUP | 16 | 10 |
| Result | 88.3 | 91.0 |
|
COCO-LM/fairseq/README.md/0
|
{
"file_path": "COCO-LM/fairseq/README.md",
"repo_id": "COCO-LM",
"token_count": 2277
}
| 163 |
Modules
=======
Fairseq provides several stand-alone :class:`torch.nn.Module` classes that may
be helpful when implementing a new :class:`~fairseq.models.BaseFairseqModel`.
.. automodule:: fairseq.modules
:members:
:undoc-members:
|
COCO-LM/fairseq/docs/modules.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/modules.rst",
"repo_id": "COCO-LM",
"token_count": 79
}
| 164 |
# Understanding Back-Translation at Scale (Edunov et al., 2018)
This page includes pre-trained models from the paper [Understanding Back-Translation at Scale (Edunov et al., 2018)](https://arxiv.org/abs/1808.09381).
## Pre-trained models
Model | Description | Dataset | Download
---|---|---|---
`transformer.wmt18.en-de` | Transformer <br> ([Edunov et al., 2018](https://arxiv.org/abs/1808.09381)) <br> WMT'18 winner | [WMT'18 English-German](http://www.statmt.org/wmt18/translation-task.html) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz) <br> See NOTE in the archive
## Example usage (torch.hub)
We require a few additional Python dependencies for preprocessing:
```bash
pip install subword_nmt sacremoses
```
Then to generate translations from the full model ensemble:
```python
import torch
# List available models
torch.hub.list('pytorch/fairseq') # [..., 'transformer.wmt18.en-de', ... ]
# Load the WMT'18 En-De ensemble
en2de_ensemble = torch.hub.load(
'pytorch/fairseq', 'transformer.wmt18.en-de',
checkpoint_file='wmt18.model1.pt:wmt18.model2.pt:wmt18.model3.pt:wmt18.model4.pt:wmt18.model5.pt',
tokenizer='moses', bpe='subword_nmt')
# The ensemble contains 5 models
len(en2de_ensemble.models)
# 5
# Translate
en2de_ensemble.translate('Hello world!')
# 'Hallo Welt!'
```
## Training your own model (WMT'18 English-German)
The following instructions can be adapted to reproduce the models from the paper.
#### Step 1. Prepare parallel data and optionally train a baseline (English-German) model
First download and preprocess the data:
```bash
# Download and prepare the data
cd examples/backtranslation/
bash prepare-wmt18en2de.sh
cd ../..
# Binarize the data
TEXT=examples/backtranslation/wmt18_en_de
fairseq-preprocess \
--joined-dictionary \
--source-lang en --target-lang de \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/wmt18_en_de --thresholdtgt 0 --thresholdsrc 0 \
--workers 20
# Copy the BPE code into the data-bin directory for future use
cp examples/backtranslation/wmt18_en_de/code data-bin/wmt18_en_de/code
```
(Optionally) Train a baseline model (English-German) using just the parallel data:
```bash
CHECKPOINT_DIR=checkpoints_en_de_parallel
fairseq-train --fp16 \
data-bin/wmt18_en_de \
--source-lang en --target-lang de \
--arch transformer_wmt_en_de_big --share-all-embeddings \
--dropout 0.3 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr 0.001 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
--max-tokens 3584 --update-freq 16 \
--max-update 30000 \
--save-dir $CHECKPOINT_DIR
# Note: the above command assumes 8 GPUs. Adjust `--update-freq` if you have a
# different number of GPUs.
```
Average the last 10 checkpoints:
```bash
python scripts/average_checkpoints.py \
--inputs $CHECKPOINT_DIR \
--num-epoch-checkpoints 10 \
--output $CHECKPOINT_DIR/checkpoint.avg10.pt
```
Evaluate BLEU:
```bash
# tokenized BLEU on newstest2017:
bash examples/backtranslation/tokenized_bleu.sh \
wmt17 \
en-de \
data-bin/wmt18_en_de \
data-bin/wmt18_en_de/code \
$CHECKPOINT_DIR/checkpoint.avg10.pt
# BLEU4 = 29.57, 60.9/35.4/22.9/15.5 (BP=1.000, ratio=1.014, syslen=63049, reflen=62152)
# compare to 29.46 in Table 1, which is also for tokenized BLEU
# generally it's better to report (detokenized) sacrebleu though:
bash examples/backtranslation/sacrebleu.sh \
wmt17 \
en-de \
data-bin/wmt18_en_de \
data-bin/wmt18_en_de/code \
$CHECKPOINT_DIR/checkpoint.avg10.pt
# BLEU+case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt17+tok.13a+version.1.4.3 = 29.0 60.6/34.7/22.4/14.9 (BP = 1.000 ratio = 1.013 hyp_len = 62099 ref_len = 61287)
```
#### Step 2. Back-translate monolingual German data
Train a reverse model (German-English) to do the back-translation:
```bash
CHECKPOINT_DIR=checkpoints_de_en_parallel
fairseq-train --fp16 \
data-bin/wmt18_en_de \
--source-lang de --target-lang en \
--arch transformer_wmt_en_de_big --share-all-embeddings \
--dropout 0.3 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr 0.001 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
--max-tokens 3584 --update-freq 16 \
--max-update 30000 \
--save-dir $CHECKPOINT_DIR
# Note: the above command assumes 8 GPUs. Adjust `--update-freq` if you have a
# different number of GPUs.
```
Let's evaluate the back-translation (BT) model to make sure it is well trained:
```bash
bash examples/backtranslation/sacrebleu.sh \
wmt17 \
de-en \
data-bin/wmt18_en_de \
data-bin/wmt18_en_de/code \
    $CHECKPOINT_DIR/checkpoint_best.pt
# BLEU+case.mixed+lang.de-en+numrefs.1+smooth.exp+test.wmt17+tok.13a+version.1.4.3 = 34.9 66.9/41.8/28.5/19.9 (BP = 0.983 ratio = 0.984 hyp_len = 63342 ref_len = 64399)
# compare to the best system from WMT'17 which scored 35.1: http://matrix.statmt.org/matrix/systems_list/1868
```
Next prepare the monolingual data:
```bash
# Download and prepare the monolingual data
# By default the script samples 25M monolingual sentences, which after
# deduplication should be just over 24M sentences. These are split into 25
# shards, each with 1M sentences (except for the last shard).
cd examples/backtranslation/
bash prepare-de-monolingual.sh
cd ../..
# Binarize each shard of the monolingual data
TEXT=examples/backtranslation/wmt18_de_mono
for SHARD in $(seq -f "%02g" 0 24); do \
fairseq-preprocess \
--only-source \
--source-lang de --target-lang en \
--joined-dictionary \
--srcdict data-bin/wmt18_en_de/dict.de.txt \
--testpref $TEXT/bpe.monolingual.dedup.${SHARD} \
--destdir data-bin/wmt18_de_mono/shard${SHARD} \
--workers 20; \
cp data-bin/wmt18_en_de/dict.en.txt data-bin/wmt18_de_mono/shard${SHARD}/; \
done
```
Now we're ready to perform back-translation over the monolingual data. The
following command generates via sampling, but it's possible to use greedy
decoding (`--beam 1`), beam search (`--beam 5`),
top-k sampling (`--sampling --beam 1 --sampling-topk 10`), etc.:
```bash
mkdir backtranslation_output
for SHARD in $(seq -f "%02g" 0 24); do \
fairseq-generate --fp16 \
data-bin/wmt18_de_mono/shard${SHARD} \
--path $CHECKPOINT_DIR/checkpoint_best.pt \
--skip-invalid-size-inputs-valid-test \
--max-tokens 4096 \
--sampling --beam 1 \
> backtranslation_output/sampling.shard${SHARD}.out; \
done
```
After BT, use the `extract_bt_data.py` script to re-combine the shards, extract
the back-translations and apply length ratio filters:
```bash
python examples/backtranslation/extract_bt_data.py \
--minlen 1 --maxlen 250 --ratio 1.5 \
--output backtranslation_output/bt_data --srclang en --tgtlang de \
backtranslation_output/sampling.shard*.out
# Ensure lengths are the same:
# wc -l backtranslation_output/bt_data.{en,de}
# 21795614 backtranslation_output/bt_data.en
# 21795614 backtranslation_output/bt_data.de
# 43591228 total
```
Binarize the filtered BT data and combine it with the parallel data:
```bash
TEXT=backtranslation_output
fairseq-preprocess \
--source-lang en --target-lang de \
--joined-dictionary \
--srcdict data-bin/wmt18_en_de/dict.en.txt \
--trainpref $TEXT/bt_data \
--destdir data-bin/wmt18_en_de_bt \
--workers 20
# We want to train on the combined data, so we'll symlink the parallel + BT data
# in the wmt18_en_de_para_plus_bt directory. We link the parallel data as "train"
# and the BT data as "train1", so that fairseq will combine them automatically
# and so that we can use the `--upsample-primary` option to upsample the
# parallel data (if desired).
PARA_DATA=$(readlink -f data-bin/wmt18_en_de)
BT_DATA=$(readlink -f data-bin/wmt18_en_de_bt)
COMB_DATA=data-bin/wmt18_en_de_para_plus_bt
mkdir -p $COMB_DATA
for LANG in en de; do \
ln -s ${PARA_DATA}/dict.$LANG.txt ${COMB_DATA}/dict.$LANG.txt; \
for EXT in bin idx; do \
ln -s ${PARA_DATA}/train.en-de.$LANG.$EXT ${COMB_DATA}/train.en-de.$LANG.$EXT; \
ln -s ${BT_DATA}/train.en-de.$LANG.$EXT ${COMB_DATA}/train1.en-de.$LANG.$EXT; \
ln -s ${PARA_DATA}/valid.en-de.$LANG.$EXT ${COMB_DATA}/valid.en-de.$LANG.$EXT; \
ln -s ${PARA_DATA}/test.en-de.$LANG.$EXT ${COMB_DATA}/test.en-de.$LANG.$EXT; \
done; \
done
```
#### Step 3. Train an English-German model over the combined parallel + BT data
Finally we can train a model over the parallel + BT data:
```bash
CHECKPOINT_DIR=checkpoints_en_de_parallel_plus_bt
fairseq-train --fp16 \
data-bin/wmt18_en_de_para_plus_bt \
--upsample-primary 16 \
--source-lang en --target-lang de \
--arch transformer_wmt_en_de_big --share-all-embeddings \
--dropout 0.3 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr 0.0007 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
--max-tokens 3584 --update-freq 16 \
--max-update 100000 \
--save-dir $CHECKPOINT_DIR
# Note: the above command assumes 8 GPUs. Adjust `--update-freq` if you have a
# different number of GPUs.
```
Average the last 10 checkpoints:
```bash
python scripts/average_checkpoints.py \
--inputs $CHECKPOINT_DIR \
--num-epoch-checkpoints 10 \
--output $CHECKPOINT_DIR/checkpoint.avg10.pt
```
Evaluate BLEU:
```bash
# tokenized BLEU on newstest2017:
bash examples/backtranslation/tokenized_bleu.sh \
wmt17 \
en-de \
data-bin/wmt18_en_de \
data-bin/wmt18_en_de/code \
$CHECKPOINT_DIR/checkpoint.avg10.pt
# BLEU4 = 32.35, 64.4/38.9/26.2/18.3 (BP=0.977, ratio=0.977, syslen=60729, reflen=62152)
# compare to 32.35 in Table 1, which is also for tokenized BLEU
# generally it's better to report (detokenized) sacrebleu:
bash examples/backtranslation/sacrebleu.sh \
wmt17 \
en-de \
data-bin/wmt18_en_de \
data-bin/wmt18_en_de/code \
$CHECKPOINT_DIR/checkpoint.avg10.pt
# BLEU+case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt17+tok.13a+version.1.4.3 = 31.5 64.3/38.2/25.6/17.6 (BP = 0.971 ratio = 0.971 hyp_len = 59515 ref_len = 61287)
```
## Citation
```bibtex
@inproceedings{edunov2018backtranslation,
title = {Understanding Back-Translation at Scale},
author = {Edunov, Sergey and Ott, Myle and Auli, Michael and Grangier, David},
booktitle = {Conference of the Association for Computational Linguistics (ACL)},
year = 2018,
}
```
|
COCO-LM/fairseq/examples/backtranslation/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/backtranslation/README.md",
"repo_id": "COCO-LM",
"token_count": 4297
}
| 165 |
# (Vectorized) Lexically constrained decoding with dynamic beam allocation
This page provides instructions for how to use lexically constrained decoding in Fairseq.
Fairseq implements the code described in the following papers:
* [Fast Lexically Constrained Decoding With Dynamic Beam Allocation](https://www.aclweb.org/anthology/N18-1119/) (Post & Vilar, 2018)
* [Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting](https://www.aclweb.org/anthology/N19-1090/) (Hu et al., 2019)
## Quick start
Constrained search is enabled by adding the command-line argument `--constraints` to `fairseq-interactive`.
Constraints are appended to each line of input, separated by tabs. Each constraint (one or more tokens)
is a separate field.
The following command, using [Fairseq's WMT19 German--English model](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md),
translates the sentence *Die maschinelle Übersetzung ist schwer zu kontrollieren.* with the constraints
"hard" and "to influence".
echo -e "Die maschinelle Übersetzung ist schwer zu kontrollieren.\thard\ttoinfluence" \
| normalize.py | tok.py \
| fairseq-interactive /path/to/model \
--path /path/to/model/model1.pt \
--bpe fastbpe \
--bpe-codes /path/to/model/bpecodes \
--constraints \
-s de -t en \
--beam 10
(tok.py and normalize.py can be found in the same directory as this README; they are just shortcuts around Fairseq's WMT19 preprocessing).
This will generate the following output:
[snip]
S-0 Die masch@@ in@@ elle Über@@ setzung ist schwer zu kontrollieren .
W-0 1.844 seconds
C-0 hard
C-0 influence
H-0 -1.5333266258239746 Mach@@ ine trans@@ lation is hard to influence .
D-0 -1.5333266258239746 Machine translation is hard to influence .
P-0 -0.5434 -0.1423 -0.1930 -0.1415 -0.2346 -1.8031 -0.1701 -11.7727 -0.1815 -0.1511
By default, constraints are generated in the order supplied, with any number (zero or more) of tokens generated
between constraints. If you wish for the decoder to order the constraints, then use `--constraints unordered`.
Note that you may want to use a larger beam.
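Because the constrained input is just a tab-separated line, it can also be built programmatically before piping it into `fairseq-interactive`. A minimal sketch (the helper below is ours, not part of fairseq):
```python
# Build the tab-separated input expected by `fairseq-interactive --constraints`:
# the source sentence first, then one field per (possibly multi-token) constraint.
def make_constrained_line(sentence, constraints):
    return "\t".join([sentence] + list(constraints))

line = make_constrained_line(
    "Die maschinelle Übersetzung ist schwer zu kontrollieren.",
    ["hard", "to influence"],
)
print(line)  # pipe this (after normalization/tokenization) into fairseq-interactive
```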
## Implementation details
The heart of the implementation is in `fairseq/search.py`, which adds a `LexicallyConstrainedBeamSearch` instance.
This instance of beam search tracks the progress of each hypothesis in the beam through the set of constraints
provided for each input sentence. It does this using one of two classes, both found in `fairseq/token_generation_constraints.py`:
* OrderedConstraintState: assumes the `C` input constraints will be generated in the provided order
* UnorderedConstraintState: tries to apply `C` (phrasal) constraints in all `C!` orders
## Differences from Sockeye
There are a number of [differences from Sockeye's implementation](https://awslabs.github.io/sockeye/inference.html#lexical-constraints).
* Generating constraints in the order supplied (the default option here) is not available in Sockeye.
* Due to an improved beam allocation method, there is no need to prune the beam.
* Again due to better allocation, beam sizes as low as 10 or even 5 are often sufficient.
* [The vector extensions described in Hu et al.](https://github.com/edwardjhu/sockeye/tree/trie_constraints) (NAACL 2019) were never merged
into the main Sockeye branch.
## Citation
The paper first describing lexical constraints for seq2seq decoding is:
```bibtex
@inproceedings{hokamp-liu-2017-lexically,
title = "Lexically Constrained Decoding for Sequence Generation Using Grid Beam Search",
author = "Hokamp, Chris and
Liu, Qun",
booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P17-1141",
doi = "10.18653/v1/P17-1141",
pages = "1535--1546",
}
```
The fairseq implementation uses the extensions described in
```bibtex
@inproceedings{post-vilar-2018-fast,
title = "Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation",
author = "Post, Matt and
Vilar, David",
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
month = jun,
year = "2018",
address = "New Orleans, Louisiana",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/N18-1119",
doi = "10.18653/v1/N18-1119",
pages = "1314--1324",
}
```
and
```bibtex
@inproceedings{hu-etal-2019-improved,
title = "Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting",
author = "Hu, J. Edward and
Khayrallah, Huda and
Culkin, Ryan and
Xia, Patrick and
Chen, Tongfei and
Post, Matt and
Van Durme, Benjamin",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/N19-1090",
doi = "10.18653/v1/N19-1090",
pages = "839--850",
}
```
|
COCO-LM/fairseq/examples/constrained_decoding/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/constrained_decoding/README.md",
"repo_id": "COCO-LM",
"token_count": 1792
}
| 166 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.search import Search
class NoisyChannelBeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.fw_scores_buf = None
self.lm_scores_buf = None
def _init_buffers(self, t):
# super()._init_buffers(t)
if self.fw_scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
self.fw_scores_buf = t.new()
self.lm_scores_buf = t.new()
def combine_fw_bw(self, combine_method, fw_cum, bw, step):
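        # Combine the cumulative forward (direct model) score with the channel-side
        # score `bw`: for "noisy_channel" the forward score is first length-normalized
        # by the number of generated tokens (step + 1); for "lm_only" the unnormalized
        # cumulative forward score is added.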
if combine_method == "noisy_channel":
fw_norm = fw_cum.div(step + 1)
lprobs = bw + fw_norm
elif combine_method == "lm_only":
lprobs = bw + fw_cum
return lprobs
def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
self._init_buffers(fw_lprobs)
bsz, beam_size, vocab_size = fw_lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
# nothing to add since we are at the first step
fw_lprobs_cum = fw_lprobs
else:
# make probs contain cumulative scores for each hypothesis
raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
fw_lprobs_cum = (fw_lprobs.add(raw_scores))
combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)
# choose the top k according to the combined noisy channel model score
torch.topk(
combined_lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
combined_lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
# save corresponding fw and lm scores
self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)
# Project back into relative indices and beams
self.beams_buf = self.indices_buf // vocab_size
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
|
COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py",
"repo_id": "COCO-LM",
"token_count": 1386
}
| 167 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import numpy as np
from fairseq.data import BaseWrapperDataset, FairseqDataset, iterators
class MultiItr(object):
def __init__(self, itr):
self.itr = itr
self._counts = [0 for x in itr]
def __len__(self):
return sum(len(itr) for itr in self.itr)
def __iter__(self):
return self
def __next__(self):
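        # Advance the sub-iterator that is least far through its own data
        # (smallest consumed fraction), so examples are drawn from the datasets
        # roughly in proportion to their sizes.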
ratios = [count / len(itr) for count, itr in zip(self._counts, self.itr)]
idx = ratios.index(min(ratios))
self._counts[idx] += 1
return next(self.itr[idx])
class MultidatasetEpochBatchIterator(iterators.EpochBatchIterating):
"""A wrapper around multiple epoch batch iterators."""
def __init__(
self,
dataset,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
):
assert isinstance(dataset, OrderedDict)
assert len(dataset)
assert isinstance(dataset[next(iter(dataset))], FairseqDataset)
self.iterators = []
self.epoch = epoch
for key, dt in dataset.items():
epoch_iter = iterators.EpochBatchIterator(
dataset=dt,
collate_fn=dt.collater,
batch_sampler=batch_sampler[key],
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=0,
epoch=epoch,
)
self.iterators.append(epoch_iter)
def __len__(self):
return sum(len(itr) for itr in self.iterators)
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
# `self.epoch += 1` should be handled by underlying `EpochBatchIterator`s.
return MultiItr(
[
itr.next_epoch_itr(
shuffle=shuffle, fix_batches_to_gpus=fix_batches_to_gpus
)
for itr in self.iterators
]
)
def end_of_epoch(self):
return all(itr.end_of_epoch() for itr in self.iterators)
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
epochs = [itr.next_epoch_idx for itr in self.iterators]
self.epoch = epochs[0]
assert all(epoch == self.epoch for epoch in epochs)
return self.epoch
@property
def iterations_in_epoch(self):
return sum(itr.iterations_in_epoch for itr in self.iterators)
def state_dict(self):
return {
"iterators": [it.state_dict() for it in self.iterators],
"epoch": self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
for it, d in zip(self.iterators, state_dict["iterators"]):
it.load_state_dict(d)
class MultitaskDatasetWrapper(BaseWrapperDataset):
"""A wrapper for a multitask dataset."""
def __init__(self, dataset, target_language_id, sample=1.0, name=""):
super().__init__(dataset)
self.target_language_id = target_language_id
self.sample = sample
self.name = name
def collater(self, *args, **kwargs):
ans = self.dataset.collater(*args, **kwargs)
if "net_input" in ans:
ans["net_input"]["target_language_id"] = self.target_language_id
ans["net_input"]["dataset_name"] = self.name
return ans
def num_tokens(self, *args, **kwargs):
return self.dataset.num_tokens(*args, **kwargs)
def ordered_indices(self, *args, **kwargs):
indices = self.dataset.ordered_indices(*args, **kwargs)
# Hacky solution for sampling
size = int(self.sample * indices.shape[0])
return indices.take(np.sort(np.random.permutation(indices.shape[0])[:size]))
def size(self, index: int):
return self.dataset.size(index)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
|
COCO-LM/fairseq/examples/laser/laser_src/multitask_data_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/laser/laser_src/multitask_data_utils.py",
"repo_id": "COCO-LM",
"token_count": 2031
}
| 168 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
if [ -z "$WORKDIR_ROOT" ] ;
then
        echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
        exit
fi
IITB=$WORKDIR_ROOT/IITB
mkdir -p $IITB
pushd $IITB
wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/parallel.tgz
tar -xvzf parallel.tgz
wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/dev_test.tgz
tar -xvzf dev_test.tgz
DESTDIR=${WORKDIR_ROOT}/ML50/raw/
cp parallel/IITB.en-hi.en $DESTDIR/train.hi_IN-en_XX.en_XX
cp parallel/IITB.en-hi.hi $DESTDIR/train.hi_IN-en_XX.hi_IN
cp dev_test/dev.en $DESTDIR/valid.hi_IN-en_XX.en_XX
cp dev_test/dev.hi $DESTDIR/valid.hi_IN-en_XX.hi_IN
cp dev_test/test.en $DESTDIR/test.hi_IN-en_XX.en_XX
cp dev_test/test.hi $DESTDIR/test.hi_IN-en_XX.hi_IN
popd
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_iitb.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_iitb.sh",
"repo_id": "COCO-LM",
"token_count": 462
}
| 169 |
# Simple and Effective Noisy Channel Modeling for Neural Machine Translation (Yee et al., 2019)
This page contains pointers to pre-trained models as well as instructions on how to run the reranking scripts.
## Citation:
```bibtex
@inproceedings{yee2019simple,
title = {Simple and Effective Noisy Channel Modeling for Neural Machine Translation},
author = {Kyra Yee and Yann Dauphin and Michael Auli},
booktitle = {Conference on Empirical Methods in Natural Language Processing},
year = {2019},
}
```
## Pre-trained Models:
Model | Description | Download
---|---|---
`transformer.noisychannel.de-en` | De->En Forward Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/forward_de2en.tar.bz2)
`transformer.noisychannel.en-de` | En->De Channel Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/backward_en2de.tar.bz2)
`transformer_lm.noisychannel.en` | En Language model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/reranking_en_lm.tar.bz2)
Test Data: [newstest_wmt17](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/wmt17test.tar.bz2)
## Example usage
```
mkdir rerank_example
curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/forward_de2en.tar.bz2 | tar xvjf - -C rerank_example
curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/backward_en2de.tar.bz2 | tar xvjf - -C rerank_example
curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/reranking_en_lm.tar.bz2 | tar xvjf - -C rerank_example
curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/wmt17test.tar.bz2 | tar xvjf - -C rerank_example
beam=50
num_trials=1000
fw_name=fw_model_ex
bw_name=bw_model_ex
lm_name=lm_ex
data_dir=rerank_example/hyphen-splitting-mixed-case-wmt17test-wmt14bpe
data_dir_name=wmt17
lm=rerank_example/lm/checkpoint_best.pt
lm_bpe_code=rerank_example/lm/bpe32k.code
lm_dict=rerank_example/lm/dict.txt
batch_size=32
bw=rerank_example/backward_en2de.pt
fw=rerank_example/forward_de2en.pt
# reranking with P(T|S) P(S|T) and P(T)
python examples/noisychannel/rerank_tune.py $data_dir --tune-param lenpen weight1 weight3 \
--lower-bound 0 0 0 --upper-bound 3 3 3 --data-dir-name $data_dir_name \
--num-trials $num_trials --source-lang de --target-lang en --gen-model $fw \
-n $beam --batch-size $batch_size --score-model2 $fw --score-model1 $bw \
--backwards1 --weight2 1 \
-lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \
--model2-name $fw_name --model1-name $bw_name --gen-model-name $fw_name
# reranking with P(T|S) and P(T)
python examples/noisychannel/rerank_tune.py $data_dir --tune-param lenpen weight3 \
--lower-bound 0 0 --upper-bound 3 3 --data-dir-name $data_dir_name \
--num-trials $num_trials --source-lang de --target-lang en --gen-model $fw \
-n $beam --batch-size $batch_size --score-model1 $fw \
-lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \
--model1-name $fw_name --gen-model-name $fw_name
# to run with a preconfigured set of hyperparameters for the lenpen and model weights, use rerank.py instead.
python examples/noisychannel/rerank.py $data_dir \
--lenpen 0.269 --weight1 1 --weight2 0.929 --weight3 0.831 \
--data-dir-name $data_dir_name --source-lang de --target-lang en --gen-model $fw \
-n $beam --batch-size $batch_size --score-model2 $fw --score-model1 $bw --backwards1 \
-lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \
--model2-name $fw_name --model1-name $bw_name --gen-model-name $fw_name
```
|
COCO-LM/fairseq/examples/noisychannel/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/noisychannel/README.md",
"repo_id": "COCO-LM",
"token_count": 1468
}
| 170 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# raw glue data as downloaded by glue download script (https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
if [[ $# -ne 2 ]]; then
echo "Run as following:"
echo "./examples/roberta/preprocess_GLUE_tasks.sh <glud_data_folder> <task_name>"
exit 1
fi
GLUE_DATA_FOLDER=$1
# download bpe encoder.json, vocabulary and fairseq dictionary
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt'
TASKS=$2 # QQP
if [ "$TASKS" = "ALL" ]
then
TASKS="QQP MNLI QNLI MRPC RTE STS-B SST-2 CoLA"
fi
for TASK in $TASKS
do
echo "Preprocessing $TASK"
TASK_DATA_FOLDER="$GLUE_DATA_FOLDER/$TASK"
echo "Raw data as downloaded from glue website: $TASK_DATA_FOLDER"
SPLITS="train dev test"
INPUT_COUNT=2
if [ "$TASK" = "QQP" ]
then
INPUT_COLUMNS=( 4 5 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=6
elif [ "$TASK" = "MNLI" ]
then
SPLITS="train dev_matched dev_mismatched test_matched test_mismatched"
INPUT_COLUMNS=( 9 10 )
TEST_INPUT_COLUMNS=( 9 10 )
DEV_LABEL_COLUMN=16
LABEL_COLUMN=12
elif [ "$TASK" = "QNLI" ]
then
INPUT_COLUMNS=( 2 3 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=4
elif [ "$TASK" = "MRPC" ]
then
INPUT_COLUMNS=( 4 5 )
TEST_INPUT_COLUMNS=( 4 5 )
LABEL_COLUMN=1
elif [ "$TASK" = "RTE" ]
then
INPUT_COLUMNS=( 2 3 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=4
elif [ "$TASK" = "STS-B" ]
then
INPUT_COLUMNS=( 8 9 )
TEST_INPUT_COLUMNS=( 8 9 )
LABEL_COLUMN=10
# Following are single sentence tasks.
elif [ "$TASK" = "SST-2" ]
then
INPUT_COLUMNS=( 1 )
TEST_INPUT_COLUMNS=( 2 )
LABEL_COLUMN=2
INPUT_COUNT=1
elif [ "$TASK" = "CoLA" ]
then
INPUT_COLUMNS=( 4 )
TEST_INPUT_COLUMNS=( 2 )
LABEL_COLUMN=2
INPUT_COUNT=1
fi
# Strip out header and filter lines that don't have expected number of fields.
rm -rf "$TASK_DATA_FOLDER/processed"
mkdir -p "$TASK_DATA_FOLDER/processed"
for SPLIT in $SPLITS
do
# CoLA train and dev doesn't have header.
if [[ ( "$TASK" = "CoLA") && ( "$SPLIT" != "test" ) ]]
then
cp "$TASK_DATA_FOLDER/$SPLIT.tsv" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp";
else
tail -n +2 "$TASK_DATA_FOLDER/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp";
fi
# Remove unformatted lines from train and dev files for QQP dataset.
if [[ ( "$TASK" = "QQP") && ( "$SPLIT" != "test" ) ]]
then
awk -F '\t' -v NUM_FIELDS=6 'NF==NUM_FIELDS{print}{}' "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp" > "$TASK_DATA_FOLDER/processed/$SPLIT.tsv";
else
cp "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv";
fi
rm "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp";
done
# Split into input0, input1 and label
for SPLIT in $SPLITS
do
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
if [[ "$SPLIT" != test* ]]
then
COLUMN_NUMBER=${INPUT_COLUMNS[$INPUT_TYPE]}
else
COLUMN_NUMBER=${TEST_INPUT_COLUMNS[$INPUT_TYPE]}
fi
cut -f"$COLUMN_NUMBER" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.raw.input$INPUT_TYPE";
done
if [[ "$SPLIT" != test* ]]
then
if [ "$TASK" = "MNLI" ] && [ "$SPLIT" != "train" ]
then
cut -f"$DEV_LABEL_COLUMN" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.label";
else
cut -f"$LABEL_COLUMN" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.label";
fi
fi
# BPE encode.
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
LANG="input$INPUT_TYPE"
echo "BPE encoding $SPLIT/$LANG"
python -m examples.roberta.multiprocessing_bpe_encoder \
--encoder-json encoder.json \
--vocab-bpe vocab.bpe \
--inputs "$TASK_DATA_FOLDER/processed/$SPLIT.raw.$LANG" \
--outputs "$TASK_DATA_FOLDER/processed/$SPLIT.$LANG" \
--workers 60 \
--keep-empty;
done
done
# Remove output directory.
rm -rf "$TASK-bin"
DEVPREF="$TASK_DATA_FOLDER/processed/dev.LANG"
TESTPREF="$TASK_DATA_FOLDER/processed/test.LANG"
if [ "$TASK" = "MNLI" ]
then
DEVPREF="$TASK_DATA_FOLDER/processed/dev_matched.LANG,$TASK_DATA_FOLDER/processed/dev_mismatched.LANG"
TESTPREF="$TASK_DATA_FOLDER/processed/test_matched.LANG,$TASK_DATA_FOLDER/processed/test_mismatched.LANG"
fi
# Run fairseq preprocessing:
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
LANG="input$INPUT_TYPE"
fairseq-preprocess \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.$LANG" \
--validpref "${DEVPREF//LANG/$LANG}" \
--testpref "${TESTPREF//LANG/$LANG}" \
--destdir "$TASK-bin/$LANG" \
--workers 60 \
--srcdict dict.txt;
done
if [[ "$TASK" != "STS-B" ]]
then
fairseq-preprocess \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.label" \
--validpref "${DEVPREF//LANG/label}" \
--destdir "$TASK-bin/label" \
--workers 60;
else
# For STS-B output range is converted to be between: [0.0, 1.0]
mkdir -p "$TASK-bin/label"
awk '{print $1 / 5.0 }' "$TASK_DATA_FOLDER/processed/train.label" > "$TASK-bin/label/train.label"
awk '{print $1 / 5.0 }' "$TASK_DATA_FOLDER/processed/dev.label" > "$TASK-bin/label/valid.label"
fi
done
|
COCO-LM/fairseq/examples/roberta/preprocess_GLUE_tasks.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/preprocess_GLUE_tasks.sh",
"repo_id": "COCO-LM",
"token_count": 2782
}
| 171 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
(
build_monotonic_attention,
register_monotonic_attention,
MONOTONIC_ATTENTION_REGISTRY,
_,
) = registry.setup_registry("--simul-type")
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module(
"examples.simultaneous_translation.modules." + model_name
)
|
COCO-LM/fairseq/examples/simultaneous_translation/modules/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/modules/__init__.py",
"repo_id": "COCO-LM",
"token_count": 248
}
| 172 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
    # If there are fewer than 2 spectrograms, the variance cannot be computed (it is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
    Return:
        encoder_padding_mask: a (max_length, B) binary mask (or a (B, max_length)
            mask if batch_first=True), where [t, b] = 0 for t < lengths[b] and 1 otherwise
        max_length: maximum length of the B sequences
TODO:
kernelize this function if benchmarking shows this function is slow
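    Example:
        for lengths = torch.LongTensor([2, 4, 3]) and batch_first=True, max_length
        is 4 and the returned mask (1 marks padding, i.e. t >= lengths[b]) is
            [[0, 0, 1, 1],
             [0, 0, 0, 0],
             [0, 0, 0, 1]]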
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    encoder_padding_mask = (
        torch.arange(max_lengths)  # a (T, ) tensor with [0, ..., T-1]
        .to(lengths.device)  # move to the right device
        .view(1, max_lengths)  # reshape to a (1, T)-shaped tensor
        .expand(bsz, -1)  # expand to a (B, T)-shaped tensor
        >= lengths.view(bsz, 1).expand(-1, max_lengths)
    )
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
    Conventionally, encoder output contains an encoder_padding_mask, which is
    a 2-D mask of shape (T, B), whose (t, b) element indicates whether
    encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
    need to convert this mask tensor to a 1-D tensor of shape (B, ), where
    [b] denotes the valid length of the b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
        max_lengths: maximum length of all sequences; if encoder_padding_mask is
            not None, max_lengths must equal encoder_padding_mask.size(0)
        batch_size: batch size; if encoder_padding_mask is
            not None, batch_size must equal encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
|
COCO-LM/fairseq/examples/speech_recognition/data/data_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/data/data_utils.py",
"repo_id": "COCO-LM",
"token_count": 1351
}
| 173 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Flashlight decoders.
"""
import gc
import itertools as it
import os.path as osp
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
except:
warnings.warn(
"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
if args.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
self.asg_transitions = None
elif args.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.silence = -1
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert len(self.asg_transitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {args.criterion}")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
encoder_out = model(**encoder_input)
if self.criterion_type == CriterionType.CTC:
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out) # no need to normalize emissions
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
if args.lexicon:
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
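            # advance the raw data pointer to batch element b (4 bytes per float32)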
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
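# Usage sketch (illustrative, not part of the original module): decode() expects
# acoustic-model emissions of shape (batch, time, vocab) as a float32 tensor and returns,
# per utterance, up to `nbest` hypothesis dicts with "tokens", "score" and, when a lexicon
# is configured, "words". `model`, `sample` and `get_emissions` are hypothetical placeholders.
#
#   decoder = W2lFairseqLMDecoder(args, tgt_dict)
#   emissions = get_emissions(model, sample)  # (B, T, N) float32 emission tensor
#   hypos = decoder.decode(emissions)         # hypos[b][0]["words"] is the best word sequence for utterance b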
|
COCO-LM/fairseq/examples/speech_recognition/w2l_decoder.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/w2l_decoder.py",
"repo_id": "COCO-LM",
"token_count": 9008
}
| 174 |
#!/usr/bin/env bash
#
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
LC=$SCRIPTS/tokenizer/lowercase.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
BPEROOT=subword-nmt/subword_nmt
BPE_TOKENS=10000
URL="http://dl.fbaipublicfiles.com/fairseq/data/iwslt14/de-en.tgz"
GZ=de-en.tgz
if [ ! -d "$SCRIPTS" ]; then
echo "Please set SCRIPTS variable correctly to point to Moses scripts."
    exit 1
fi
src=de
tgt=en
lang=de-en
prep=iwslt14.tokenized.de-en
tmp=$prep/tmp
orig=orig
mkdir -p $orig $tmp $prep
echo "Downloading data from ${URL}..."
cd $orig
wget "$URL"
if [ -f $GZ ]; then
echo "Data successfully downloaded."
else
echo "Data not successfully downloaded."
    exit 1
fi
tar zxvf $GZ
cd ..
echo "pre-processing train data..."
for l in $src $tgt; do
f=train.tags.$lang.$l
tok=train.tags.$lang.tok.$l
cat $orig/$lang/$f | \
grep -v '<url>' | \
grep -v '<talkid>' | \
grep -v '<keywords>' | \
sed -e 's/<title>//g' | \
sed -e 's/<\/title>//g' | \
sed -e 's/<description>//g' | \
sed -e 's/<\/description>//g' | \
perl $TOKENIZER -threads 8 -l $l > $tmp/$tok
echo ""
done
perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train.tags.$lang.clean 1 175
for l in $src $tgt; do
perl $LC < $tmp/train.tags.$lang.clean.$l > $tmp/train.tags.$lang.$l
done
echo "pre-processing valid/test data..."
for l in $src $tgt; do
for o in `ls $orig/$lang/IWSLT14.TED*.$l.xml`; do
fname=${o##*/}
f=$tmp/${fname%.*}
echo $o $f
grep '<seg id' $o | \
sed -e 's/<seg id="[0-9]*">\s*//g' | \
sed -e 's/\s*<\/seg>\s*//g' | \
sed -e "s/\’/\'/g" | \
perl $TOKENIZER -threads 8 -l $l | \
perl $LC > $f
echo ""
done
done
echo "creating train, valid, test..."
for l in $src $tgt; do
awk '{if (NR%23 == 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/valid.$l
awk '{if (NR%23 != 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/train.$l
cat $tmp/IWSLT14.TED.dev2010.de-en.$l \
$tmp/IWSLT14.TEDX.dev2012.de-en.$l \
$tmp/IWSLT14.TED.tst2010.de-en.$l \
$tmp/IWSLT14.TED.tst2011.de-en.$l \
$tmp/IWSLT14.TED.tst2012.de-en.$l \
> $tmp/test.$l
done
TRAIN=$tmp/train.en-de
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
cat $tmp/train.$l >> $TRAIN
done
echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
for L in $src $tgt; do
for f in train.$L valid.$L test.$L; do
echo "apply_bpe.py to ${f}..."
python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $prep/$f
done
done
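# Illustrative summary of the output layout (not part of the original script): after a
# successful run of `bash prepare-iwslt14.sh`, the $prep directory
# (iwslt14.tokenized.de-en/) contains BPE-encoded train.{de,en}, valid.{de,en} and
# test.{de,en}, the learned BPE codes in iwslt14.tokenized.de-en/code, and the intermediate
# tokenized/lowercased files under iwslt14.tokenized.de-en/tmp/.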
|
COCO-LM/fairseq/examples/translation/prepare-iwslt14.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/translation/prepare-iwslt14.sh",
"repo_id": "COCO-LM",
"token_count": 1423
}
| 175 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import os
import subprocess
import sys
import tempfile
from collections import defaultdict
from itertools import combinations
def read_translations(path, n_repeats):
segment_counter = 0
segment_translations = []
translations = defaultdict(list)
for line in open(path):
segment_translations.append(" ".join(line.split()))
if len(segment_translations) == n_repeats:
translations[segment_counter] = segment_translations
segment_translations = []
segment_counter += 1
return translations
def generate_input(translations, n_repeats):
_, ref_path = tempfile.mkstemp()
_, mt_path = tempfile.mkstemp()
ref_fh = open(ref_path, "w")
mt_fh = open(mt_path, "w")
for segid in sorted(translations.keys()):
assert len(translations[segid]) == n_repeats
indexes = combinations(range(n_repeats), 2)
for idx1, idx2 in indexes:
mt_fh.write(translations[segid][idx1].strip() + "\n")
ref_fh.write(translations[segid][idx2].strip() + "\n")
sys.stderr.write("\nSaved translations to %s and %s" % (ref_path, mt_path))
return ref_path, mt_path
def run_meteor(ref_path, mt_path, metric_path, lang="en"):
_, out_path = tempfile.mkstemp()
subprocess.call(
[
"java",
"-Xmx2G",
"-jar",
metric_path,
mt_path,
ref_path,
"-p",
"0.5 0.2 0.6 0.75", # default parameters, only changed alpha to give equal weight to P and R
"-norm",
"-l",
lang,
],
stdout=open(out_path, "w"),
)
os.remove(ref_path)
os.remove(mt_path)
sys.stderr.write("\nSaved Meteor output to %s" % out_path)
return out_path
def read_output(meteor_output_path, n_repeats):
n_combinations = math.factorial(n_repeats) / (
math.factorial(2) * math.factorial(n_repeats - 2)
)
raw_scores = []
average_scores = []
for line in open(meteor_output_path):
if not line.startswith("Segment "):
continue
score = float(line.strip().split("\t")[1])
raw_scores.append(score)
if len(raw_scores) == n_combinations:
average_scores.append(sum(raw_scores) / n_combinations)
raw_scores = []
os.remove(meteor_output_path)
return average_scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile")
parser.add_argument("-n", "--repeat_times", type=int)
parser.add_argument("-m", "--meteor")
parser.add_argument("-o", "--output")
args = parser.parse_args()
translations = read_translations(args.infile, args.repeat_times)
sys.stderr.write("\nGenerating input for Meteor...")
ref_path, mt_path = generate_input(translations, args.repeat_times)
sys.stderr.write("\nRunning Meteor...")
out_path = run_meteor(ref_path, mt_path, args.meteor)
sys.stderr.write("\nReading output...")
scores = read_output(out_path, args.repeat_times)
sys.stderr.write("\nWriting results...")
with open(args.output, "w") as o:
for scr in scores:
o.write("{}\n".format(scr))
if __name__ == "__main__":
main()
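# Example invocation (illustrative; the file names are hypothetical):
#
#   python meteor.py \
#       -i translations.txt \      # n_repeats translations per segment, one per line
#       -n 10 \                    # number of repeated translations per segment
#       -m meteor-1.5.jar \        # path to the Meteor jar
#       -o segment_scores.txt      # output: one averaged pairwise Meteor score per segment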
|
COCO-LM/fairseq/examples/unsupervised_quality_estimation/meteor.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/unsupervised_quality_estimation/meteor.py",
"repo_id": "COCO-LM",
"token_count": 1513
}
| 176 |
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
*/
#include <torch/extension.h>
#include <vector>
/*
CPP Binding for CUDA OP
*/
// CUDA forward declarations
torch::Tensor ngram_repeat_block_cuda_forward(torch::Tensor tokens,
torch::Tensor lprobs, int bsz,
int step, int beam_size,
int no_repeat_ngram_size);
#define CHECK_CUDA(x) \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
// Input check and call to CUDA OP
// Backward method not required
torch::Tensor ngram_repeat_block_forward(torch::Tensor tokens,
torch::Tensor lprobs, int bsz,
int step, int beam_size,
int no_repeat_ngram_size) {
CHECK_INPUT(tokens);
CHECK_INPUT(lprobs);
assert(bsz > 0);
assert(step >= 0);
assert(beam_size > 0);
assert(no_repeat_ngram_size > 0);
return ngram_repeat_block_cuda_forward(tokens, lprobs, bsz, step, beam_size,
no_repeat_ngram_size);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &ngram_repeat_block_forward,
"No Repeat Ngram Block forward (CUDA)");
}
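// Illustrative note (assumption, not part of the original source): this binding is compiled
// together with the CUDA kernel that defines ngram_repeat_block_cuda_forward; once built
// (e.g. via torch.utils.cpp_extension.load with both source files), Python code can call
// module.forward(tokens, lprobs, bsz, step, beam_size, no_repeat_ngram_size) with
// contiguous CUDA tensors to mask out banned n-gram continuations in lprobs.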
|
COCO-LM/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"repo_id": "COCO-LM",
"token_count": 778
}
| 177 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, modules, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("masked_lm")
class MaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
"""
def __init__(self, task, tpu=False):
super().__init__(task)
self.tpu = tpu
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
# Rare: when all tokens are masked, project all tokens.
# We use torch.where to avoid device-to-host transfers,
# except on CPU where torch.where is not well supported
# (see github.com/pytorch/pytorch/issues/26247).
if self.tpu:
masked_tokens = None # always project all tokens on TPU
elif masked_tokens.device == torch.device("cpu"):
if not masked_tokens.any():
masked_tokens = None
else:
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
logits = model(**sample["net_input"], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
if masked_tokens is not None:
targets = targets[masked_tokens]
loss = modules.cross_entropy(
logits.view(-1, logits.size(-1)),
targets.view(-1),
reduction="sum",
ignore_index=self.padding_idx,
)
logging_output = {
"loss": loss if self.tpu else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
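# Worked example (illustrative, not part of the original module): with padding_idx = 1 and
# sample["target"] = [[7, 1, 1, 9]], only positions 0 and 3 carry masked-LM targets, so
# masked_tokens = [[True, False, False, True]] and sample_size = 2. The summed cross-entropy
# over those two positions is divided by sample_size and log(2) in reduce_metrics, so the
# reported "loss" is the average per-masked-token loss in bits.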
|
COCO-LM/fairseq/fairseq/criterions/masked_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/masked_lm.py",
"repo_id": "COCO-LM",
"token_count": 1360
}
| 178 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
from .. import FairseqDataset, BaseWrapperDataset
from ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
self.compute_mask_indices = compute_mask_indices
if self.compute_mask_indices:
self.mask_compute_kwargs = mask_compute_kwargs
self._features_size_map = {}
self._C = mask_compute_kwargs['encoder_embed_dim']
self._conv_feature_layers = eval(
mask_compute_kwargs['conv_feature_layers']
)
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def _compute_mask_indices(self, dims, padding_mask):
B, T, C = dims
mask_indices, mask_channel_indices = None, None
if self.mask_compute_kwargs['mask_prob'] > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_compute_kwargs['mask_prob'],
self.mask_compute_kwargs['mask_length'],
self.mask_compute_kwargs['mask_selection'],
self.mask_compute_kwargs['mask_other'],
min_masks=2,
no_overlap=self.mask_compute_kwargs['no_mask_overlap'],
min_space=self.mask_compute_kwargs['mask_min_space'],
)
mask_indices = torch.from_numpy(mask_indices)
if self.mask_compute_kwargs['mask_channel_prob'] > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_compute_kwargs['mask_channel_prob'],
self.mask_compute_kwargs['mask_channel_length'],
self.mask_compute_kwargs['mask_channel_selection'],
self.mask_compute_kwargs['mask_channel_other'],
no_overlap=self.mask_compute_kwargs['no_mask_channel_overlap'],
min_space=self.mask_compute_kwargs['mask_channel_min_space'],
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.unsqueeze(1)
.expand(-1, T, -1)
)
return mask_indices, mask_channel_indices
@staticmethod
def _bucket_tensor(tensor, num_pad, value):
return F.pad(tensor, (0, num_pad), value=value)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
input = {"source": collated_sources}
out = {"id": torch.LongTensor([s["id"] for s in samples])}
if self.pad:
input["padding_mask"] = padding_mask
if hasattr(self, 'num_buckets') and self.num_buckets > 0:
assert self.pad, "Cannot bucket without padding first."
bucket = max(self._bucketed_sizes[s['id']] for s in samples)
num_pad = bucket - collated_sources.size(-1)
if num_pad:
input['source'] = self._bucket_tensor(
collated_sources, num_pad, 0
)
input['padding_mask'] = self._bucket_tensor(
padding_mask, num_pad, True
)
if self.compute_mask_indices:
B = input['source'].size(0)
T = self._get_mask_indices_dims(input['source'].size(-1))
padding_mask_reshaped = input['padding_mask'].clone()
extra = padding_mask_reshaped.size(1) % T
if extra > 0:
padding_mask_reshaped = padding_mask_reshaped[:, :-extra]
padding_mask_reshaped = padding_mask_reshaped.view(
padding_mask_reshaped.size(0), T, -1
)
padding_mask_reshaped = padding_mask_reshaped.all(-1)
input['padding_count'] = (
padding_mask_reshaped.sum(-1).max().item()
)
mask_indices, mask_channel_indices = self._compute_mask_indices(
(B, T, self._C), padding_mask_reshaped,
)
input["mask_indices"] = mask_indices
input["mask_channel_indices"] = mask_channel_indices
out['sample_size'] = mask_indices.sum().item()
out["net_input"] = input
return out
def _get_mask_indices_dims(self, size, padding=0, dilation=1):
if size not in self._features_size_map:
L_in = size
for (_, kernel_size, stride) in self._conv_feature_layers:
L_out = L_in + 2*padding - dilation*(kernel_size-1) - 1
L_out = 1 + L_out // stride
L_in = L_out
self._features_size_map[size] = L_out
return self._features_size_map[size]
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
num_buckets=0,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
pad=pad,
normalize=normalize,
compute_mask_indices=compute_mask_indices,
**mask_compute_kwargs,
)
self.fnames = []
self.line_inds = set()
skipped = 0
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for i, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_sample_size is not None and sz < min_sample_size:
skipped += 1
continue
self.fnames.append(items[0])
self.line_inds.add(i)
self.sizes.append(sz)
self.set_bucket_info(num_buckets)
logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
def set_bucket_info(self, num_buckets):
self.num_buckets = num_buckets
if self.num_buckets > 0:
self._collated_sizes = np.minimum(
np.array(self.sizes), self.max_sample_size,
)
self.buckets = get_buckets(
self._collated_sizes, self.num_buckets,
)
self._bucketed_sizes = get_bucketed_sizes(
self._collated_sizes, self.buckets
)
logger.info(
f"{len(self.buckets)} bucket(s) for the audio dataset: "
f"{self.buckets}"
)
def __getitem__(self, index):
import soundfile as sf
fname = os.path.join(self.root_dir, self.fnames[index])
wav, curr_sample_rate = sf.read(fname)
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
return {"id": index, "source": feats}
|
COCO-LM/fairseq/fairseq/data/audio/raw_audio_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/audio/raw_audio_dataset.py",
"repo_id": "COCO-LM",
"token_count": 5261
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_bpe
from fairseq.data.encoders.byte_utils import (
SPACE,
SPACE_ESCAPE,
byte_encode,
smart_byte_decode,
)
@register_bpe("bytes")
class Bytes(object):
def __init__(self, *unused):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
encoded = byte_encode(x)
escaped = encoded.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
return smart_byte_decode(unescaped)
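# Round-trip sketch (illustrative): encode() rewrites a string as space-separated byte
# symbols (real spaces are escaped first), and decode() reverses the escaping and joins the
# bytes back, so decode(encode(x)) should recover the original UTF-8 text.
#
#   bpe = Bytes()
#   assert bpe.decode(bpe.encode("hello world")) == "hello world"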
|
COCO-LM/fairseq/fairseq/data/encoders/bytes.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/bytes.py",
"repo_id": "COCO-LM",
"token_count": 334
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
def best_fitting_int_dtype(
max_int_to_represent,
) -> Union[np.uint16, np.uint32, np.int64]:
if max_int_to_represent is None:
return np.uint32 # Safe guess
elif max_int_to_represent < 65500:
return np.uint16
elif max_int_to_represent < 4294967295:
return np.uint32
else:
return np.int64
# we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly
# https://github.com/numpy/numpy/issues/5745
def get_available_dataset_impl():
return list(map(str, DATASET_IMPL_CHOICES))
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return "raw"
elif IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
elif FastaDataset.exists(path):
return "fasta"
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == "mmap":
return MMapIndexedDatasetBuilder(
out_file, dtype=best_fitting_int_dtype(vocab_size)
)
elif impl == "fasta":
raise NotImplementedError
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == "raw" and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
elif impl == "fasta" and FastaDataset.exists(path):
from fairseq.data.fasta_dataset import EncodedFastaDataset
return EncodedFastaDataset(path, dictionary)
return None
def dataset_exists(path, impl):
if impl == "raw":
return IndexedRawTextDataset.exists(path)
elif impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
_code_to_dtype = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
    6: float,  # was np.float (alias for builtin float); the alias was removed in NumPy 1.24
7: np.double,
8: np.uint16,
9: np.uint32,
10: np.uint64,
}
def _dtype_header_code(dtype) -> int:
for k in _code_to_dtype.keys():
if _code_to_dtype[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = _code_to_dtype[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i) -> torch.Tensor:
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, "r", encoding="utf-8") as f:
for line in f:
self.lines.append(line.strip("\n"))
tokens = dictionary.encode_line(
line,
add_if_not_exist=False,
append_eos=self.append_eos,
reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError("index out of range")
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return PathManager.exists(path)
class IndexedDatasetBuilder:
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
        float: 4,  # was np.float (alias for builtin float); the alias was removed in NumPy 1.24
np.double: 8,
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), "rb") as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
index.write(b"TNTIDX\x00\x00")
index.write(struct.pack("<Q", 1))
index.write(
struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size)
)
index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index:
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@classmethod
def writer(cls, path, dtype):
class _Writer:
def __enter__(self):
self._file = open(path, "wb")
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack("<Q", 1))
self._file.write(struct.pack("<B", _dtype_header_code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack("<Q", len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = _code_to_dtype[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
            # this would slow down initialization and is not actually needed
# _warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
        # this would slow down initialization and is not actually needed
# _warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
def get_indexed_dataset_to_local(path) -> str:
local_index_path = PathManager.get_local_path(index_file_path(path))
local_data_path = PathManager.get_local_path(data_file_path(path))
assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), (
"PathManager.get_local_path does not return files with expected patterns: "
f"{local_index_path} and {local_data_path}"
)
    local_path = local_data_path[:-4]  # strip the ".bin" suffix
    assert local_path == local_index_path[:-4]  # strip the ".idx" suffix
return local_path
class MMapIndexedDatasetBuilder:
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, "wb")
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
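# Usage sketch for the mmap format (illustrative; the paths are hypothetical). The builder
# receives the ".bin" path, finalize() the matching ".idx" path, and MMapIndexedDataset is
# opened with the common prefix:
#
#   builder = MMapIndexedDatasetBuilder("train.bin", dtype=np.uint16)
#   for sent in tokenized_sentences:          # hypothetical iterable of token-id lists
#       builder.add_item(torch.IntTensor(sent))
#   builder.finalize("train.idx")
#
#   dataset = MMapIndexedDataset("train")
#   first_item = dataset[0]                   # returned as an int64 torch tensor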
|
COCO-LM/fairseq/fairseq/data/indexed_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/indexed_dataset.py",
"repo_id": "COCO-LM",
"token_count": 8754
}
| 181 |
from enum import Enum
from typing import Dict, List, Optional, Sequence
import torch
from fairseq.data import Dictionary
class EncoderLangtok(Enum):
"""
Prepend to the beginning of source sentence either the
source or target language token. (src/tgt).
"""
src = "src"
tgt = "tgt"
class LangTokSpec(Enum):
main = "main"
mono_dae = "mono_dae"
class LangTokStyle(Enum):
multilingual = "multilingual"
mbart = "mbart"
@torch.jit.export
def get_lang_tok(
lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value
) -> str:
# TOKEN_STYLES can't be defined outside this fn since it needs to be
# TorchScriptable.
TOKEN_STYLES: Dict[str, str] = {
LangTokStyle.mbart.value: "[{}]",
LangTokStyle.multilingual.value: "__{}__",
}
if spec.endswith("dae"):
lang = f"{lang}_dae"
elif spec.endswith("mined"):
lang = f"{lang}_mined"
style = TOKEN_STYLES[lang_tok_style]
return style.format(lang)
def augment_dictionary(
dictionary: Dictionary,
language_list: List[str],
lang_tok_style: str,
langtoks_specs: Sequence[str] = (LangTokSpec.main.value,),
extra_data: Optional[Dict[str, str]] = None,
) -> None:
for spec in langtoks_specs:
for language in language_list:
dictionary.add_symbol(
get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec)
)
if lang_tok_style == LangTokStyle.mbart.value or (
extra_data is not None and LangTokSpec.mono_dae.value in extra_data
):
dictionary.add_symbol("<mask>")
|
COCO-LM/fairseq/fairseq/data/multilingual/multilingual_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multilingual/multilingual_utils.py",
"repo_id": "COCO-LM",
"token_count": 698
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import BaseWrapperDataset
class RollDataset(BaseWrapperDataset):
def __init__(self, dataset, shifts):
super().__init__(dataset)
self.shifts = shifts
def __getitem__(self, index):
item = self.dataset[index]
return torch.roll(item, self.shifts)
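# Behaviour sketch (illustrative): RollDataset shifts every item circularly, e.g. wrapping a
# dataset whose item is tensor([1, 2, 3, 4]) with shifts=1 yields tensor([4, 1, 2, 3]).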
|
COCO-LM/fairseq/fairseq/data/roll_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/roll_dataset.py",
"repo_id": "COCO-LM",
"token_count": 173
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
DecoderOut = namedtuple(
"IterativeRefinementDecoderOut",
["output_tokens", "output_scores", "attn", "step", "max_step", "history"],
)
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
            eos_penalty: if > 0.0, early stopping is penalized during decoding
            max_iter: maximum number of refinement iterations
            max_ratio: generate sequences of maximum length ax, where x is the source length
            decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
            retain_dropout: keep dropout enabled during inference
            adaptive: stop refining a sentence early once its output stops changing
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the IterativeRefinementGenerator is not supported"
)
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert (
self.beam_size > 1
), "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, "enable_ensemble"):
assert model.allow_ensemble, "{} does not support ensembling".format(
model.__class__.__name__
)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert (
model.allow_length_beam
), "{} does not support decoding with length beam.".format(
model.__class__.__name__
)
# regenerate data based on length-beam
length_beam_order = (
utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, length_beam_order
)
prev_decoder_out = model.regenerate_length_beam(
prev_decoder_out, self.beam_size
)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens,
decoder_out.output_tokens,
decoder_out.output_scores,
decoder_out.attn,
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(
decoder_out.output_tokens.size(0)
).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None
if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]["history"] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]["history"].append(
finalized_hypos(
step, finalized_history_tokens[j][i], None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
)
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[
np.argmax(
[
finalized[self.beam_size * i + j][0]["score"]
for j in range(self.beam_size)
]
)
+ self.beam_size * i
]
for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]["tokens"] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = (
finalized_tokens[0]
.new_zeros(len(finalized_tokens), finalized_maxlen)
.fill_(self.pad)
)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, : f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[
:, 0
] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = (
utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
)
.t()
.reshape(-1)
)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(
reranker_encoder_out, length_beam_order
)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
True,
None,
)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = (
reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
reranking_scores
)
for i in range(len(finalized)):
finalized[i][0]["score"] = reranking_scores[i]
return finalized
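# Usage sketch (illustrative; `task`, `models` and `sample` are hypothetical placeholders,
# and the models must be non-autoregressive models exposing forward_encoder/forward_decoder):
#
#   generator = IterativeRefinementGenerator(task.target_dictionary, models=models, max_iter=10)
#   finalized = generator.generate(models, sample)
#   best = finalized[0][0]   # dict with "tokens", "score", "steps", "positional_scores", ...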
|
COCO-LM/fairseq/fairseq/iterative_refinement_generator.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/iterative_refinement_generator.py",
"repo_id": "COCO-LM",
"token_count": 7110
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer_lm import TransformerLanguageModel
try:
from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("model_parallel_transformer_lm")
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@staticmethod
def add_args(parser):
TransformerLanguageModel.add_args(parser)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
# make sure all arguments are present in older models
base_lm_architecture(args)
task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
raise NotImplementedError(
"Character embeddings is not supported for model parallel"
)
elif args.adaptive_input:
raise NotImplementedError(
"Adaptive input is not supported for model parallel"
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
decoder = ModelParallelTransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5)
nn.init.constant_(tensor[1], 0)
embed_tokens = VocabParallelEmbedding(
len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init
)
return embed_tokens
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.character_filters = getattr(
args,
"character_filters",
"[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
)
args.character_embedding_dim = getattr(args, "character_embedding_dim", 4)
args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0.0)
args.add_bos_token = getattr(args, "add_bos_token", False)
@register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron")
def transformer_lm_megatron(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4)
args.decoder_layers = getattr(args, "decoder_layers", 72)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture(
"model_parallel_transformer_lm", "transformer_lm_megatron_11b"
)
def transformer_lm_megatron_11b(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 6)
args.decoder_layers = getattr(args, "decoder_layers", 72)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
|
COCO-LM/fairseq/fairseq/model_parallel/models/transformer_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/models/transformer_lm.py",
"repo_id": "COCO-LM",
"token_count": 3194
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BeamableMM,
FairseqDropout,
GradMultiply,
LearnedPositionalEmbedding,
LinearizedConvolution,
)
@register_model("fconv")
class FConvModel(FairseqEncoderDecoderModel):
"""
A fully convolutional model, i.e. a convolutional encoder and a
convolutional decoder, as described in `"Convolutional Sequence to Sequence
Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.
Args:
encoder (FConvEncoder): the encoder
decoder (FConvDecoder): the decoder
The Convolutional model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.fconv_parser
:prog:
"""
@classmethod
def hub_models(cls):
def moses_subword(path):
return {
"path": path,
"tokenizer": "moses",
"bpe": "subword_nmt",
}
return {
"conv.wmt14.en-fr": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2"
),
"conv.wmt14.en-de": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2"
),
"conv.wmt17.en-de": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2"
),
}
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(
layer is not None for layer in decoder.attention
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--share-input-output-embed', action='store_true',
help='share input and output embeddings (requires'
' --decoder-out-embed-dim and --decoder-embed-dim'
' to be equal)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
encoder_embed_dict = None
if args.encoder_embed_path:
encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)
utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)
decoder_embed_dict = None
if args.decoder_embed_path:
decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)
encoder = FConvEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
embed_dict=encoder_embed_dict,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
)
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
embed_dict=decoder_embed_dict,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
share_embed=args.share_input_output_embed,
)
return FConvModel(encoder, decoder)
class FConvEncoder(FairseqEncoder):
"""
Convolutional encoder consisting of `len(convolutions)` layers.
Args:
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_dim (int, optional): embedding dimension
embed_dict (str, optional): filename from which to load pre-trained
embeddings
max_positions (int, optional): maximum supported input sequence length
convolutions (list, optional): the convolutional layer structure. Each
list item `i` corresponds to convolutional layer `i`. Layers are
given as ``(out_channels, kernel_width, [residual])``. Residual
connections are added between layers when ``residual=1`` (which is
the default behavior).
dropout (float, optional): dropout to be applied before each conv layer
"""
def __init__(
self,
dictionary,
embed_dim=512,
embed_dict=None,
max_positions=1024,
convolutions=((512, 3),) * 20,
dropout=0.1,
):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(
embed_dict, self.dictionary, self.embed_tokens
)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
)
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for _, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(
Linear(residual_dim, out_channels)
if residual_dim != out_channels
else None
)
if kernel_size % 2 == 1:
padding = kernel_size // 2
else:
padding = 0
self.convolutions.append(
ConvTBC(
in_channels,
out_channels * 2,
kernel_size,
dropout=dropout,
padding=padding,
)
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
- **encoder_out** (tuple): a tuple with two elements, where the
first element is the last encoder layer's output and the
second element is the same quantity summed with the input
embedding (used for attention). The shape of both tensors is
`(batch, src_len, embed_dim)`.
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = self.dropout_module(x)
input_embedding = x
# project to size of convolution
x = self.fc1(x)
# used to mask padding in input
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
residuals = [x]
# temporal convolutions
for proj, conv, res_layer in zip(
self.projections, self.convolutions, self.residuals
):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = self.dropout_module(x)
if conv.kernel_size[0] % 2 == 1:
# padding is implicit in the conv
x = conv(x)
else:
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding) * math.sqrt(0.5)
return {
"encoder_out": (x, y),
"encoder_padding_mask": encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = (
encoder_out["encoder_out"][0].index_select(0, new_order),
encoder_out["encoder_out"][1].index_select(0, new_order),
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions
class AttentionLayer(nn.Module):
def __init__(self, conv_channels, embed_dim, bmm=None):
super().__init__()
# projects from output of convolution to embedding dimension
self.in_projection = Linear(conv_channels, embed_dim)
# projects from embedding dimension to convolution size
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = bmm if bmm is not None else torch.bmm
def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):
residual = x
# attention
x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
x = self.bmm(x, encoder_out[0])
# don't attend over padding
if encoder_padding_mask is not None:
x = (
x.float()
.masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf"))
.type_as(x)
) # FP16 support: cast to float and back
# softmax over last dim
sz = x.size()
x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
x = x.view(sz)
attn_scores = x
x = self.bmm(x, encoder_out[1])
# scale attention output (respecting potentially different lengths)
s = encoder_out[1].size(1)
if encoder_padding_mask is None:
x = x * (s * math.sqrt(1.0 / s))
else:
s = s - encoder_padding_mask.type_as(x).sum(
dim=1, keepdim=True
) # exclude padding
s = s.unsqueeze(-1)
x = x * (s * s.rsqrt())
# project back
x = (self.out_projection(x) + residual) * math.sqrt(0.5)
return x, attn_scores
def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
"""Replace torch.bmm with BeamableMM."""
if beamable_mm_beam_size is not None:
del self.bmm
self.add_module("bmm", BeamableMM(beamable_mm_beam_size))
class FConvDecoder(FairseqIncrementalDecoder):
"""Convolutional decoder"""
def __init__(
self,
dictionary,
embed_dim=512,
embed_dict=None,
out_embed_dim=256,
max_positions=1024,
convolutions=((512, 3),) * 20,
attention=True,
dropout=0.1,
share_embed=False,
positional_embeddings=True,
adaptive_softmax_cutoff=None,
adaptive_softmax_dropout=0.0,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([2]))
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.need_attn = True
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
if isinstance(attention, bool):
# expand True into [True, True, ...] and do the same with False
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError(
"Attention is expected to be a list of booleans of "
"length equal to the number of layers."
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(
embed_dict, self.dictionary, self.embed_tokens
)
self.embed_positions = (
PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
)
if positional_embeddings
else None
)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(
Linear(residual_dim, out_channels)
if residual_dim != out_channels
else None
)
self.convolutions.append(
LinearizedConv1d(
in_channels,
out_channels * 2,
kernel_size,
padding=(kernel_size - 1),
dropout=dropout,
)
)
self.attention.append(
AttentionLayer(out_channels, embed_dim) if attention[i] else None
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(
num_embeddings,
in_channels,
adaptive_softmax_cutoff,
dropout=adaptive_softmax_dropout,
)
else:
self.fc2 = Linear(in_channels, out_embed_dim)
if share_embed:
assert out_embed_dim == embed_dim, (
"Shared embed weights implies same dimensions "
" out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim)
)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
self.fc3.weight = self.embed_tokens.weight
else:
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
if encoder_out is not None:
encoder_padding_mask = encoder_out["encoder_padding_mask"]
encoder_out = encoder_out["encoder_out"]
# split and transpose encoder outputs
encoder_a, encoder_b = self._split_encoder_out(
encoder_out, incremental_state
)
if self.embed_positions is not None:
pos_embed = self.embed_positions(prev_output_tokens, incremental_state)
else:
pos_embed = 0
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
x = self._embed_tokens(prev_output_tokens, incremental_state)
# embed tokens and combine with positional embeddings
x += pos_embed
x = self.dropout_module(x)
target_embedding = x
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
# temporal convolutions
avg_attn_scores = None
num_attn_layers = len(self.attention)
residuals = [x]
for proj, conv, attention, res_layer in zip(
self.projections, self.convolutions, self.attention, self.residuals
):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
x = self.dropout_module(x)
x = conv(x, incremental_state)
x = F.glu(x, dim=2)
# attention
if attention is not None:
x = self._transpose_if_training(x, incremental_state)
x, attn_scores = attention(
x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask
)
if not self.training and self.need_attn:
attn_scores = attn_scores / num_attn_layers
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
x = self._transpose_if_training(x, incremental_state)
# residual
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = self._transpose_if_training(x, incremental_state)
# project back to size of vocabulary if not using adaptive softmax
if self.fc2 is not None and self.fc3 is not None:
x = self.fc2(x)
x = self.dropout_module(x)
x = self.fc3(x)
return x, avg_attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
encoder_out = utils.get_incremental_state(
self, incremental_state, "encoder_out"
)
if encoder_out is not None:
encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)
utils.set_incremental_state(
self, incremental_state, "encoder_out", encoder_out
)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return (
self.embed_positions.max_positions
if self.embed_positions is not None
else float("inf")
)
def upgrade_state_dict(self, state_dict):
if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2:
# old models use incorrect weight norm dimension
for i, conv in enumerate(self.convolutions):
# reconfigure weight norm
nn.utils.remove_weight_norm(conv)
self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)
state_dict["decoder.version"] = torch.Tensor([1])
return state_dict
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _embed_tokens(self, tokens, incremental_state):
if incremental_state is not None:
# keep only the last token for incremental forward pass
tokens = tokens[:, -1:]
return self.embed_tokens(tokens)
def _split_encoder_out(self, encoder_out, incremental_state):
"""Split and transpose encoder outputs.
This is cached when doing incremental inference.
"""
cached_result = utils.get_incremental_state(
self, incremental_state, "encoder_out"
)
if cached_result is not None:
return cached_result
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(1, 2).contiguous()
result = (encoder_a, encoder_b)
if incremental_state is not None:
utils.set_incremental_state(self, incremental_state, "encoder_out", result)
return result
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def extend_conv_spec(convolutions):
"""
    Extends a convolutional spec, given as a list of tuples of 2 or 3 parameters
    (output dim, kernel size, and optionally how many layers back to look for the
    residual connection), defaulting the residual parameter to 1 when unspecified.
"""
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception(
"invalid number of parameters in convolution spec "
+ str(spec)
+ ". expected 2 or 3"
)
return tuple(extended)
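# Illustrative sketch (added; not part of the original module): 2-tuples are padded
# with the default residual distance of 1, while 3-tuples are kept unchanged, e.g.
#
#     extend_conv_spec(((512, 3), (1024, 3, 2)))  # -> ((512, 3, 1), (1024, 3, 2))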
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, dropout=0.0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features))
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
@register_model_architecture("fconv", "fconv")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_attention = getattr(args, "decoder_attention", "True")
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
@register_model_architecture("fconv", "fconv_iwslt_de_en")
def fconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", "[(256, 3)] * 4")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_ro")
def fconv_wmt_en_ro(args):
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_de")
def fconv_wmt_en_de(args):
convs = "[(512, 3)] * 9" # first 9 layers have 512 units
convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units
convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_layers = getattr(args, "encoder_layers", convs)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_layers = getattr(args, "decoder_layers", convs)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_fr")
def fconv_wmt_en_fr(args):
convs = "[(512, 3)] * 6" # first 6 layers have 512 units
convs += " + [(768, 3)] * 4" # next 4 layers have 768 units
convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units
convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions
convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_layers = getattr(args, "encoder_layers", convs)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_layers = getattr(args, "decoder_layers", convs)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/fconv.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/fconv.py",
"repo_id": "COCO-LM",
"token_count": 13729
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
# s: input batch
# V: vocabulary size
rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
choices = torch.rand(size=s.size(), device=s.device)
choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
replace = choices < beta / 3
repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
swap = (choices >= beta * 2 / 3) & (choices < beta)
safe = choices >= beta
for i in range(s.size(1) - 1):
rand_word = rand_words[:, i]
next_word = s[:, i + 1]
self_word = s[:, i]
replace_i = replace[:, i]
swap_i = swap[:, i] & (next_word != 3)
repeat_i = repeat[:, i] & (next_word != 3)
safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
s[:, i] = (
self_word * (safe_i | repeat_i).long()
+ next_word * swap_i.long()
+ rand_word * replace_i.long()
)
s[:, i + 1] = (
next_word * (safe_i | replace_i).long()
+ self_word * (swap_i | repeat_i).long()
)
return s
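# Added note: for each position, a single uniform draw picks one of four outcomes --
# replace with a random word (choices < beta/3), repeat the current word
# (beta/3 <= choices < 2*beta/3), swap with the next word (2*beta/3 <= choices < beta),
# or keep it unchanged (choices >= beta); pad/bos/eos positions are forced into the
# "keep" branch by the masked_fill_ above.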
def gumbel_noise(input, TINY=1e-8):
return (
input.new_zeros(*input.size())
.uniform_()
.add_(TINY)
.log_()
.neg_()
.add_(TINY)
.log_()
.neg_()
)
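# Added note: this draws (approximately) standard Gumbel noise, -log(-log(u)) with
# u ~ Uniform(0, 1) and TINY guarding against log(0). Adding it to the decoder logits
# and taking the argmax (the stochastic_approx branch below) samples from the
# predicted softmax distribution via the Gumbel-max trick.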
@register_model("iterative_nonautoregressive_transformer")
class IterNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--train-step",
type=int,
help="number of refinement iterations during training",
)
parser.add_argument(
"--dae-ratio",
type=float,
help="the probability of switching to the denoising auto-encoder loss",
)
parser.add_argument(
"--stochastic-approx",
action="store_true",
help="sampling from the decoder as the inputs for next iteration",
)
@classmethod
def build_model(cls, args, task):
model = super().build_model(args, task)
model.train_step = getattr(args, "train_step", 4)
model.dae_ratio = getattr(args, "dae_ratio", 0.5)
model.stochastic_approx = getattr(args, "stochastic_approx", False)
return model
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
B, T = prev_output_tokens.size()
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
for t in range(self.train_step):
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
step=t,
)
word_ins_tgt = tgt_tokens
word_ins_mask = word_ins_tgt.ne(self.pad)
word_ins_outs.append(word_ins_out)
word_ins_tgts.append(word_ins_tgt)
word_ins_masks.append(word_ins_mask)
if t < (self.train_step - 1):
# prediction for next iteration
if self.stochastic_approx:
word_ins_prediction = (
word_ins_out + gumbel_noise(word_ins_out)
).max(-1)[1]
else:
word_ins_prediction = word_ins_out.max(-1)[1]
prev_output_tokens = prev_output_tokens.masked_scatter(
word_ins_mask, word_ins_prediction[word_ins_mask]
)
if self.dae_ratio > 0:
# we do not perform denoising for the first iteration
                    corrupted = (
                        torch.rand(size=(B,), device=prev_output_tokens.device)
                        < self.dae_ratio
                    )
                    corrupted_tokens = _sequential_poisoning(
                        tgt_tokens[corrupted],
                        len(self.tgt_dict),
                        0.33,
                        self.bos,
                        self.eos,
                        self.pad,
                    )
                    prev_output_tokens[corrupted] = corrupted_tokens
# concat everything
word_ins_out = torch.cat(word_ins_outs, 0)
word_ins_tgt = torch.cat(word_ins_tgts, 0)
word_ins_mask = torch.cat(word_ins_masks, 0)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
@register_model_architecture(
"iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
)
def inat_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
args.train_step = getattr(args, "train_step", 4)
args.dae_ratio = getattr(args, "dae_ratio", 0.5)
args.stochastic_approx = getattr(args, "stochastic_approx", False)
@register_model_architecture(
"iterative_nonautoregressive_transformer",
"iterative_nonautoregressive_transformer_wmt_en_de",
)
def iter_nat_wmt_en_de(args):
inat_base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py",
"repo_id": "COCO-LM",
"token_count": 4214
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple, List
import torch
import torch.nn.functional as F
from fairseq.models import FairseqEncoder
from fairseq.models.speech_to_text import (
ConvTransformerEncoder,
)
from fairseq.models.speech_to_text.utils import attention_suppression
from fairseq.models.speech_to_text.utils import (
lengths_to_encoder_padding_mask,
segments_to_sequence,
sequence_to_segments,
)
from fairseq.modules import MultiheadAttention, TransformerEncoderLayer
from torch import nn, Tensor
# ------------------------------------------------------------------------------
# AugmentedMemoryConvTransformerEncoder
# ------------------------------------------------------------------------------
class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
args.encoder_stride = self.stride()
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
self.left_context_after_stride = args.left_context // args.encoder_stride
self.right_context_after_stride = args.right_context // args.encoder_stride
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[
AugmentedMemoryTransformerEncoderLayer(args)
for i in range(args.encoder_layers)
]
)
def stride(self):
# Hard coded here. Should infer from convs in future
stride = 4
return stride
def forward(self, src_tokens, src_lengths, states=None):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
subsampling_factor = 1.0 * max_seq_len / output_seq_len
input_lengths = torch.max(
(src_lengths.float() / subsampling_factor).ceil().long(),
x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(),
)
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
# TODO: fix positional embedding
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# State to store memory banks etc.
if states is None:
states = [
{"memory_banks": None, "encoder_states": None}
for i in range(len(self.transformer_layers))
]
for i, layer in enumerate(self.transformer_layers):
# x size:
# (self.left_size + self.segment_size + self.right_size)
# / self.stride, num_heads, dim
# TODO: Consider mask here
x = layer(x, states[i])
states[i]["encoder_states"] = x[
self.left_context_after_stride : -self.right_context_after_stride
]
lengths = (
(
~encoder_padding_mask[
:, self.left_context_after_stride : -self.right_context_after_stride
]
)
.sum(dim=1, keepdim=True)
.long()
)
return states[-1]["encoder_states"], lengths, states
# ------------------------------------------------------------------------------
# AugmentedMemoryTransformerEncoderLayer
# ------------------------------------------------------------------------------
class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer):
def __init__(self, args):
super().__init__(args)
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
def forward(self, x, state):
length, batch_size, x_dim = x.size()
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# init_state
if state.get("memory_banks", None) is None:
state["memory_banks"] = []
        # TODO: research new sum_query method
seg_start = self.left_context
seg_end = length - self.right_context
if seg_start < seg_end:
summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0)
else:
summarization_query = x.new_zeros(1, batch_size, x_dim)
x = torch.cat([x, summarization_query], dim=0)
x = self.self_attn(input_and_summary=x, state=state)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
def build_self_attention(self, embed_dim, args):
return AugmentedMemoryMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
tanh_on_mem=True,
max_memory_size=args.max_memory_size,
)
# ------------------------------------------------------------------------------
# AugmentedMemoryMultiheadAttention
# ------------------------------------------------------------------------------
class AugmentedMemoryMultiheadAttention(MultiheadAttention):
"""
Augmented Memory Attention from
Streaming Transformer-based Acoustic Models
Using Self-attention with Augmented Memory
https://arxiv.org/abs/2005.08042
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
tanh_on_mem=False,
memory_dim=None,
std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137
max_memory_size=-1,
disable_mem_on_mem_attn=True,
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
q_noise,
qn_block_size,
)
self.memory_dim = memory_dim if memory_dim is not None else embed_dim
self.std_scale = std_scale
self.disable_mem_on_mem_attn = disable_mem_on_mem_attn
# This Operator was used for factorization in PySpeech
self.v2e = lambda x: x
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = lambda x: x
self.nonlinear_squash_mem = False
self.max_memory_size = max_memory_size
def forward(self, input_and_summary, state):
"""
input: Encoder states of current segment with left or right context,
plus one summarization query
"""
length, batch_size, _ = input_and_summary.shape
        length = length - 1  # exclude the sum_query (last index)
memory = state["memory_banks"]
# TODO: positional embedding on memory
if self.max_memory_size > -1 and len(memory) > self.max_memory_size:
# TODO: need to fix here
if self.max_memory_size == 0:
memory = memory.new_zeros(1, memory.size(1), self.memory_dim)
else:
memory = memory[-self.max_memory_size :]
memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0)
input_and_sum_query = input_and_summary
q = self.q_proj(self.v2e(input_and_sum_query))
k = self.k_proj(self.v2e(memory_and_input))
v = self.v_proj(self.v2e(memory_and_input))
q = (
q.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
* self.scaling
)
k = (
k.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
v.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.disable_mem_on_mem_attn:
attention_weights = self.suppress_mem_on_mem_attention(
batch_size, self.num_heads, len(memory), attention_weights
)
if self.std_scale is not None:
attention_weights = attention_suppression(attention_weights, self.std_scale)
assert list(attention_weights.shape) == [
batch_size * self.num_heads,
length + 1,
length + len(memory),
]
attention_weights = torch.nn.functional.softmax(
attention_weights.float(), dim=-1
).type_as(attention_weights)
attention_probs = self.dropout_module(attention_weights)
        # [B*n_head, T+1, T+mem] x [B*n_head, T+mem, d_head] -> [B*n_head, T+1, d_head]
attention = torch.bmm(attention_probs, v)
assert list(attention.shape) == [
batch_size * self.num_heads,
length + 1,
self.head_dim,
]
attention = (
attention.transpose(0, 1)
.contiguous()
.view(length + 1, batch_size, self.embed_dim)
)
output_and_memory = self.out_proj(attention)
next_m = output_and_memory[-1:]
next_m = self.squash_mem(next_m)
output = output_and_memory[:-1]
state["memory_banks"].append(next_m)
return output
def suppress_mem_on_mem_attention(
self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor
):
"""
Arguments:
- B: batch size
- num_heads: number of attention heads
- mem_size: size of memory bank
- attention_weight: a [B*num_heads, T + 1, T + mem_size] vector
Return:
modified attention_weight with [B*num_heads, -1, :mem_size] = -inf
"""
attention_weight[:, -1, :mem_size] = float("-inf")
return attention_weight
# ------------------------------------------------------------------------------
# SequenceEncoder
# ------------------------------------------------------------------------------
class SequenceEncoder(FairseqEncoder):
"""
SequenceEncoder encodes sequences.
More specifically, `src_tokens` and `src_lengths` in `forward()` should
describe a batch of "complete" sequences rather than segments.
Segment-by-segment inference can be triggered by `segment_size`:
1) `segment_size` is None:
SequenceEncoder treats the input sequence as one single segment.
2) `segment_size` is not None (some int instead):
SequenceEncoder does the following:
1. breaks the input sequence into several segments
            2. runs inference on each segment and collects the outputs
            3. concatenates segment outputs into the output sequence.
    Note that `segment_size` here shouldn't include the additional left/right
    contexts needed. For example, if we wish to infer with LC-BLSTM where the
    middle chunk size is 100 and the right context is 20, `segment_size` should
    be 100.
"""
def __init__(self, args, module):
super().__init__(None)
self.module = module
self.input_time_axis = 1
self.output_time_axis = 0
self.segment_size = args.segment_size
self.left_context = args.left_context
self.right_context = args.right_context
def forward(
self,
src_tokens: Tensor,
src_lengths: Tensor,
states=None,
):
seg_src_tokens_lengths = sequence_to_segments(
sequence=src_tokens,
time_axis=self.input_time_axis,
lengths=src_lengths,
segment_size=self.segment_size,
extra_left_context=self.left_context,
extra_right_context=self.right_context,
)
seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = []
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths))
encoder_out, enc_lengths = segments_to_sequence(
segments=seg_encoder_states_lengths, time_axis=self.output_time_axis
)
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
enc_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
return {
"encoder_out": [encoder_out],
"encoder_padding_mask": [encoder_padding_mask],
"encoder_embedding": [],
"encoder_states": [states],
"src_tokens": [],
"src_lengths": [],
}
def incremental_encode(
self,
seg_src_tokens: Tensor,
seg_src_lengths: Tensor,
states=None,
):
"""
        Unlike the forward function, this function takes segmented speech as
        input and appends the encoder states to the previous states.
"""
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
return seg_encoder_states, seg_enc_lengths, states
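# Hypothetical usage sketch (added; names and sizes are illustrative only):
#
#     args.segment_size, args.left_context, args.right_context = 128, 32, 32
#     encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args))
#     enc_dict = encoder(src_tokens, src_lengths)  # keyed like other fairseq encoders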
# ------------------------------------------------------------------------------
# Augmented memory model decorator
# ------------------------------------------------------------------------------
def augmented_memory(klass):
class StreamSeq2SeqModel(klass):
@staticmethod
def add_args(parser):
super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser)
parser.add_argument(
"--segment-size", type=int, required=True, help="Length of the segment."
)
parser.add_argument(
"--left-context",
type=int,
default=0,
help="Left context for the segment.",
)
parser.add_argument(
"--right-context",
type=int,
default=0,
help="Right context for the segment.",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
StreamSeq2SeqModel.__name__ = klass.__name__
return StreamSeq2SeqModel
|
COCO-LM/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py",
"repo_id": "COCO-LM",
"token_count": 7411
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import operator
import torch
import torch.nn.functional as F
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class TiedLinear(nn.Module):
def __init__(self, weight, transpose):
super().__init__()
self.weight = weight
self.transpose = transpose
def forward(self, input):
return F.linear(input, self.weight.t() if self.transpose else self.weight)
class TiedHeadModule(nn.Module):
def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
super().__init__()
tied_emb, _ = weights
self.num_words, emb_dim = tied_emb.size()
self.word_proj = quant_noise(
TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size
)
if input_dim != emb_dim:
self.word_proj = nn.Sequential(
quant_noise(
nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size
),
self.word_proj,
)
self.class_proj = quant_noise(
nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size
)
self.out_dim = self.num_words + num_classes
self.register_buffer("_float_tensor", torch.FloatTensor(1))
def forward(self, input):
inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
out = self._float_tensor.new(inp_sz, self.out_dim)
out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1))
out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1))
return out
class AdaptiveSoftmax(nn.Module):
"""
This is an implementation of the efficient softmax approximation for
    graphics processing units (GPU), described in the paper "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309).
"""
def __init__(
self,
vocab_size,
input_dim,
cutoff,
dropout,
factor=4.0,
adaptive_inputs=None,
tie_proj=False,
q_noise=0,
qn_block_size=8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert (
vocab_size == cutoff[-1]
), "cannot specify cutoff larger than vocab size"
output_dim = cutoff[0] + len(cutoff) - 1
self.vocab_size = vocab_size
self.cutoff = cutoff
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.input_dim = input_dim
self.factor = factor
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.lsm = nn.LogSoftmax(dim=1)
if adaptive_inputs is not None:
self.head = TiedHeadModule(
adaptive_inputs.weights_for_band(0),
input_dim,
len(cutoff) - 1,
self.q_noise,
self.qn_block_size,
)
else:
self.head = quant_noise(
nn.Linear(input_dim, output_dim, bias=False),
self.q_noise,
self.qn_block_size,
)
self._make_tail(adaptive_inputs, tie_proj)
def init_weights(m):
if (
hasattr(m, "weight")
and not isinstance(m, TiedLinear)
and not isinstance(m, TiedHeadModule)
):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer("version", torch.LongTensor([1]))
def _make_tail(self, adaptive_inputs=None, tie_proj=False):
self.tail = nn.ModuleList()
for i in range(len(self.cutoff) - 1):
dim = int(self.input_dim // self.factor ** (i + 1))
tied_emb, tied_proj = (
adaptive_inputs.weights_for_band(i + 1)
if adaptive_inputs is not None
else (None, None)
)
if tied_proj is not None:
if tie_proj:
proj = quant_noise(
TiedLinear(tied_proj, transpose=True),
self.q_noise,
self.qn_block_size,
)
else:
proj = quant_noise(
nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False),
self.q_noise,
self.qn_block_size,
)
else:
proj = quant_noise(
nn.Linear(self.input_dim, dim, bias=False),
self.q_noise,
self.qn_block_size,
)
if tied_emb is None:
out_proj = nn.Linear(
dim, self.cutoff[i + 1] - self.cutoff[i], bias=False
)
else:
out_proj = TiedLinear(tied_emb, transpose=False)
m = nn.Sequential(
proj,
nn.Dropout(self.dropout_module.p),
quant_noise(out_proj, self.q_noise, self.qn_block_size),
)
self.tail.append(m)
def upgrade_state_dict_named(self, state_dict, name):
version_name = name + ".version"
if version_name not in state_dict:
raise Exception("This version of the model is no longer supported")
def adapt_target(self, target):
"""
In order to be efficient, the AdaptiveSoftMax does not compute the
        scores for all the words of the vocabulary for all the examples. It is
thus necessary to call the method adapt_target of the AdaptiveSoftMax
layer inside each forward pass.
"""
target = target.view(-1)
new_target = [target.clone()]
target_idxs = []
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
new_target[0][mask] = self.cutoff[0] + i
if mask.any():
target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1))
new_target.append(target[mask].add(-self.cutoff[i]))
else:
target_idxs.append(None)
new_target.append(None)
return new_target, target_idxs
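    # Worked example (added for illustration): with cutoff = [4, 8, 12] and
    # target = [1, 5, 10], the head target becomes [1, 4, 5] -- in-head words keep
    # their index and words in tail cluster i map to class cutoff[0] + i -- while
    # the per-cluster tail targets are [5 - cutoff[0]] = [1] and
    # [10 - cutoff[1]] = [2].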
def forward(self, input, target):
"""
Args:
input: (b x t x d)
target: (b x t)
Returns:
            2 lists: output for each cutoff section and new targets by cutoff
"""
input = input.contiguous().view(-1, input.size(-1))
input = self.dropout_module(input)
new_target, target_idxs = self.adapt_target(target)
output = [self.head(input)]
for i in range(len(target_idxs)):
if target_idxs[i] is not None:
output.append(self.tail[i](input.index_select(0, target_idxs[i])))
else:
output.append(None)
return output, new_target
def get_log_prob(self, input, target):
"""
Computes the log probabilities for all the words of the vocabulary,
given a 2D tensor of hidden vectors.
"""
bsz, length, dim = input.size()
input = input.contiguous().view(-1, dim)
if target is not None:
_, target_idxs = self.adapt_target(target)
else:
target_idxs = None
head_y = self.head(input)
log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
head_sz = self.cutoff[0] + len(self.tail)
log_probs[:, :head_sz] = self.lsm(head_y)
tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone()
for i in range(len(self.tail)):
start = self.cutoff[i]
end = self.cutoff[i + 1]
if target_idxs is None:
tail_out = log_probs[:, start:end]
tail_out.copy_(self.tail[i](input))
log_probs[:, start:end] = self.lsm(tail_out).add_(
tail_priors[:, i, None]
)
elif target_idxs[i] is not None:
idxs = target_idxs[i]
tail_out = log_probs[idxs, start:end]
tail_out.copy_(self.tail[i](input[idxs]))
log_probs[idxs, start:end] = self.lsm(tail_out).add_(
tail_priors[idxs, i, None]
)
log_probs = log_probs.view(bsz, length, -1)
return log_probs
|
COCO-LM/fairseq/fairseq/modules/adaptive_softmax.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/adaptive_softmax.py",
"repo_id": "COCO-LM",
"token_count": 4613
}
| 189 |
#include <torch/torch.h>
#include <vector>
std::vector<float*> dynamicconv_cpu_forward(
float* input,
float* filters,
int padding_l);
std::vector<float*> dynamicconv_cpu_backward(
float* gradOutput,
int padding_l,
float* input,
float* filters);
std::vector<float*> dynamicconv_forward(
float* input,
float* filters,
int padding_l) {
return dynamicconv_cpu_forward(input, filters, padding_l);
}
std::vector<float*> dynamicconv_backward(
float* gradOutput,
int padding_l,
float* input,
float* filters) {
return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)");
m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)");
}
|
COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp",
"repo_id": "COCO-LM",
"token_count": 327
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lightconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import nn
from torch.autograd import Function
class lightconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = lightconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = lightconv_cuda.backward(
grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
@with_incremental_state
class LightconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.0,
bias=False,
):
super(LightconvLayer, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
for k, v in state_dict.items():
if k.endswith(prefix + "weight"):
if v.dim() == 3 and v.size(1) == 1:
state_dict[k] = v.squeeze(1)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, x, incremental_state=None):
# during inference time, incremental BMM is faster
if incremental_state is not None:
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight.float(), dim=1).type_as(weight)
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
weight = (
weight.view(1, H, K)
.expand(T * B, H, K)
.contiguous()
.view(T * B * H, K, 1)
)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
# during training time, use CUDA kernel
else:
x = x.permute(1, 2, 0).contiguous()
weight = self.weight
if self.weight_softmax:
weight = F.softmax(self.weight, -1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)
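    # Added note: the incremental branch above unfolds the last `kernel_size`
    # timesteps per head and applies the (optionally softmax-normalized) kernel as a
    # batched matmul, whereas training permutes to a channels-first B x C x T layout
    # and calls the fused lightconv CUDA kernel.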
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def half(self):
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
|
COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_layer.py",
"repo_id": "COCO-LM",
"token_count": 2251
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def parse_config_yaml(yaml_data):
# Initialize to default options.
quantization_options = {
"n_centroids": {
"Linear": ["in_features", {"*": 256}],
"Embedding": ["embedding_dim", {"*": 256}],
},
"block_sizes": {
"Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
"Embedding": ["fuzzy_name", {"emb": 8}],
},
"layers_to_quantize": [
"decoder\\.layers\\.\\d+\\.fc[12]",
"decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
"decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
],
}
if "n_centroids" in yaml_data:
quantization_options["n_centroids"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["n_centroids"].items()
}
if "block_sizes" in yaml_data:
quantization_options["block_sizes"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["block_sizes"].items()
}
if "layers_to_quantize" in yaml_data:
quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]
return quantization_options
def convert_yaml_to_tuple(yaml_dictionary):
"""Converts a yaml dictionary with two keys: `key` and `value` into a two
argument tuple of those values."""
return (yaml_dictionary["key"], yaml_dictionary["value"])
|
COCO-LM/fairseq/fairseq/modules/quantization/quantization_options.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/quantization_options.py",
"repo_id": "COCO-LM",
"token_count": 763
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
MultiheadAttention,
SelfMultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
    1. If normal_init_linear_weights is set then weights of linear
       layer will be initialized using the normal distribution and
       bias will be set to the specified value.
    2. If normal_init_embed_weights is set then weights of embedding
       layer will be initialized using the normal distribution.
    3. If normal_init_proj_weights is set then weights of
       in_project_weight for MultiHeadAttention are initialized using
       the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.in_proj_weight.data)
# normal_(module.q_proj.weight.data)
# normal_(module.k_proj.weight.data)
# normal_(module.v_proj.weight.data)
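# Typical usage sketch (added): the initializer is applied recursively over a module
# tree, e.g. `model.apply(init_bert_params)`, which is what the sentence encoder
# below does when `apply_bert_init` is set.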
def relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
sign = torch.sign(relative_position)
num_buckets //= 2
n = torch.abs(relative_position)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
max_bucket_val = num_buckets - 1 - max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + torch.ceil(
torch.log(n.float() / max_exact) / math.log((max_distance - 1) / max_exact) * (max_bucket_val)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret = torch.where(is_small, n, val_if_large) * sign
return ret
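# Added note: bucketing is symmetric around zero (the sign is reapplied at the end);
# with num_buckets=32 and max_distance=128, offsets 0..7 map to exact buckets 0..7
# (times their sign) while larger offsets share logarithmically coarser buckets,
# capped at the largest bucket.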
class TransformerSentenceEncoder(nn.Module):
"""
Implementation for a Bi-directional Transformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
TransformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
layerdrop: float = 0.0,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
traceable: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
share_embed_tokens: object = None,
share_embed_positions: object = None,
share_emb_layer_norm: object = None,
shared_embedding_dim: int = 768,
rel_pos: int = 0,
rel_pos_bins: int = 32,
max_rel_pos: int = 128,
checkpoint_activations: bool = False,
offload_activations: bool = False
) -> None:
super().__init__()
self.padding_idx = padding_idx
self.vocab_size = vocab_size
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.layerdrop = layerdrop
self.max_seq_len = max_seq_len
self.embedding_dim = embedding_dim
self.num_segments = num_segments
self.use_position_embeddings = use_position_embeddings
self.apply_bert_init = apply_bert_init
self.learned_pos_embedding = learned_pos_embedding
self.traceable = traceable
self.shared_embedding_dim = shared_embedding_dim
self.num_attention_heads = num_attention_heads
self.checkpoint_activations = checkpoint_activations
self.offload_activations = offload_activations
self.embed_linear = None
if share_embed_tokens is None:
self.embed_tokens = nn.Embedding(
self.vocab_size, self.embedding_dim, self.padding_idx
)
else:
self.embed_tokens = share_embed_tokens
if self.shared_embedding_dim != self.embedding_dim:
self.embed_linear = nn.Linear(self.shared_embedding_dim, self.embedding_dim)
if share_embed_positions is None:
self.embed_positions = nn.Embedding(self.max_seq_len, self.embedding_dim)
else:
self.embed_positions = share_embed_positions
# self.embed_tokens = self.build_embedding(
# self.vocab_size, self.embedding_dim, self.padding_idx
# )
self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
q_noise,
qn_block_size,
)
else:
self.quant_noise = None
self.segment_embeddings = (
nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
if self.num_segments > 0
else None
)
if share_emb_layer_norm is None:
self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
else:
self.emb_layer_norm = share_emb_layer_norm
if self.layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_transformer_sentence_encoder_layer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=self.dropout_module.p,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
encoder_normalize_before=encoder_normalize_before,
)
for _ in range(num_encoder_layers)
]
)
# Apply initialization of model params after building the model
if self.apply_bert_init:
self.apply(init_bert_params)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
if freeze_embeddings:
freeze_module_params(self.embed_tokens)
freeze_module_params(self.segment_embeddings)
freeze_module_params(self.embed_positions)
freeze_module_params(self.emb_layer_norm)
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
self.rel_pos = True if rel_pos == 1 else False
if self.rel_pos:
assert rel_pos_bins % 2 == 0
self.rel_pos_bins = rel_pos_bins
self.max_rel_pos = max_rel_pos
self.relative_attention_bias = nn.Embedding(self.rel_pos_bins, self.num_attention_heads)
seq_len = self.max_seq_len
context_position = torch.arange(seq_len, dtype=torch.long)[:, None]
memory_position = torch.arange(seq_len, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
self.rp_bucket = relative_position_bucket(
relative_position,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos
)
self.rp_bucket -= self.rp_bucket.min()
def get_rel_pos_bias(self, x):
        # Assumes the input is ordered; if your input tokens are permuted, you may need to update this accordingly
if self.rp_bucket.device != x.device:
self.rp_bucket = self.rp_bucket.to(x.device)
seq_len = x.size(1)
rp_bucket = self.rp_bucket[:seq_len, :seq_len]
values = F.embedding(rp_bucket, self.relative_attention_bias.weight)
values = values.permute([2, 0, 1])
return values.contiguous()
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_transformer_sentence_encoder_layer(
self,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
q_noise,
qn_block_size,
encoder_normalize_before,
):
layer = TransformerSentenceEncoderLayer(
embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
encoder_normalize_before=encoder_normalize_before,
)
if self.checkpoint_activations:
layer = checkpoint_wrapper(layer, offload_to_cpu=self.offload_activations)
return layer
def forward(
self,
tokens: torch.Tensor,
segment_labels: torch.Tensor = None,
last_state_only: bool = False,
positions: Optional[torch.Tensor] = None,
token_embeddings: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
use_ext_padding_mask: bool = False,
padding_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
is_tpu = tokens.device.type == "xla"
# compute padding mask. This is needed for multi-head attention
if is_tpu or self.traceable:
padding_mask = tokens.eq(self.padding_idx)
elif use_ext_padding_mask:
padding_mask = padding_mask
else:
padding_mask = tokens.eq(self.padding_idx)
if not padding_mask.any():
padding_mask = None
if token_embeddings is not None:
x = token_embeddings
else:
x = self.embed_tokens(tokens)
if self.embed_linear is not None:
x = self.embed_linear(x)
if self.embed_scale is not None:
x *= self.embed_scale
seq_len = tokens.size(1)
if self.embed_positions is not None:
weight = self.embed_positions.weight[:seq_len, :]
x += weight
if self.segment_embeddings is not None and segment_labels is not None:
x += self.segment_embeddings(segment_labels)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
# account for padding while computing the representation
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
inner_states = []
if not last_state_only:
inner_states.append(x)
rel_pos_bias = self.get_rel_pos_bias(tokens).repeat(tokens.size(0), 1, 1) if self.rel_pos else None
if attn_mask is None:
attn_mask = rel_pos_bias
else:
attn_mask += rel_pos_bias
if attn_mask is not None and padding_mask is not None:
# merge key_padding_mask and attn_mask
attn_mask = attn_mask.view(tokens.size(0), -1, seq_len, seq_len)
attn_mask.masked_fill_(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
attn_mask = attn_mask.view(-1, seq_len, seq_len)
padding_mask = None
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=padding_mask, attn_bias=attn_mask)
if not last_state_only:
inner_states.append(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
sentence_rep = x[:, 0, :]
if last_state_only:
inner_states = [x]
if self.traceable:
return torch.stack(inner_states), sentence_rep
else:
return inner_states, sentence_rep
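# Minimal usage sketch (illustrative only; the vocabulary size, batch/sequence
# dimensions and layer sizes below are assumptions, not values from any shipped
# configuration):
#
#   encoder = TransformerSentenceEncoder(
#       padding_idx=1, vocab_size=32000, num_encoder_layers=2,
#       embedding_dim=64, ffn_embedding_dim=256, num_attention_heads=4,
#   )
#   tokens = torch.randint(4, 32000, (8, 16))   # B x T, avoiding the pad index
#   inner_states, sentence_rep = encoder(tokens)
#   # each tensor in inner_states is T x B x C; sentence_rep is B x C (the first-token slot)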
|
COCO-LM/fairseq/fairseq/modules/transformer_sentence_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/transformer_sentence_encoder.py",
"repo_id": "COCO-LM",
"token_count": 6886
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class DynamicLossScaler(object):
def __init__(
self,
init_scale=2.0 ** 15,
scale_factor=2.0,
scale_window=2000,
tolerance=0.0,
threshold=None,
min_loss_scale=1e-4,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return self.loss_scale * outputs
def update(self):
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
            # overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
self.loss_scale = prev_scale
raise FloatingPointError(
(
"Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try lowering the learning rate, using gradient clipping or "
"increasing the batch size."
).format(self.min_loss_scale)
)
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
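# Illustrative fp16 training-step usage (compute_grad_norm and the optimizer are
# placeholders; in fairseq the FP16 optimizer wrappers drive this object):
#
#   scaler = DynamicLossScaler(init_scale=2.0 ** 7)
#   scaler.scale(loss).backward()                     # backward on the scaled loss
#   grad_norm = compute_grad_norm(model) / scaler.loss_scale
#   try:
#       scaler.check_overflow(grad_norm)              # raises OverflowError on inf/nan grads
#       optimizer.step()
#       scaler.update()                               # grows the scale after `scale_window` clean steps
#   except OverflowError:
#       optimizer.zero_grad()                         # skip this step; the scale was already lowered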
|
COCO-LM/fairseq/fairseq/optim/dynamic_loss_scaler.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/dynamic_loss_scaler.py",
"repo_id": "COCO-LM",
"token_count": 1216
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
from fairseq.dataclass import FairseqDataclass
from omegaconf import II, DictConfig
from torch.optim.optimizer import Optimizer, required
from . import FairseqOptimizer, register_optimizer
@dataclass
class FairseqNAGConfig(FairseqDataclass):
momentum: float = field(default=0.99, metadata={"help": "momentum factor"})
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
# TODO common vars in parent class
lr: List[float] = II("optimization.lr")
@register_optimizer("nag", dataclass=FairseqNAGConfig)
class FairseqNAG(FairseqOptimizer):
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
self._optimizer = NAG(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"momentum": self.cfg.momentum,
"weight_decay": self.cfg.weight_decay,
}
class NAG(Optimizer):
def __init__(self, params, lr=required, momentum=0, weight_decay=0):
defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
super(NAG, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
lr = group["lr"]
lr_old = group.get("lr_old", lr)
lr_correct = lr / lr_old if lr_old > 0 else lr
for p in group["params"]:
if p.grad is None:
continue
p_data_fp32 = p.data
if p_data_fp32.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
d_p = p.grad.data.float()
param_state = self.state[p]
if "momentum_buffer" not in param_state:
param_state["momentum_buffer"] = torch.zeros_like(d_p)
else:
param_state["momentum_buffer"] = param_state["momentum_buffer"].to(
d_p
)
buf = param_state["momentum_buffer"]
if weight_decay != 0:
p_data_fp32.mul_(1 - lr * weight_decay)
p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct)
p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr)
buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
group["lr_old"] = lr
return loss
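# This optimizer is registered under the name "nag"; an illustrative command-line
# invocation (the data path and remaining flags are placeholders):
#
#   fairseq-train data-bin/placeholder \
#       --optimizer nag --lr 0.25 --momentum 0.99 --weight-decay 0.0 ...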
|
COCO-LM/fairseq/fairseq/optim/nag.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/nag.py",
"repo_id": "COCO-LM",
"token_count": 1741
}
| 195 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import sys
import torch
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional, Any
from omegaconf import MISSING, II
from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import GenerationConfig
from . import FairseqTask, register_task
from .. import utils
from ..logging import metrics
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary):
self.dictionary = dictionary
def __call__(self, label):
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False
)
@dataclass
class AudioPretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
)
enable_padding: bool = field(
default=False, metadata={"help": "pad shorter samples instead of cropping"}
)
max_sample_size: Optional[int] = field(
default=None, metadata={"help": "max sample size to crop to for batching"}
)
min_sample_size: Optional[int] = field(
default=None, metadata={"help": "min sample size to skip small examples"}
)
# Options for reporting WER metrics during validation. Only applicable to
# Seq2Seq models during fine-tuning
eval_wer: bool = field(
default=False, metadata={"help": "compute WER for Seq2Seq models"}
)
eval_wer_config: GenerationConfig = field(
default_factory=lambda: GenerationConfig(),
metadata={"help": "beam search config for evaluating wer during training"},
)
eval_wer_tokenizer: Any = field(
default=None,
metadata={"help": "tokenizer config for evaluating wer during training"},
)
eval_wer_post_process: str = field(
default="letter",
metadata={
"help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)"
},
)
autoregressive: bool = field(
default=False,
metadata={
"help": "required for autoregressive decoders (like seq2seq models); "
"adds 'prev_output_tokens' to input and appends eos to target"
},
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "number of buckets"
},
)
precompute_mask_indices: bool = field(
default=False,
metadata={
"help": "flag to compute mask indices in data preparation.",
},
)
# The following are needed to precompute mask and mask channel indices
# before model's forward.
mask_length: Optional[int] = II("model.mask_length")
mask_prob: Optional[float] = II("model.mask_prob")
mask_selection: Optional[str] = II("model.mask_selection")
mask_other: Optional[float] = II("model.mask_other")
no_mask_overlap: Optional[bool] = II("model.no_mask_overlap")
mask_min_space: Optional[int] = II("model.mask_min_space")
mask_channel_length: Optional[int] = II("model.mask_channel_length")
mask_channel_prob: Optional[float] = II("model.mask_channel_prob")
mask_channel_selection: Optional[str] = II("model.mask_channel_selection")
mask_channel_other: Optional[float] = II("model.mask_channel_other")
no_mask_channel_overlap: Optional[bool] = II("model.no_mask_channel_overlap")
mask_channel_min_space: Optional[int] = II("model.mask_channel_min_space")
conv_feature_layers: Optional[str] = II("model.conv_feature_layers")
encoder_embed_dim: Optional[int] = II("model.encoder_embed_dim")
tpu: bool = II("common.tpu")
@register_task("audio_pretraining", dataclass=AudioPretrainingConfig)
class AudioPretrainingTask(FairseqTask):
""""""
cfg: AudioPretrainingConfig
def __init__(
self,
cfg: AudioPretrainingConfig,
):
super().__init__(cfg)
if cfg.eval_wer:
assert cfg.labels is not None, "eval_wer can only be set during fine-tuning"
self.blank_symbol = "<s>"
self.state.add_factory("target_dictionary", self.load_target_dictionary)
@classmethod
def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
return cls(cfg)
def load_target_dictionary(self):
if self.cfg.labels:
dict_path = os.path.join(
self.cfg.data, f"dict.{self.cfg.labels}.txt"
)
return Dictionary.load(dict_path)
return None
def _get_mask_precompute_kwargs(self, cfg):
if self.cfg.precompute_mask_indices or self.cfg.tpu:
args = [
'mask_length',
'mask_prob',
'mask_selection',
'mask_other',
'no_mask_overlap',
'mask_min_space',
'mask_channel_length',
'mask_channel_prob',
'mask_channel_selection',
'mask_channel_other',
'no_mask_channel_overlap',
'mask_channel_min_space',
'encoder_embed_dim',
'conv_feature_layers',
]
return {arg: cfg[arg] for arg in args}
else:
return {}
def load_dataset(
self, split: str, task_cfg: FairseqDataclass = None, **kwargs
):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
# upgrade old task
if isinstance(task_cfg, Namespace):
if not hasattr(task_cfg, "autoregressive"):
task_cfg.autoregressive = not task_cfg.criterion == 'ctc'
manifest = os.path.join(data_path, "{}.tsv".format(split))
self.datasets[split] = FileAudioDataset(
manifest,
sample_rate=task_cfg.get('sample_rate', self.cfg.sample_rate),
max_sample_size=self.cfg.max_sample_size,
min_sample_size=self.cfg.min_sample_size,
pad=task_cfg.labels is not None or task_cfg.enable_padding,
normalize=task_cfg.normalize,
num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu),
compute_mask_indices=(
self.cfg.precompute_mask_indices or self.cfg.tpu
),
**self._get_mask_precompute_kwargs(task_cfg),
)
if self.cfg.tpu and task_cfg['mask_channel_prob'] == 0.0:
logger.info(
"Pretraining on TPUs may suffer convergence "
"issues when training with `mask_channel_prob` value of "
"0. You may want to set this to a low value close to 0."
)
if task_cfg.labels:
label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}")
with open(label_path, "r") as f:
labels = [
line for i, line in enumerate(f)
if i in self.datasets[split].line_inds
]
assert len(labels) == len(self.datasets[split]), (
f"labels length ({len(labels)}) and dataset length "
f"({len(self.datasets[split])}) do not match")
process_label = LabelEncoder(self.target_dictionary)
self.datasets[split] = AddTargetDataset(
self.datasets[split],
labels,
pad=self.target_dictionary.pad(),
eos=self.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
add_to_input=task_cfg.get('autoregressive', False),
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.state.target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self,
indices,
dataset,
max_positions=None,
ignore_invalid_inputs=False,
):
# we do not need to filter by size in this task as dataloaders take care of this
return indices
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_wer and self.cfg.autoregressive:
metrics = self._inference_with_wer(self.sequence_generator, sample, model)
logging_output["_num_char_errors"] = metrics["num_char_errors"]
logging_output["_num_chars"] = metrics["num_chars"]
logging_output["_num_word_errors"] = metrics["num_word_errors"]
logging_output["_num_words"] = metrics["num_words"]
return loss, sample_size, logging_output
def build_model(self, model_cfg: FairseqDataclass):
model = super().build_model(model_cfg)
if self.cfg.eval_wer and self.cfg.autoregressive:
self.sequence_generator = self.build_generator(
[model],
self.cfg.eval_wer_config,
)
if self.cfg.eval_wer_tokenizer:
self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer)
else:
self.tokenizer = None
return model
def _inference_with_wer(self, generator, sample, model):
import editdistance
def decode(toks):
s = self.target_dictionary.string(
toks.int().cpu(),
self.cfg.eval_wer_post_process,
escape_unk=True,
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
num_word_errors, num_char_errors = 0, 0
num_chars, num_words = 0, 0
gen_out = self.inference_step(generator, [model], sample, None)
for i in range(len(gen_out)):
hyp = decode(gen_out[i][0]["tokens"])
ref = decode(
utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
)
num_char_errors += editdistance.eval(hyp, ref)
num_chars += len(ref)
hyp_words = hyp.split()
ref_words = ref.split()
num_word_errors += editdistance.eval(hyp_words, ref_words)
num_words += len(ref_words)
return {
"num_char_errors": num_char_errors,
"num_chars": num_chars,
"num_word_errors": num_word_errors,
"num_words": num_words,
}
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
metrics.log_scalar("_num_char_errors", num_char_errors)
metrics.log_scalar("_num_chars", num_chars)
metrics.log_scalar("_num_word_errors", num_word_errors)
metrics.log_scalar("_num_words", num_words)
if num_words > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
metrics.log_derived(
"wer",
lambda meters: meters["_num_word_errors"].sum
* 100.0
/ meters["_num_words"].sum
if meters["_num_words"].sum > 0
else float("nan"),
)
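# The WER/CER arithmetic reported above, as a tiny standalone check
# (the hypothesis/reference strings are made-up examples):
#
#   import editdistance
#   hyp, ref = "a b c", "a c c"
#   cer = 100.0 * editdistance.eval(hyp, ref) / len(ref)                          # 20.0
#   wer = 100.0 * editdistance.eval(hyp.split(), ref.split()) / len(ref.split())  # 33.33...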
|
COCO-LM/fairseq/fairseq/tasks/audio_pretraining.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/audio_pretraining.py",
"repo_id": "COCO-LM",
"token_count": 5946
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from . import register_task
from .translation import TranslationTask, load_langpair_dataset
@register_task("translation_from_pretrained_bart")
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
    Translate from the source language to the target language with a model initialized from a multilingual pretrained model.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--langs', required=True, metavar='LANG',
help='comma-separated list of monolingual language, '
'for example, "en,de,fr". These should match the '
'langs from pretraining (and be in the same order). '
'You should always add all pretraining language idx '
'during finetuning.')
parser.add_argument('--prepend-bos', action='store_true',
help='prepend bos token to each sentence, which matches '
'mBART pretraining')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.langs = args.langs.split(",")
for d in [src_dict, tgt_dict]:
for l in self.langs:
d.add_symbol("[{}]".format(l))
d.add_symbol("<mask>")
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, "max_source_positions", 1024),
max_target_positions=getattr(self.args, "max_target_positions", 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, "prepend_bos", False),
append_source_id=True,
)
def build_generator(self, models, args, **unused):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
src_lang_id = self.source_dictionary.index("[{}]".format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(
source_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
return dataset
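# Illustrative fine-tuning invocation for this task (language list, data path and
# architecture are placeholders; see the mBART example docs for a full command):
#
#   fairseq-train path/to/data-bin \
#       --task translation_from_pretrained_bart \
#       --langs ar_AR,cs_CZ,de_DE,en_XX,ro_RO,zh_CN \
#       --source-lang en_XX --target-lang ro_RO \
#       --arch mbart_large --restore-file path/to/pretrained/model.pt ...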
|
COCO-LM/fairseq/fairseq/tasks/translation_from_pretrained_bart.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/translation_from_pretrained_bart.py",
"repo_id": "COCO-LM",
"token_count": 2495
}
| 197 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BLEU scoring of generated translations against reference translations.
"""
import argparse
import os
import sys
from fairseq.data import dictionary
from fairseq.scoring import bleu
def get_parser():
parser = argparse.ArgumentParser(
description="Command-line script for BLEU scoring."
)
# fmt: off
parser.add_argument('-s', '--sys', default='-', help='system output')
parser.add_argument('-r', '--ref', required=True, help='references')
parser.add_argument('-o', '--order', default=4, metavar='N',
type=int, help='consider ngrams up to this order')
parser.add_argument('--ignore-case', action='store_true',
help='case-insensitive scoring')
parser.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
parser.add_argument('--sentence-bleu', action='store_true',
help='report sentence-level BLEUs (i.e., with +1 smoothing)')
# fmt: on
return parser
def cli_main():
parser = get_parser()
args = parser.parse_args()
print(args)
assert args.sys == "-" or os.path.exists(
args.sys
), "System output file {} does not exist".format(args.sys)
assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref)
dict = dictionary.Dictionary()
def readlines(fd):
for line in fd.readlines():
if args.ignore_case:
yield line.lower()
else:
yield line
if args.sacrebleu:
import sacrebleu
def score(fdsys):
with open(args.ref) as fdref:
print(sacrebleu.corpus_bleu(fdsys, [fdref]).format())
elif args.sentence_bleu:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
for i, (sys_tok, ref_tok) in enumerate(
zip(readlines(fdsys), readlines(fdref))
):
scorer.reset(one_init=True)
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(i, scorer.result_string(args.order))
else:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(
bleu.BleuConfig(
pad=dict.pad(),
eos=dict.eos(),
unk=dict.unk(),
)
)
for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(scorer.result_string(args.order))
if args.sys == "-":
score(sys.stdin)
else:
with open(args.sys, "r") as f:
score(f)
if __name__ == "__main__":
cli_main()
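# Example invocations (file names are placeholders); setup.py installs this module
# as the `fairseq-score` console script:
#
#   fairseq-score --sys gen.out.sys --ref gen.out.ref
#   fairseq-score --sys gen.out.sys --ref gen.out.ref --sacrebleu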
|
COCO-LM/fairseq/fairseq_cli/score.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq_cli/score.py",
"repo_id": "COCO-LM",
"token_count": 1636
}
| 198 |
import math
import torch
import numbers
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
import fused_layernorm_cuda
class FusedLayerNormAffineFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias, normalized_shape, eps):
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
bias_ = bias.contiguous()
output, mean, invvar = fused_layernorm_cuda.forward_affine(
input_, ctx.normalized_shape, weight_, bias_, ctx.eps)
ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, weight_, bias_, mean, invvar = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
grad_input, grad_weight, grad_bias = fused_layernorm_cuda.backward_affine(
grad_output.contiguous(), mean, invvar,
input_, ctx.normalized_shape,
weight_, bias_, ctx.eps)
return grad_input, grad_weight, grad_bias, None, None
class FusedLayerNormFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, normalized_shape, eps):
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
output, mean, invvar = fused_layernorm_cuda.forward(
input_, ctx.normalized_shape, ctx.eps)
ctx.save_for_backward(input_, mean, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, mean, invvar = ctx.saved_tensors
grad_input = None
grad_input = fused_layernorm_cuda.backward(
grad_output.contiguous(), mean, invvar,
input_, ctx.normalized_shape,
ctx.eps)
return grad_input, None, None
def fused_layer_norm_affine(input, normalized_shape, weight, bias, eps=1e-6):
return FusedLayerNormAffineFunction.apply(input, weight, bias, normalized_shape, eps)
def fused_layer_norm(input, normalized_shape, eps=1e-6):
return FusedLayerNormFunction.apply(input, normalized_shape, eps)
class FusedLayerNorm(torch.nn.Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
Currently only runs on cuda() tensors.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
    The mean and standard deviation are calculated separately over the last
    certain number of dimensions, which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
        Unlike Batch Normalization and Instance Normalization, which apply
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1]
\times \ldots \times \text{normalized}\_\text{shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = apex.normalization.FusedLayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = apex.normalization.FusedLayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = apex.normalization.FusedLayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super(FusedLayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
if not input.is_cuda:
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
if self.elementwise_affine:
return FusedLayerNormAffineFunction.apply(
input, self.weight, self.bias, self.normalized_shape,self.eps)
else:
return FusedLayerNormFunction.apply(input, self.normalized_shape, self.eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
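# Quick parity sketch against the PyTorch reference implementation (CUDA only;
# the sizes and tolerance below are illustrative):
#
#   x = torch.randn(4, 16, 32, device="cuda")
#   m = FusedLayerNorm(32).cuda()
#   ref = F.layer_norm(x, m.normalized_shape, m.weight, m.bias, m.eps)
#   assert torch.allclose(m(x), ref, atol=1e-4)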
|
COCO-LM/fairseq/fused_ops/fused_ops/layernorm/fused_layer_norm.py/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/fused_ops/layernorm/fused_layer_norm.py",
"repo_id": "COCO-LM",
"token_count": 2430
}
| 199 |
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""
from argparse import Namespace # noqa
def main():
ns1 = eval(input("Namespace 1: "))
ns2 = eval(input("Namespace 2: "))
def keys(ns):
ks = set()
for k in dir(ns):
if not k.startswith("_"):
ks.add(k)
return ks
k1 = keys(ns1)
k2 = keys(ns2)
def print_keys(ks, ns1, ns2=None):
for k in ks:
if ns2 is None:
print("{}\t{}".format(k, getattr(ns1, k, None)))
else:
print(
"{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
)
print("Keys unique to namespace 1:")
print_keys(k1 - k2, ns1)
print()
print("Keys unique to namespace 2:")
print_keys(k2 - k1, ns2)
print()
print("Overlapping keys with different values:")
ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
print_keys(ks, ns1, ns2)
print()
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/scripts/compare_namespaces.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/compare_namespaces.py",
"repo_id": "COCO-LM",
"token_count": 553
}
| 200 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
import site
site.ENABLE_USER_SITE = True
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
)
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
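# Typical editable install from the repository root (standard pip/setuptools usage,
# not a project-specific flag):
#
#   pip install --editable .
#   # optionally build the Cython/CUDA extensions in place:
#   python setup.py build_ext --inplace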
|
COCO-LM/fairseq/setup.py/0
|
{
"file_path": "COCO-LM/fairseq/setup.py",
"repo_id": "COCO-LM",
"token_count": 3812
}
| 201 |
#!/usr/bin/env python3
# import models/encoder/decoder to be tested
from examples.speech_recognition.models.vggtransformer import (
TransformerDecoder,
VGGTransformerEncoder,
VGGTransformerModel,
vggtransformer_1,
vggtransformer_2,
vggtransformer_base,
)
# import base test class
from .asr_test_base import (
DEFAULT_TEST_VOCAB_SIZE,
TestFairseqDecoderBase,
TestFairseqEncoderBase,
TestFairseqEncoderDecoderModelBase,
get_dummy_dictionary,
get_dummy_encoder_output,
get_dummy_input,
)
class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_1 uses 14 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_1, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_2 uses 16 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_2, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_base uses 12 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_base, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerEncoderTest(TestFairseqEncoderBase):
def setUp(self):
super().setUp()
self.setUpInput(get_dummy_input(T=50, D=80, B=5))
def test_forward(self):
print("1. test standard vggtransformer")
self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
super().test_forward()
print("2. test vggtransformer with limited right context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(-1, 5)
)
)
super().test_forward()
print("3. test vggtransformer with limited left context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(5, -1)
)
)
super().test_forward()
print("4. test vggtransformer with limited right context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(-1, 12),
transformer_sampling=(2, 2),
)
)
super().test_forward()
print("5. test vggtransformer with windowed context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(12, 12),
transformer_sampling=(2, 2),
)
)
class TransformerDecoderTest(TestFairseqDecoderBase):
def setUp(self):
super().setUp()
dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
decoder = TransformerDecoder(dict)
dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
self.setUpDecoder(decoder)
self.setUpInput(dummy_encoder_output)
self.setUpPrevOutputTokens()
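# These cases rely on the shared harness imported above from
# tests/speech_recognition/asr_test_base.py; an illustrative way to run just this file:
#
#   python -m pytest tests/speech_recognition/test_vggtransformer.py -q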
|
COCO-LM/fairseq/tests/speech_recognition/test_vggtransformer.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/speech_recognition/test_vggtransformer.py",
"repo_id": "COCO-LM",
"token_count": 2141
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.transformer import TransformerModel
from tests.test_sequence_generator import get_dummy_task_and_parser
class TestInferenceDropout(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
TransformerModel.add_args(self.parser)
self.args = self.parser.parse_args([])
self.args.encoder_layers = 2
self.args.decoder_layers = 1
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_sets_inference_dropout_to_true(self):
self.args.retain_dropout = True
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.apply_during_inference
def test_inference_dropout_false_by_default(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert not self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
def test_applies_training_mode(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
assert self.transformer_model.encoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.training
self.transformer_model.eval()
assert not self.transformer_model.decoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.training
def test_retain_modules(self):
self.args.retain_dropout = True
self.args.retain_dropout_modules = [
"TransformerEncoder",
"TransformerEncoderLayer",
]
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
|
COCO-LM/fairseq/tests/test_inference_dropout.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_inference_dropout.py",
"repo_id": "COCO-LM",
"token_count": 1291
}
| 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import tests.utils as test_utils
import torch
from fairseq.sequence_scorer import SequenceScorer
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
"source": torch.LongTensor([w1, w2, eos]),
"target": torch.LongTensor([w1, w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]
),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample["id"].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]["target"])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_sequence_scorer.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_sequence_scorer.py",
"repo_id": "COCO-LM",
"token_count": 2352
}
| 204 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
## Finetuning COCO-LM for question-answering on SQuAD.
## The script is largely adapted from the huggingface transformers library.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import timeit
import logging
import os
import random
import json
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import WEIGHTS_NAME
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_for_squad import (read_squad_examples, convert_examples_to_features,
RawResult, write_predictions,
RawResultExtended, write_predictions_extended)
# The following import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
from cocolm.tokenization_cocolm import COCOLMTokenizer
from cocolm.configuration_cocolm import COCOLMConfig
from cocolm.modeling_cocolm import COCOLMForQuestionAnswering
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'cocolm': (COCOLMConfig, COCOLMForQuestionAnswering, COCOLMTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def get_optimizer_grouped_parameters(
model, weight_decay, learning_rate, layer_decay, n_layers, layer_wise_weight_decay):
assert isinstance(model, torch.nn.Module)
groups = {}
num_max_layer = 0
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
for para_name, para_var in model.named_parameters():
if any(nd in para_name for nd in no_decay):
weight_decay_in_this_group = 0.0
else:
weight_decay_in_this_group = weight_decay
if para_name.startswith('cocolm.embedding') or para_name == 'cocolm.rel_pos_bias.weight':
depth = 0
elif para_name.startswith('cocolm.encoder.layer'):
depth = int(para_name.split('.')[3]) + 1
num_max_layer = max(num_max_layer, depth)
elif para_name.startswith('task_layer'):
depth = n_layers + 2
else:
if layer_decay < 1.0:
raise NotImplementedError()
depth = 0
if layer_decay < 1.0 and layer_wise_weight_decay:
weight_decay_in_this_group *= (layer_decay ** (n_layers + 2 - depth))
if layer_decay < 1.0:
group_name = "layer{}_decay{}".format(depth, weight_decay_in_this_group)
else:
group_name = "weight_decay{}".format(weight_decay_in_this_group)
if group_name not in groups:
group = {
"params": [para_var],
"weight_decay": weight_decay_in_this_group,
}
if layer_decay < 1.0:
group["lr"] = learning_rate * (layer_decay ** (n_layers + 2 - depth))
groups[group_name] = group
else:
group = groups[group_name]
group["params"].append(para_var)
assert num_max_layer == n_layers
return list(groups.values())
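# Worked example of the layer-wise decay above (numbers are illustrative): with
# learning_rate=3e-5, layer_decay=0.9 and n_layers=12, depth runs from 0
# (embeddings) to 14 (task layer), so each group's lr is 3e-5 * 0.9 ** (14 - depth):
#
#   embeddings / rel_pos_bias (depth 0):   3e-5 * 0.9 ** 14 ≈ 6.86e-6
#   6th encoder block         (depth 6):   3e-5 * 0.9 ** 8  ≈ 1.29e-5
#   task layer                (depth 14):  3e-5 * 0.9 ** 0  = 3e-5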
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0] and args.log_dir:
tb_writer = SummaryWriter(log_dir=args.log_dir)
else:
tb_writer = None
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=1)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
optimizer_grouped_parameters = get_optimizer_grouped_parameters(
model=model, weight_decay=args.weight_decay, learning_rate=args.learning_rate,
layer_decay=args.layer_decay, n_layers=model.config.num_hidden_layers,
layer_wise_weight_decay=args.layer_wise_weight_decay,
)
warmup_steps = t_total * args.warmup_ratio
correct_bias = not args.disable_bias_correct
logger.info("*********** Optimizer setting: ***********")
logger.info("Learning rate = %.10f" % args.learning_rate)
logger.info("Adam epsilon = %.10f" % args.adam_epsilon)
logger.info("Adam_betas = (%.4f, %.4f)" % (float(args.adam_betas[0]), float(args.adam_betas[1])))
logger.info("Correct_bias = %s" % str(correct_bias))
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
betas=(float(args.adam_betas[0]), float(args.adam_betas[1])),
correct_bias=correct_bias,
)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
amp_state_dict = amp.state_dict()
amp_state_dict['loss_scaler0']['loss_scale'] = args.fp16_init_loss_scale
logger.info("Set fp16_init_loss_scale to %.1f" % args.fp16_init_loss_scale)
amp.load_state_dict(amp_state_dict)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 1
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'start_positions': batch[3],
'end_positions': batch[4]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
logs = {}
logs['learning_rate'] = scheduler.get_lr()[0]
logs['loss'] = (tr_loss - logging_loss) / args.logging_steps
if tb_writer is not None:
tb_writer.add_scalar('lr', max(scheduler.get_lr()), global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
logging_loss = tr_loss
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, prefix='epoch-{}'.format(_ + 1))
if metric_for_best is None:
metric_for_best = list(results.items())[0][0]
if best_epoch is None:
best_epoch = _ + 1
best_performance = results
else:
if best_performance[metric_for_best] < results[metric_for_best]:
best_performance = results
best_epoch = _ + 1
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not args.do_not_save:
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
if args.local_rank in [-1, 0] and tb_writer is not None:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, chosen by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, prefix=""):
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
if args.disable_tqdm:
epoch_iterator = eval_dataloader
else:
epoch_iterator = tqdm(eval_dataloader, desc="Evaluating")
for batch in epoch_iterator:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2]
}
example_indices = batch[3]
outputs = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
result = RawResult(unique_id = unique_id,
start_logits = to_list(outputs[0][0][i]),
end_logits = to_list(outputs[0][1][i]))
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
else:
output_null_log_odds_file = None
tokens_to_text = None
if hasattr(tokenizer, 'convert_tokens_to_string'):
tokens_to_text = tokenizer.convert_tokens_to_string
write_predictions(examples, features, all_results, args.n_best_size,
args.max_answer_length, args.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, args.verbose_logging,
args.version_2_with_negative, args.null_score_diff_threshold, tokens_to_text=tokens_to_text)
# Evaluate with the official SQuAD script
evaluate_options = EVAL_OPTS(data_file=args.predict_file,
pred_file=output_prediction_file,
na_prob_file=output_null_log_odds_file)
results = evaluate_on_squad(evaluate_options)
eval_output_dir = os.path.join(args.output_dir, prefix)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write(json.dumps(results, indent=2))
logger.info("Results = %s" % json.dumps(results, indent=2))
return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
input_file = args.predict_file if evaluate else args.train_file
cached_features_file = args.cached_train_file if not evaluate else args.cached_dev_file
if cached_features_file is None:
if args.disable_auto_cache and args.local_rank != -1:
logger.warning("Please cache the features in DDP mode !")
raise RuntimeError()
if not args.disable_auto_cache:
cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length)))
if cached_features_file is not None and os.path.exists(cached_features_file) and not args.overwrite_cache:
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
pad_token_id=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
cls_token_segment_id=0,
pad_token_segment_id=0,
cls_token_at_end=False,
sequence_a_is_doc=False,
add_two_separators=True if args.model_type in ['cocolm'] else False)
if args.local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
all_is_impossible = torch.tensor([1 if f.is_impossible else 0 for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions,
all_cls_index, all_p_mask, all_is_impossible)
if output_examples:
return dataset, examples, features
return dataset
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
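# Illustrative usage (not in the original file): eval_str_list("0.9,0.999") evaluates the
# string to the tuple (0.9, 0.999) and returns [0.9, 0.999]; a scalar string such as "0.9"
# is not iterable, so the TypeError branch returns [0.9].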
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_file", default=None, type=str, required=True,
help="SQuAD json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str, required=True,
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
parser.add_argument("--do_not_save", action='store_true',
help="Disable save models after each epoch. ")
parser.add_argument("--log_dir", default=None, type=str,
help="The output directory where the log will be written.")
parser.add_argument("--cached_train_file", default=None, type=str,
help="Path to cache the train set features. ")
parser.add_argument("--cached_dev_file", default=None, type=str,
help="Path to cache the dev set features. ")
parser.add_argument('--disable_auto_cache', action='store_true',
                        help='Disable automatic caching of the training/dev features.')
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name_or_path", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument('--version_2_with_negative', action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold', type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--layer_wise_weight_decay", action='store_true',
help="Apply the weight decay for each layer with a decayed rate (--layer_decay). ")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--layer_decay", default=1.0, type=float,
help="Layer decay rate for the layer-wise learning rate. ")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument('--adam_betas', '--adam_beta', default='0.9,0.999', type=eval_str_list, metavar='B',
help='betas for Adam optimizer')
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--disable_bias_correct", action='store_true',
help="Disable the bias correction items. ")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
parser.add_argument("--beam_size", default=20, type=int,
help="beam size when doing joint predictions. ")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--fp16_init_loss_scale', type=float, default=128.0,
help="For fp16: initial value for loss scale.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(os.path.join(args.output_dir, 'training_args.json'), mode='w', encoding="utf-8") as writer:
writer.write(json.dumps(args.__dict__, indent=2, sort_keys=True))
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer_name_or_path = args.tokenizer_name_or_path if args.tokenizer_name_or_path else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(tokenizer_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
model_type=args.model_type,
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
logger.info("Model = %s" % str(model))
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
if not args.do_not_save:
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model that you have fine-tuned
# model = model_class.from_pretrained(args.output_dir, force_download=True)
# model.to(args.device)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = []
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint, force_download=True)
model.to(args.device)
# Evaluate
result = evaluate(args, model, tokenizer, prefix=global_step)
result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
result["checkpoint"] = checkpoint
results.append(result)
# logger.info("Results: %s" % (json.dumps(results, indent=2)))
result_file = os.path.join(args.output_dir, "results.json")
with open(result_file, mode="w", encoding="utf-8") as writer:
writer.write(json.dumps(results, indent=2))
return results
if __name__ == "__main__":
main()
|
COCO-LM/huggingface/run_squad.py/0
|
{
"file_path": "COCO-LM/huggingface/run_squad.py",
"repo_id": "COCO-LM",
"token_count": 16402
}
| 205 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Xiaoyi Dong
# ------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.registry import register_model
from einops.layers.torch import Rearrange
import numpy as np
import time
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
import torch.utils.checkpoint as checkpoint
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class LePEAttention(nn.Module):
def __init__(self, dim, resolution, idx, split_size=7, dim_out=None, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
"""Not supported now, since we have cls_tokens now.....
"""
super().__init__()
self.dim = dim
self.dim_out = dim_out or dim
self.resolution = resolution
self.split_size = split_size
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.idx = idx
if idx == -1:
H_sp, W_sp = self.resolution, self.resolution
elif idx == 0:
H_sp, W_sp = self.resolution, self.split_size
elif idx == 1:
W_sp, H_sp = self.resolution, self.split_size
else:
print ("ERROR MODE", idx)
exit(0)
self.H_sp = H_sp
self.W_sp = W_sp
self.H_sp_ = self.H_sp
self.W_sp_ = self.W_sp
stride = 1
self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1,groups=dim)
self.attn_drop = nn.Dropout(attn_drop)
def im2cswin(self, x):
B, C, H, W = x.shape
x = img2windows(x, self.H_sp, self.W_sp)
x = x.reshape(-1, self.H_sp* self.W_sp, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3).contiguous()
return x
def get_rpe(self, x, func):
B, C, H, W = x.shape
H_sp, W_sp = self.H_sp, self.W_sp
x = x.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
x = x.permute(0, 2, 4, 1, 3, 5).contiguous().reshape(-1, C, H_sp, W_sp) ### B', C, H', W'
rpe = func(x) ### B', C, H', W'
rpe = rpe.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous()
x = x.reshape(-1, self.num_heads, C // self.num_heads, self.H_sp* self.W_sp).permute(0, 1, 3, 2).contiguous()
return x, rpe
def forward(self, temp):
"""
x: B N C
mask: B N N
"""
B, _, C, H, W = temp.shape
idx = self.idx
if idx == -1:
H_sp, W_sp = H, W
elif idx == 0:
H_sp, W_sp = H, self.split_size
elif idx == 1:
H_sp, W_sp = self.split_size, W
else:
print ("ERROR MODE in forward", idx)
exit(0)
self.H_sp = H_sp
self.W_sp = W_sp
### padding for split window
H_pad = (self.H_sp - H % self.H_sp) % self.H_sp
W_pad = (self.W_sp - W % self.W_sp) % self.W_sp
top_pad = H_pad//2
down_pad = H_pad - top_pad
left_pad = W_pad//2
right_pad = W_pad - left_pad
H_ = H + H_pad
W_ = W + W_pad
qkv = F.pad(temp, (left_pad, right_pad, top_pad, down_pad)) ### B,3,C,H',W'
qkv = qkv.permute(1, 0, 2, 3, 4)
q,k,v = qkv[0], qkv[1], qkv[2]
q = self.im2cswin(q)
k = self.im2cswin(k)
v, rpe = self.get_rpe(v, self.get_v)
### Local attention
q = q * self.scale
attn = (q @ k.transpose(-2, -1)) # B head N C @ B head C N --> B head N N
attn = nn.functional.softmax(attn, dim=-1, dtype=attn.dtype)
attn = self.attn_drop(attn)
x = (attn @ v) + rpe
x = x.transpose(1, 2).reshape(-1, self.H_sp* self.W_sp, C) # B head N N @ B head N C
### Window2Img
x = windows2img(x, self.H_sp, self.W_sp, H_, W_) # B H_ W_ C
x = x[:, top_pad:H+top_pad, left_pad:W+left_pad, :]
x = x.reshape(B, -1, C)
return x
class CSWinBlock(nn.Module):
def __init__(self, dim, patches_resolution, num_heads,
split_size=7, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm,
last_stage=False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.patches_resolution = patches_resolution
self.split_size = split_size
self.mlp_ratio = mlp_ratio
self.qkv = nn.Linear(dim, dim * 3, bias=True)
self.norm1 = norm_layer(dim)
if last_stage:
self.branch_num = 1
else:
self.branch_num = 2
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(drop)
if last_stage:
self.attns = nn.ModuleList([
LePEAttention(
dim, resolution=self.patches_resolution, idx = -1,
split_size=split_size, num_heads=num_heads, dim_out=dim,
qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
for i in range(self.branch_num)])
else:
self.attns = nn.ModuleList([
LePEAttention(
dim//2, resolution=self.patches_resolution, idx = i,
split_size=split_size, num_heads=num_heads//2, dim_out=dim//2,
qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop)
for i in range(self.branch_num)])
mlp_hidden_dim = int(dim * mlp_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, out_features=dim, act_layer=act_layer, drop=drop)
self.norm2 = norm_layer(dim)
atten_mask_matrix = None
self.register_buffer("atten_mask_matrix", atten_mask_matrix)
self.H = None
self.W = None
def forward(self, x):
"""
x: B, H*W, C
"""
B, L, C = x.shape
H = self.H
W = self.W
assert L == H * W, "flatten img_tokens has wrong size"
img = self.norm1(x)
temp = self.qkv(img).reshape(B, H, W, 3, C).permute(0, 3, 4, 1, 2)
if self.branch_num == 2:
x1 = self.attns[0](temp[:,:,:C//2,:,:])
x2 = self.attns[1](temp[:,:,C//2:,:,:])
attened_x = torch.cat([x1,x2], dim=2)
else:
attened_x = self.attns[0](temp)
attened_x = self.proj(attened_x)
x = x + self.drop_path(attened_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def img2windows(img, H_sp, W_sp):
"""
img: B C H W
"""
B, C, H, W = img.shape
img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp* W_sp, C)
return img_perm
def windows2img(img_splits_hw, H_sp, W_sp, H, W):
"""
img_splits_hw: B' H W C
"""
B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp))
img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1)
img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return img
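# Shape sketch (illustrative): for an input of shape (B, C, H, W), img2windows returns
# (B * H//H_sp * W//W_sp, H_sp*W_sp, C) window tokens, and windows2img inverts the partition
# back to (B, H, W, C), so
#   windows2img(img2windows(x, H_sp, W_sp), H_sp, W_sp, H, W)
# recovers the original spatial layout with channels moved to the last dimension.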
class Merge_Block(nn.Module):
def __init__(self, dim, dim_out, norm_layer=nn.LayerNorm):
super().__init__()
self.conv = nn.Conv2d(dim, dim_out, 3, 2, 1)
self.norm = norm_layer(dim_out)
def forward(self, x, H, W):
B, new_HW, C = x.shape
x = x.transpose(-2, -1).contiguous().view(B, C, H, W)
x = self.conv(x)
B, C, H, W = x.shape
x = x.view(B, C, -1).transpose(-2, -1).contiguous()
x = self.norm(x)
return x, H, W
@BACKBONES.register_module()
class CSWin(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=64, depth=[1,2,21,1], split_size = 7,
num_heads=[1,2,4,8], mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, use_chk=False):
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
heads=num_heads
self.use_chk = use_chk
self.stage1_conv_embed = nn.Sequential(
nn.Conv2d(in_chans, embed_dim, 7, 4, 2),
Rearrange('b c h w -> b (h w) c', h = img_size//4, w = img_size//4),
nn.LayerNorm(embed_dim)
)
self.norm1 = nn.LayerNorm(embed_dim)
curr_dim = embed_dim
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, np.sum(depth))] # stochastic depth decay rule
self.stage1 = nn.ModuleList([
CSWinBlock(
dim=curr_dim, num_heads=heads[0], patches_resolution=224//4, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[0],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth[0])])
self.merge1 = Merge_Block(curr_dim, curr_dim*(heads[1]//heads[0]))
curr_dim = curr_dim*(heads[1]//heads[0])
self.norm2 = nn.LayerNorm(curr_dim)
self.stage2 = nn.ModuleList(
[CSWinBlock(
dim=curr_dim, num_heads=heads[1], patches_resolution=224//8, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[1],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:1])+i], norm_layer=norm_layer)
for i in range(depth[1])])
self.merge2 = Merge_Block(curr_dim, curr_dim*(heads[2]//heads[1]))
curr_dim = curr_dim*(heads[2]//heads[1])
self.norm3 = nn.LayerNorm(curr_dim)
temp_stage3 = []
temp_stage3.extend(
[CSWinBlock(
dim=curr_dim, num_heads=heads[2], patches_resolution=224//16, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[2],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:2])+i], norm_layer=norm_layer)
for i in range(depth[2])])
self.stage3 = nn.ModuleList(temp_stage3)
self.merge3 = Merge_Block(curr_dim, curr_dim*(heads[3]//heads[2]))
curr_dim = curr_dim*(heads[3]//heads[2])
self.stage4 = nn.ModuleList(
[CSWinBlock(
dim=curr_dim, num_heads=heads[3], patches_resolution=224//32, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[-1],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:-1])+i], norm_layer=norm_layer, last_stage=True)
for i in range(depth[-1])])
self.norm4 = norm_layer(curr_dim)
def init_weights(self, pretrained=None):
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def save_out(self, x, norm, H, W):
x = norm(x)
B, N, C = x.shape
x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()
return x
def forward_features(self, x):
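        # Returns a 4-tuple of feature maps at strides 4, 8, 16 and 32 (the stage-1 conv embed
        # downsamples by 4, each Merge_Block by a further 2), suitable for pyramid-style decoders.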
B = x.shape[0]
x = self.stage1_conv_embed[0](x) ### B, C, H, W
B, C, H, W = x.size()
x = x.reshape(B, C, -1).transpose(-1,-2).contiguous()
x = self.stage1_conv_embed[2](x)
out = []
for blk in self.stage1:
blk.H = H
blk.W = W
if self.use_chk:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
out.append(self.save_out(x, self.norm1, H, W))
for pre, blocks, norm in zip([self.merge1, self.merge2, self.merge3],
[self.stage2, self.stage3, self.stage4],
[self.norm2 , self.norm3 , self.norm4 ]):
x, H, W = pre(x, H, W)
for blk in blocks:
blk.H = H
blk.W = W
if self.use_chk:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
out.append(self.save_out(x, norm, H, W))
return tuple(out)
def forward(self, x):
x = self.forward_features(x)
return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
|
CSWin-Transformer/segmentation/backbone/cswin_transformer.py/0
|
{
"file_path": "CSWin-Transformer/segmentation/backbone/cswin_transformer.py",
"repo_id": "CSWin-Transformer",
"token_count": 7882
}
| 206 |
name: climaX
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- appdirs=1.4.4=pyh9f0ad1d_0
- asciitree=0.3.3=py_2
- blas=1.0=mkl
- bokeh=2.4.3=pyhd8ed1ab_3
- bottleneck=1.3.6=py38h7e4f40d_0
- brotlipy=0.7.0=py38h27cfd23_1003
- bzip2=1.0.8=h7b6447c_0
- c-ares=1.18.1=h7f98852_0
- ca-certificates=2022.12.7=ha878542_0
- certifi=2022.12.7=pyhd8ed1ab_0
- cf_xarray=0.7.9=pyhd8ed1ab_0
- cffi=1.15.1=py38h5eee18b_3
- cftime=1.6.2=py38h26c90d9_1
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- click=8.1.3=unix_pyhd8ed1ab_2
- cloudpickle=2.2.1=pyhd8ed1ab_0
- cryptography=38.0.4=py38h9ce1e76_0
- cudatoolkit=11.3.1=h2bc3f7f_2
- curl=7.87.0=h6312ad2_0
- cytoolz=0.12.0=py38h0a891b7_1
- dask=2023.1.1=pyhd8ed1ab_0
- dask-core=2023.1.1=pyhd8ed1ab_0
- distributed=2023.1.1=pyhd8ed1ab_0
- entrypoints=0.4=pyhd8ed1ab_0
- esmf=8.4.0=nompi_hdb2cfa9_3
- esmpy=8.4.0=nompi_py38h2b78397_1
- fasteners=0.17.3=pyhd8ed1ab_0
- ffmpeg=4.3=hf484d3e_0
- fftw=3.3.10=nompi_hf0379b8_106
- flit-core=3.6.0=pyhd3eb1b0_0
- freetype=2.12.1=h4a9f257_0
- fsspec=2023.1.0=pyhd8ed1ab_0
- geos=3.11.1=h27087fc_0
- giflib=5.2.1=h5eee18b_1
- gmp=6.2.1=h295c915_3
- gnutls=3.6.15=he1e5248_0
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.2=nompi_h2386368_101
- heapdict=1.0.1=py_0
- idna=3.4=py38h06a4308_0
- importlib-metadata=6.0.0=pyha770c72_0
- intel-openmp=2021.4.0=h06a4308_3561
- jinja2=3.1.2=pyhd8ed1ab_1
- jpeg=9e=h7f8727e_0
- keyutils=1.6.1=h166bdaf_0
- krb5=1.20.1=hf9c8cef_0
- lame=3.100=h7b6447c_0
- lcms2=2.12=h3be6417_0
- ld_impl_linux-64=2.38=h1181459_1
- lerc=3.0=h295c915_0
- libaec=1.0.6=hcb278e6_1
- libcurl=7.87.0=h6312ad2_0
- libdeflate=1.8=h7f8727e_5
- libedit=3.1.20191231=he28a2e2_2
- libev=4.33=h516909a_1
- libffi=3.4.2=h6a678d5_6
- libgcc-ng=12.2.0=h65d4601_19
- libgfortran-ng=12.2.0=h69a702a_19
- libgfortran5=12.2.0=h337968e_19
- libiconv=1.16=h7f8727e_2
- libidn2=2.3.2=h7f8727e_0
- libllvm11=11.1.0=he0ac6c6_5
- libnetcdf=4.8.1=nompi_h21705cb_104
- libnghttp2=1.51.0=hdcd2b5c_0
- libpng=1.6.37=hbc83047_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-ng=12.2.0=h46fd767_19
- libtasn1=4.16.0=h27cfd23_0
- libtiff=4.5.0=h6a678d5_1
- libunistring=0.9.10=h27cfd23_0
- libwebp=1.2.4=h11a3e52_0
- libwebp-base=1.2.4=h5eee18b_0
- libzip=1.9.2=hc869a4a_1
- libzlib=1.2.13=h166bdaf_4
- llvm-openmp=15.0.7=h0cdce71_0
- llvmlite=0.39.1=py38h38d86a4_1
- locket=1.0.0=pyhd8ed1ab_0
- lz4=4.2.0=py38hd012fdc_0
- lz4-c=1.9.4=h6a678d5_0
- markupsafe=2.1.2=py38h1de0b5d_0
- mkl=2021.4.0=h06a4308_640
- mkl-service=2.4.0=py38h7f8727e_0
- mkl_fft=1.3.1=py38hd3c417c_0
- mkl_random=1.2.2=py38h51133e4_0
- msgpack-python=1.0.4=py38h43d8883_1
- ncurses=6.4=h6a678d5_0
- netcdf-fortran=4.6.0=nompi_he1eeb6f_102
- netcdf4=1.6.2=nompi_py38h2250339_100
- nettle=3.7.3=hbbd107a_1
- numba=0.56.4=py38h9a4aae9_0
- numcodecs=0.11.0=py38h8dc9893_1
- numpy=1.23.5=py38h14f4228_0
- numpy-base=1.23.5=py38h31eccc5_0
- openh264=2.1.1=h4ff587b_0
- openssl=1.1.1s=h0b41bf4_1
- packaging=23.0=pyhd8ed1ab_0
- pandas=1.5.3=py38hdc8b05c_0
- partd=1.3.0=pyhd8ed1ab_0
- pillow=9.3.0=py38h6a678d5_2
- pip=22.3.1=py38h06a4308_0
- pooch=1.6.0=pyhd8ed1ab_0
- portalocker=2.3.0=py38h06a4308_0
- psutil=5.9.4=py38h0a891b7_0
- pycparser=2.21=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pysocks=1.7.1=py38h06a4308_0
- python=3.8.16=h7a1cb2a_2
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.8=2_cp38
- pytorch=1.12.1=py3.8_cuda11.3_cudnn8.3.2_0
- pytorch-mutex=1.0=cuda
- pytz=2022.7.1=pyhd8ed1ab_0
- pyyaml=6.0=py38h0a891b7_5
- readline=8.2=h5eee18b_0
- requests=2.28.1=py38h06a4308_0
- scipy=1.10.0=py38h14f4228_0
- setuptools=65.6.3=py38h06a4308_0
- shapely=1.8.5=py38hafd38ec_2
- six=1.16.0=pyhd3eb1b0_1
- sortedcontainers=2.4.0=pyhd8ed1ab_0
- sparse=0.13.0=pyhd8ed1ab_0
- sqlite=3.40.1=h5082296_0
- tblib=1.7.0=pyhd8ed1ab_0
- tk=8.6.12=h1ccaba5_0
- toolz=0.12.0=pyhd8ed1ab_0
- torchaudio=0.12.1=py38_cu113
- torchdata=0.4.1=py38
- torchvision=0.13.1=py38_cu113
- tornado=6.2=py38h0a891b7_1
- typing_extensions=4.4.0=py38h06a4308_0
- urllib3=1.26.14=py38h06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xarray=2023.1.0=pyhd8ed1ab_0
- xesmf=0.7.0=pyhd8ed1ab_0
- xz=5.2.10=h5eee18b_1
- yaml=0.2.5=h7f98852_2
- zarr=2.13.6=pyhd8ed1ab_0
- zict=2.2.0=pyhd8ed1ab_0
- zipp=3.12.1=pyhd8ed1ab_0
- zlib=1.2.13=h166bdaf_4
- zstd=1.5.2=ha4553b6_0
- pip:
- absl-py==1.4.0
- aiohttp==3.8.3
- aiosignal==1.3.1
- antlr4-python3-runtime==4.9.3
- async-timeout==4.0.2
- attrs==22.2.0
- cachetools==5.3.0
- docstring-parser==0.15
- einops==0.6.0
- filelock==3.9.0
- fire==0.5.0
- frozenlist==1.3.3
- google-auth==2.16.0
- google-auth-oauthlib==0.4.6
- grpcio==1.52.0
- huggingface-hub==0.12.0
- importlib-resources==5.10.2
- jsonargparse==4.19.0
- lightning-lite==1.8.0
- lightning-utilities==0.3.0
- markdown==3.4.1
- multidict==6.0.4
- oauthlib==3.2.2
- omegaconf==2.3.0
- protobuf==3.20.3
- pyasn1==0.4.8
- pyasn1-modules==0.2.8
- pytorch-lightning==1.8.0
- requests-oauthlib==1.3.1
- rsa==4.9
- tensorboard==2.11.2
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- termcolor==2.2.0
- timm==0.6.12
- torchmetrics==0.11.1
- tqdm==4.64.1
- typeshed-client==2.2.0
- werkzeug==2.2.2
- yarl==1.8.2
|
ClimaX/docker/environment.yml/0
|
{
"file_path": "ClimaX/docker/environment.yml",
"repo_id": "ClimaX",
"token_count": 3683
}
| 207 |
# Usage
## Pretraining
### Data Preparation
First install `snakemake` following [these instructions](https://snakemake.readthedocs.io/en/stable/getting_started/installation.html)
To download and regrid a CMIP6 dataset to a common resolution (e.g., 1.40625 degree), go to the corresponding directory inside `snakemake_configs` and run
```bash
snakemake all --configfile config_2m_temperature.yml --cores 8
```
This script will download and regrid the `2m_temperature` data in parallel using 8 CPU cores. Pass a different `--configfile` for other variables. After downloading and regridding, run the following script to preprocess the `.nc` files into `.npz` format for pretraining ClimaX
```bash
python src/data_preprocessing/nc2np_equally_cmip6.py \
    --dataset mpi \
    --path /data/CMIP6/MPI-ESM/1.40625deg/ \
    --num_shards 10 \
--save_dir /data/CMIP6/MPI-ESM/1.40625deg_np_10shards
```
in which `num_shards` denotes the number of chunks to break each `.nc` file into.
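A quick way to sanity-check the output (illustrative only — the paths and array layout below are assumptions, adjust them to your `--save_dir`):

```python
import glob
import numpy as np

# Hypothetical location; point this at your --save_dir.
files = sorted(glob.glob("/data/CMIP6/MPI-ESM/1.40625deg_np_10shards/**/*.npz", recursive=True))
shard = np.load(files[0])
print(list(shard.keys()))                   # variables stored in this shard
print(shard[list(shard.keys())[0]].shape)   # per-variable array, e.g. (time, lat, lon)
```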
### Training
```
python src/climax/pretrain/train.py --config <path/to/config>
```
For example, to pretrain ClimaX on MPI-ESM dataset on 8 GPUs use
```bash
python src/climax/pretrain/train.py --config configs/pretrain_climax.yaml \
--trainer.strategy=ddp --trainer.devices=8 \
--trainer.max_epochs=100 \
--data.batch_size=16 \
--model.lr=5e-4 --model.beta_1="0.9" --model.beta_2="0.95" \
--model.weight_decay=1e-5
```
!!! tip
Make sure to update the paths of the data directories in the config files (or override them via the CLI).
### Pretrained checkpoints
We provide two pretrained checkpoints: one pretrained on [5.625deg](https://huggingface.co/tungnd/climax/resolve/main/5.625deg.ckpt) data and the other on [1.40625deg](https://huggingface.co/tungnd/climax/resolve/main/1.40625deg.ckpt) data. Both checkpoints were pretrained using all 5 CMIP6 datasets.
**Usage:** We can load the checkpoint by passing the checkpoint url to the training script. See below for examples.
## Global Forecasting
### Data Preparation
First, download ERA5 data from [WeatherBench](https://dataserv.ub.tum.de/index.php/s/m1524895). The data directory should look like the following
```
5.625deg
|-- 10m_u_component_of_wind
|-- 10m_v_component_of_wind
|-- 2m_temperature
|-- constants.nc
|-- geopotential
|-- relative_humidity
|-- specific_humidity
|-- temperature
|-- toa_incident_solar_radiation
|-- total_precipitation
|-- u_component_of_wind
|-- v_component_of_wind
```
Then, preprocess the netcdf data into small numpy files and compute important statistics
```bash
python src/data_preprocessing/nc2np_equally_era5.py \
--root_dir /mnt/data/5.625deg \
--save_dir /mnt/data/5.625deg_npz \
--start_train_year 1979 --start_val_year 2016 \
--start_test_year 2017 --end_year 2019 --num_shards 8
```
The preprocessed data directory will look like the following
```
5.625deg_npz
|-- train
|-- val
|-- test
|-- normalize_mean.npz
|-- normalize_std.npz
|-- lat.npy
|-- lon.npy
```
### Training
To finetune ClimaX for global forecasting, use
```
python src/climax/global_forecast/train.py --config <path/to/config>
```
For example, to finetune ClimaX on 8 GPUs use
```bash
python src/climax/global_forecast/train.py --config configs/global_forecast_climax.yaml \
--trainer.strategy=ddp --trainer.devices=8 \
--trainer.max_epochs=50 \
--data.root_dir=/mnt/data/5.625deg_npz \
--data.predict_range=72 --data.out_variables=['z_500','t_850','t2m'] \
--data.batch_size=16 \
--model.pretrained_path='https://huggingface.co/tungnd/climax/resolve/main/5.625deg.ckpt' \
--model.lr=5e-7 --model.beta_1="0.9" --model.beta_2="0.99" \
--model.weight_decay=1e-5
```
To train ClimaX from scratch, set `--model.pretrained_path=""`.
## Regional Forecasting
### Data Preparation
We use the same ERA5 data as in global forecasting and extract the regional data on the fly during training. If you have already downloaded and preprocessed the data, you do not have to do it again.
### Training
To finetune ClimaX for regional forecasting, use
```
python src/climax/regional_forecast/train.py --config <path/to/config>
```
For example, to finetune ClimaX on North America using 8 GPUs, use
```bash
python src/climax/regional_forecast/train.py --config configs/regional_forecast_climax.yaml \
--trainer.strategy=ddp --trainer.devices=8 \
--trainer.max_epochs=50 \
--data.root_dir=/mnt/data/5.625deg_npz \
    --data.region="NorthAmerica" \
--data.predict_range=72 --data.out_variables=['z_500','t_850','t2m'] \
--data.batch_size=16 \
--model.pretrained_path='https://huggingface.co/tungnd/climax/resolve/main/1.40625deg.ckpt' \
--model.lr=5e-7 --model.beta_1="0.9" --model.beta_2="0.99" \
--model.weight_decay=1e-5
```
To train ClimaX from scratch, set `--model.pretrained_path=""`.
## Climate Projection
### Data Preparation
First, download [ClimateBench](https://doi.org/10.5281/zenodo.5196512) data. ClimaX can work with either the original ClimateBench data or the regridded version. In the experiments in the paper, we regridded the ClimateBench data to 5.625 degrees. To do that, run
```bash
python src/data_preprocessing/regrid_climatebench.py /mnt/data/climatebench/train_val \
--save_path /mnt/data/climatebench/5.625deg/train_val --ddeg_out 5.625
```
and
```bash
python src/data_preprocessing/regrid_climatebench.py /mnt/data/climatebench/test \
--save_path /mnt/data/climatebench/5.625deg/test --ddeg_out 5.625
```
### Training
To finetune ClimaX for climate projection, use
```
python src/climax/climate_projection/train.py --config <path/to/config>
```
For example, to finetune ClimaX on 8 GPUs use
```bash
python src/climax/climate_projection/train.py --config configs/climate_projection.yaml \
--trainer.strategy=ddp --trainer.devices=8 \
--trainer.max_epochs=50 \
--data.root_dir=/mnt/data/climatebench/5.625deg \
--data.out_variables="tas" \
--data.batch_size=16 \
--model.pretrained_path='https://huggingface.co/tungnd/climax/resolve/main/5.625deg.ckpt' \
--model.out_vars="tas" \
--model.lr=5e-4 --model.beta_1="0.9" --model.beta_2="0.99" \
--model.weight_decay=1e-5
```
To train ClimaX from scratch, set `--model.pretrained_path=""`.
## Visualization
Coming soon
|
ClimaX/docs/usage.md/0
|
{
"file_path": "ClimaX/docs/usage.md",
"repo_id": "ClimaX",
"token_count": 2377
}
| 208 |
datadir: /data/CMIP6/CMCC
name: v_component_of_wind
cmip_name: va
era_name: v
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/CMCC/config_v_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/CMCC/config_v_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 64
}
| 209 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: temperature
cmip_name: ta
era_name: t
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_temperature.yml",
"repo_id": "ClimaX",
"token_count": 116
}
| 210 |
from typing import Any, Dict
import numpy as np
import torch
from pytorch_lightning import LightningModule
from climax.climate_projection.arch import ClimaXClimateBench
from climax.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from climax.utils.metrics import (
mse,
lat_weighted_mse_val,
lat_weighted_nrmse,
lat_weighted_rmse,
)
from climax.utils.pos_embed import interpolate_pos_embed
from torchvision.transforms import transforms
class ClimateProjectionModule(LightningModule):
"""Lightning module for climate projection with the ClimaXClimateBench model.
Args:
net (ClimaXClimateBench): ClimaXClimateBench model.
pretrained_path (str, optional): Path to pre-trained checkpoint.
lr (float, optional): Learning rate.
beta_1 (float, optional): Beta 1 for AdamW.
beta_2 (float, optional): Beta 2 for AdamW.
weight_decay (float, optional): Weight decay for AdamW.
warmup_epochs (int, optional): Number of warmup epochs.
max_epochs (int, optional): Number of total epochs.
warmup_start_lr (float, optional): Starting learning rate for warmup.
eta_min (float, optional): Minimum learning rate.
"""
def __init__(
self,
net: ClimaXClimateBench,
pretrained_path: str = "",
lr: float = 5e-4,
beta_1: float = 0.9,
beta_2: float = 0.99,
weight_decay: float = 1e-5,
warmup_epochs: int = 60,
max_epochs: int = 600,
warmup_start_lr: float = 1e-8,
eta_min: float = 1e-8,
):
super().__init__()
self.save_hyperparameters(logger=False, ignore=["net"])
self.net = net
if len(pretrained_path) > 0:
self.load_mae_weights(pretrained_path)
def load_mae_weights(self, pretrained_path):
if pretrained_path.startswith("http"):
checkpoint = torch.hub.load_state_dict_from_url(pretrained_path, map_location=torch.device("cpu"))
else:
checkpoint = torch.load(pretrained_path, map_location=torch.device("cpu"))
print("Loading pre-trained checkpoint from: %s" % pretrained_path)
checkpoint_model = checkpoint["state_dict"]
# interpolate positional embedding
interpolate_pos_embed(self.net, checkpoint_model, new_size=self.net.img_size)
state_dict = self.state_dict()
if self.net.parallel_patch_embed:
if "token_embeds.proj_weights" not in checkpoint_model.keys():
raise ValueError(
"Pretrained checkpoint does not have token_embeds.proj_weights for parallel processing. Please convert the checkpoints first or disable parallel patch_embed tokenization."
)
for k in list(checkpoint_model.keys()):
if "channel" in k:
checkpoint_model[k.replace("channel", "var")] = checkpoint_model[k]
del checkpoint_model[k]
if 'token_embeds' in k or 'head' in k: # initialize embedding from scratch
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
continue
for k in list(checkpoint_model.keys()):
if k not in state_dict.keys() or checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# load pre-trained model
msg = self.load_state_dict(checkpoint_model, strict=False)
print(msg)
def set_denormalization(self, mean, std):
self.denormalization = transforms.Normalize(mean, std)
def set_lat_lon(self, lat, lon):
self.lat = lat
self.lon = lon
def set_pred_range(self, r):
self.pred_range = r
def set_val_clim(self, clim):
self.val_clim = clim
def set_test_clim(self, clim):
self.test_clim = clim
def training_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
loss_dict, _ = self.net.forward(x, y, lead_times, variables, out_variables, [mse], lat=self.lat)
loss_dict = loss_dict[0]
for var in loss_dict.keys():
self.log(
"train/" + var,
loss_dict[var],
on_step=True,
on_epoch=False,
prog_bar=True,
)
loss = loss_dict['loss']
return loss
def validation_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse],
lat=self.lat,
clim=self.val_clim,
log_postfix=None
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"val/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def test_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse, lat_weighted_nrmse],
lat=self.lat,
clim=self.test_clim,
log_postfix=None
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"test/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def configure_optimizers(self):
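        # Variable, positional and time-positional embeddings are excluded from weight decay;
        # every other parameter uses the configured weight_decay.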
decay = []
no_decay = []
for name, m in self.named_parameters():
if "var_embed" in name or "pos_embed" in name or "time_pos_embed" in name:
no_decay.append(m)
else:
decay.append(m)
optimizer = torch.optim.AdamW(
[
{
"params": decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": self.hparams.weight_decay,
},
{
"params": no_decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": 0
},
]
)
lr_scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
self.hparams.warmup_epochs,
self.hparams.max_epochs,
self.hparams.warmup_start_lr,
self.hparams.eta_min,
)
scheduler = {"scheduler": lr_scheduler, "interval": "step", "frequency": 1}
return {"optimizer": optimizer, "lr_scheduler": scheduler}
|
ClimaX/src/climax/climate_projection/module.py/0
|
{
"file_path": "ClimaX/src/climax/climate_projection/module.py",
"repo_id": "ClimaX",
"token_count": 3803
}
| 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from climax.regional_forecast.datamodule import RegionalForecastDataModule
from climax.regional_forecast.module import RegionalForecastModule
from pytorch_lightning.cli import LightningCLI
def main():
# Initialize Lightning with the model and data modules, and instruct it to parse the config yml
cli = LightningCLI(
model_class=RegionalForecastModule,
datamodule_class=RegionalForecastDataModule,
seed_everything_default=42,
save_config_overwrite=True,
run=False,
auto_registry=True,
parser_kwargs={"parser_mode": "omegaconf", "error_handler": None},
)
os.makedirs(cli.trainer.default_root_dir, exist_ok=True)
cli.datamodule.set_patch_size(cli.model.get_patch_size())
normalization = cli.datamodule.output_transforms
mean_norm, std_norm = normalization.mean, normalization.std
mean_denorm, std_denorm = -mean_norm / std_norm, 1 / std_norm
cli.model.set_denormalization(mean_denorm, std_denorm)
cli.model.set_lat_lon(*cli.datamodule.get_lat_lon())
cli.model.set_pred_range(cli.datamodule.hparams.predict_range)
cli.model.set_val_clim(cli.datamodule.val_clim)
cli.model.set_test_clim(cli.datamodule.test_clim)
# fit() runs the training
cli.trainer.fit(cli.model, datamodule=cli.datamodule)
# test the trained model
cli.trainer.test(cli.model, datamodule=cli.datamodule, ckpt_path="best")
if __name__ == "__main__":
main()
|
ClimaX/src/climax/regional_forecast/train.py/0
|
{
"file_path": "ClimaX/src/climax/regional_forecast/train.py",
"repo_id": "ClimaX",
"token_count": 611
}
| 212 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.functional as F
import models.networks as networks
import util.util as util
import itertools
try:
from torch.cuda.amp import autocast
except:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class Pix2PixModel(torch.nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train):
networks.modify_commandline_options(parser, is_train)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() \
else torch.FloatTensor
self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() \
else torch.ByteTensor
self.net = torch.nn.ModuleDict(self.initialize_networks(opt))
# set loss functions
if opt.isTrain:
# vgg network
self.vggnet_fix = networks.architecture.VGG19_feature_color_torchversion(vgg_normal_correct=opt.vgg_normal_correct)
self.vggnet_fix.load_state_dict(torch.load('vgg/vgg19_conv.pth'))
self.vggnet_fix.eval()
for param in self.vggnet_fix.parameters():
param.requires_grad = False
self.vggnet_fix.to(self.opt.gpu_ids[0])
# contextual loss
self.contextual_forward_loss = networks.ContextualLoss_forward(opt)
# GAN loss
self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)
# L1 loss
self.criterionFeat = torch.nn.L1Loss()
# L2 loss
self.MSE_loss = torch.nn.MSELoss()
# setting which layer is used in the perceptual loss
if opt.which_perceptual == '5_2':
self.perceptual_layer = -1
elif opt.which_perceptual == '4_2':
self.perceptual_layer = -2
def forward(self, data, mode, GforD=None):
input_label, input_semantics, real_image, self_ref, ref_image, ref_label, ref_semantics = self.preprocess_input(data, )
generated_out = {}
if mode == 'generator':
g_loss, generated_out = self.compute_generator_loss(input_label, \
input_semantics, real_image, ref_label, \
ref_semantics, ref_image, self_ref)
out = {}
out['fake_image'] = generated_out['fake_image']
out['input_semantics'] = input_semantics
out['ref_semantics'] = ref_semantics
out['warp_out'] = None if 'warp_out' not in generated_out else generated_out['warp_out']
out['adaptive_feature_seg'] = None if 'adaptive_feature_seg' not in generated_out else generated_out['adaptive_feature_seg']
out['adaptive_feature_img'] = None if 'adaptive_feature_img' not in generated_out else generated_out['adaptive_feature_img']
out['warp_cycle'] = None if 'warp_cycle' not in generated_out else generated_out['warp_cycle']
return g_loss, out
elif mode == 'discriminator':
d_loss = self.compute_discriminator_loss(input_semantics, \
real_image, GforD, label=input_label)
return d_loss
elif mode == 'inference':
out = {}
with torch.no_grad():
out = self.inference(input_semantics, ref_semantics=ref_semantics, \
ref_image=ref_image, self_ref=self_ref, \
real_image=real_image)
out['input_semantics'] = input_semantics
out['ref_semantics'] = ref_semantics
return out
else:
raise ValueError("|mode| is invalid")
def create_optimizers(self, opt):
if opt.no_TTUR:
beta1, beta2 = opt.beta1, opt.beta2
G_lr, D_lr = opt.lr, opt.lr
else:
beta1, beta2 = 0, 0.9
G_lr, D_lr = opt.lr / 2, opt.lr * 2
optimizer_G = torch.optim.Adam(itertools.chain(self.net['netG'].parameters(), \
self.net['netCorr'].parameters()), lr=G_lr, betas=(beta1, beta2), eps=1e-3)
optimizer_D = torch.optim.Adam(itertools.chain(self.net['netD'].parameters()), \
lr=D_lr, betas=(beta1, beta2))
return optimizer_G, optimizer_D
def save(self, epoch):
util.save_network(self.net['netG'], 'G', epoch, self.opt)
util.save_network(self.net['netD'], 'D', epoch, self.opt)
util.save_network(self.net['netCorr'], 'Corr', epoch, self.opt)
def initialize_networks(self, opt):
net = {}
net['netG'] = networks.define_G(opt)
net['netD'] = networks.define_D(opt) if opt.isTrain else None
net['netCorr'] = networks.define_Corr(opt)
if not opt.isTrain or opt.continue_train:
net['netCorr'] = util.load_network(net['netCorr'], 'Corr', opt.which_epoch, opt)
net['netG'] = util.load_network(net['netG'], 'G', opt.which_epoch, opt)
if opt.isTrain:
net['netD'] = util.load_network(net['netD'], 'D', opt.which_epoch, opt)
return net
def preprocess_input(self, data):
if self.use_gpu():
for k in data.keys():
try:
data[k] = data[k].cuda()
                except Exception:  # non-tensor entries (e.g. file paths) stay on the CPU
continue
label = data['label'][:,:3,:,:].float()
label_ref = data['label_ref'][:,:3,:,:].float()
input_semantics = data['label'].float()
ref_semantics = data['label_ref'].float()
image = data['image']
ref = data['ref']
self_ref = data['self_ref']
return label, input_semantics, image, self_ref, ref, label_ref, ref_semantics
def get_ctx_loss(self, source, target):
contextual_style5_1 = torch.mean(self.contextual_forward_loss(source[-1], target[-1].detach())) * 8
contextual_style4_1 = torch.mean(self.contextual_forward_loss(source[-2], target[-2].detach())) * 4
contextual_style3_1 = torch.mean(self.contextual_forward_loss(F.avg_pool2d(source[-3], 2), F.avg_pool2d(target[-3].detach(), 2))) * 2
return contextual_style5_1 + contextual_style4_1 + contextual_style3_1
def compute_generator_loss(self, input_label, input_semantics, real_image, ref_label=None, ref_semantics=None, ref_image=None, self_ref=None):
G_losses = {}
generate_out = self.generate_fake(input_semantics, real_image, ref_semantics=ref_semantics, ref_image=ref_image, self_ref=self_ref)
generate_out['fake_image'] = generate_out['fake_image'].float()
weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
sample_weights = self_ref/(sum(self_ref)+1e-5)
sample_weights = sample_weights.view(-1, 1, 1, 1)
"""domain align"""
if 'loss_novgg_featpair' in generate_out and generate_out['loss_novgg_featpair'] is not None:
G_losses['no_vgg_feat'] = generate_out['loss_novgg_featpair']
"""warping cycle"""
if self.opt.weight_warp_cycle > 0:
warp_cycle = generate_out['warp_cycle']
scale_factor = ref_image.size()[-1] // warp_cycle.size()[-1]
ref = F.avg_pool2d(ref_image, scale_factor, stride=scale_factor)
G_losses['G_warp_cycle'] = F.l1_loss(warp_cycle, ref) * self.opt.weight_warp_cycle
"""warping loss"""
if self.opt.weight_warp_self > 0:
"""512x512"""
warp1, warp2, warp3, warp4 = generate_out['warp_out']
G_losses['G_warp_self'] = \
torch.mean(F.l1_loss(warp4, real_image, reduction='none') * sample_weights) * self.opt.weight_warp_self * 1.0 + \
torch.mean(F.l1_loss(warp3, F.avg_pool2d(real_image, 2, stride=2), reduction='none') * sample_weights) * self.opt.weight_warp_self * 1.0 + \
torch.mean(F.l1_loss(warp2, F.avg_pool2d(real_image, 4, stride=4), reduction='none') * sample_weights) * self.opt.weight_warp_self * 1.0 + \
torch.mean(F.l1_loss(warp1, F.avg_pool2d(real_image, 8, stride=8), reduction='none') * sample_weights) * self.opt.weight_warp_self * 1.0
"""gan loss"""
pred_fake, pred_real = self.discriminate(input_semantics, generate_out['fake_image'], real_image)
G_losses['GAN'] = self.criterionGAN(pred_fake, True, for_discriminator=False) * self.opt.weight_gan
if not self.opt.no_ganFeat_loss:
num_D = len(pred_fake)
GAN_Feat_loss = 0.0
for i in range(num_D):
# for each discriminator
# last output is the final prediction, so we exclude it
num_intermediate_outputs = len(pred_fake[i]) - 1
for j in range(num_intermediate_outputs):
# for each layer output
unweighted_loss = self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach())
GAN_Feat_loss += unweighted_loss * self.opt.weight_ganFeat / num_D
G_losses['GAN_Feat'] = GAN_Feat_loss
"""feature matching loss"""
fake_features = self.vggnet_fix(generate_out['fake_image'], ['r12', 'r22', 'r32', 'r42', 'r52'], preprocess=True)
loss = 0
for i in range(len(generate_out['real_features'])):
loss += weights[i] * util.weighted_l1_loss(fake_features[i], generate_out['real_features'][i].detach(), sample_weights)
G_losses['fm'] = loss * self.opt.weight_vgg * self.opt.weight_fm_ratio
"""perceptual loss"""
feat_loss = util.mse_loss(fake_features[self.perceptual_layer], generate_out['real_features'][self.perceptual_layer].detach())
G_losses['perc'] = feat_loss * self.opt.weight_perceptual
"""contextual loss"""
G_losses['contextual'] = self.get_ctx_loss(fake_features, generate_out['ref_features']) * self.opt.weight_vgg * self.opt.weight_contextual
return G_losses, generate_out
def compute_discriminator_loss(self, input_semantics, real_image, GforD, label=None):
D_losses = {}
with torch.no_grad():
fake_image = GforD['fake_image'].detach()
fake_image.requires_grad_()
pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)
D_losses['D_Fake'] = self.criterionGAN(pred_fake, False, for_discriminator=True) * self.opt.weight_gan
D_losses['D_real'] = self.criterionGAN(pred_real, True, for_discriminator=True) * self.opt.weight_gan
return D_losses
def encode_z(self, real_image):
mu, logvar = self.net['netE'](real_image)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
def generate_fake(self, input_semantics, real_image, ref_semantics=None, ref_image=None, self_ref=None):
generate_out = {}
generate_out['ref_features'] = self.vggnet_fix(ref_image, ['r12', 'r22', 'r32', 'r42', 'r52'], preprocess=True)
generate_out['real_features'] = self.vggnet_fix(real_image, ['r12', 'r22', 'r32', 'r42', 'r52'], preprocess=True)
with autocast(enabled=self.opt.amp):
corr_out = self.net['netCorr'](ref_image, real_image, input_semantics, ref_semantics)
generate_out['fake_image'] = self.net['netG'](input_semantics, warp_out=corr_out['warp_out'])
generate_out = {**generate_out, **corr_out}
return generate_out
def inference(self, input_semantics, ref_semantics=None, ref_image=None, self_ref=None, real_image=None):
generate_out = {}
with autocast(enabled=self.opt.amp):
corr_out = self.net['netCorr'](ref_image, real_image, input_semantics, ref_semantics)
generate_out['fake_image'] = self.net['netG'](input_semantics, warp_out=corr_out['warp_out'])
generate_out = {**generate_out, **corr_out}
return generate_out
def discriminate(self, input_semantics, fake_image, real_image):
fake_concat = torch.cat([input_semantics, fake_image], dim=1)
real_concat = torch.cat([input_semantics, real_image], dim=1)
fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
with autocast(enabled=self.opt.amp):
discriminator_out = self.net['netD'](fake_and_real)
pred_fake, pred_real = self.divide_pred(discriminator_out)
return pred_fake, pred_real
def divide_pred(self, pred):
if type(pred) == list:
fake = []
real = []
for p in pred:
fake.append([tensor[:tensor.size(0) // 2] for tensor in p])
real.append([tensor[tensor.size(0) // 2:] for tensor in p])
else:
fake = pred[:pred.size(0) // 2]
real = pred[pred.size(0) // 2:]
return fake, real
def use_gpu(self):
return len(self.opt.gpu_ids) > 0
|
CoCosNet-v2/models/pix2pix_model.py/0
|
{
"file_path": "CoCosNet-v2/models/pix2pix_model.py",
"repo_id": "CoCosNet-v2",
"token_count": 6207
}
| 213 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import torch
import numpy as np
from PIL import Image
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class CelebAHQDataset(Pix2pixDataset):
    #hair, skin, l_brow, r_brow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck,
#cloth, hat, eye_g, ear_r, neck_l
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=19)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
if opt.phase == 'train':
fd = open(os.path.join(opt.dataroot, 'train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join(opt.dataroot, 'val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg'))
label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', lines[i].strip().zfill(5) + '.png'))
return label_paths, image_paths
def get_ref(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/celebahq_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_label_tensor(self, path):
# parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck',
# 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l']
label_except_glasses = Image.open(path).convert('L')
root, name = path.replace('\\', '/').split('all_parts_except_glasses/')
idx = name.split('.')[0]
subfolder = str(int(idx) // 2000)
if os.path.exists(os.path.join(root, subfolder, idx + '_eye_g.png')):
glasses = Image.open(os.path.join(root, subfolder, idx + '_eye_g.png')).convert('L')
else:
glasses = Image.fromarray(np.zeros(label_except_glasses.size, dtype=np.uint8))
params = get_params(self.opt, label_except_glasses.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
label_except_glasses_tensor = transform_label(label_except_glasses) * 255.0
glasses_tensor = transform_label(glasses)
label_tensor = torch.cat((label_except_glasses_tensor, glasses_tensor), dim=0)
return label_tensor, params
def imgpath_to_labelpath(self, path):
root, name = path.split('CelebA-HQ-img/')
label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', name.split('.')[0].zfill(5) + '.png')
return label_path
|
CoCosNet/data/celebahq_dataset.py/0
|
{
"file_path": "CoCosNet/data/celebahq_dataset.py",
"repo_id": "CoCosNet",
"token_count": 1839
}
| 214 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.generator import AdaptiveFeatureGenerator, DomainClassifier, ReverseLayerF
from util.util import vgg_preprocess
import util.util as util
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1):
super(ResidualBlock, self).__init__()
self.padding1 = nn.ReflectionPad2d(padding)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
self.bn1 = nn.InstanceNorm2d(out_channels)
self.prelu = nn.PReLU()
self.padding2 = nn.ReflectionPad2d(padding)
self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
self.bn2 = nn.InstanceNorm2d(out_channels)
def forward(self, x):
residual = x
out = self.padding1(x)
out = self.conv1(out)
out = self.bn1(out)
out = self.prelu(out)
out = self.padding2(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.prelu(out)
return out
class WTA_scale(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, input, scale=1e-4):
"""
In the forward pass we receive a Tensor containing the input and return a
Tensor containing the output. You can cache arbitrary Tensors for use in the
backward pass using the save_for_backward method.
"""
activation_max, index_max = torch.max(input, -1, keepdim=True)
        input_scale = input * scale # default: 1e-4
output_max_scale = torch.where(input == activation_max, input, input_scale)
mask = (input == activation_max).type(torch.float)
ctx.save_for_backward(input, mask)
return output_max_scale
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
# import pdb
# pdb.set_trace()
input, mask = ctx.saved_tensors
mask_ones = torch.ones_like(mask)
        mask_small_ones = torch.ones_like(mask) * 1e-4
grad_scale = torch.where(mask == 1, mask_ones, mask_small_ones)
grad_input = grad_output.clone() * grad_scale
return grad_input, None
class VGG19_feature_color_torchversion(nn.Module):
'''
NOTE: there is no need to pre-process the input
input tensor should range in [0,1]
'''
def __init__(self, pool='max', vgg_normal_correct=False, ic=3):
super(VGG19_feature_color_torchversion, self).__init__()
self.vgg_normal_correct = vgg_normal_correct
self.conv1_1 = nn.Conv2d(ic, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
if pool == 'max':
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
elif pool == 'avg':
self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool5 = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x, out_keys, preprocess=True):
'''
NOTE: input tensor should range in [0,1]
'''
out = {}
if preprocess:
x = vgg_preprocess(x, vgg_normal_correct=self.vgg_normal_correct)
out['r11'] = F.relu(self.conv1_1(x))
out['r12'] = F.relu(self.conv1_2(out['r11']))
out['p1'] = self.pool1(out['r12'])
out['r21'] = F.relu(self.conv2_1(out['p1']))
out['r22'] = F.relu(self.conv2_2(out['r21']))
out['p2'] = self.pool2(out['r22'])
out['r31'] = F.relu(self.conv3_1(out['p2']))
out['r32'] = F.relu(self.conv3_2(out['r31']))
out['r33'] = F.relu(self.conv3_3(out['r32']))
out['r34'] = F.relu(self.conv3_4(out['r33']))
out['p3'] = self.pool3(out['r34'])
out['r41'] = F.relu(self.conv4_1(out['p3']))
out['r42'] = F.relu(self.conv4_2(out['r41']))
out['r43'] = F.relu(self.conv4_3(out['r42']))
out['r44'] = F.relu(self.conv4_4(out['r43']))
out['p4'] = self.pool4(out['r44'])
out['r51'] = F.relu(self.conv5_1(out['p4']))
out['r52'] = F.relu(self.conv5_2(out['r51']))
out['r53'] = F.relu(self.conv5_3(out['r52']))
out['r54'] = F.relu(self.conv5_4(out['r53']))
out['p5'] = self.pool5(out['r54'])
return [out[key] for key in out_keys]
class NoVGGCorrespondence(BaseNetwork):
# input is Al, Bl, channel = 1, range~[0,255]
def __init__(self, opt):
self.opt = opt
super().__init__()
opt.spade_ic = opt.semantic_nc
self.adaptive_model_seg = AdaptiveFeatureGenerator(opt)
opt.spade_ic = 3
self.adaptive_model_img = AdaptiveFeatureGenerator(opt)
del opt.spade_ic
if opt.weight_domainC > 0 and (not opt.domain_rela):
self.domain_classifier = DomainClassifier(opt)
if 'down' not in opt:
opt.down = 4
if opt.warp_stride == 2:
opt.down = 2
assert (opt.down == 2) or (opt.down == 4)
self.down = opt.down
self.feature_channel = 64
self.in_channels = self.feature_channel * 4
self.inter_channels = 256
coord_c = 3 if opt.use_coordconv else 0
label_nc = opt.semantic_nc if opt.maskmix else 0
self.layer = nn.Sequential(
ResidualBlock(self.feature_channel * 4 + label_nc + coord_c, self.feature_channel * 4 + label_nc + coord_c, kernel_size=3, padding=1, stride=1),
ResidualBlock(self.feature_channel * 4 + label_nc + coord_c, self.feature_channel * 4 + label_nc + coord_c, kernel_size=3, padding=1, stride=1),
ResidualBlock(self.feature_channel * 4 + label_nc + coord_c, self.feature_channel * 4 + label_nc + coord_c, kernel_size=3, padding=1, stride=1),
ResidualBlock(self.feature_channel * 4 + label_nc + coord_c, self.feature_channel * 4 + label_nc + coord_c, kernel_size=3, padding=1, stride=1))
self.phi = nn.Conv2d(in_channels=self.in_channels + label_nc + coord_c, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
self.theta = nn.Conv2d(in_channels=self.in_channels + label_nc + coord_c, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
self.upsampling_bi = nn.Upsample(scale_factor=opt.down, mode='bilinear') #for show
if opt.warp_bilinear:
self.upsampling = nn.Upsample(scale_factor=opt.down, mode='bilinear')
else:
self.upsampling = nn.Upsample(scale_factor=opt.down)
self.zero_tensor = None
# model = [nn.ReflectionPad2d(1),
# nn.Conv2d(opt.semantic_nc, 128, kernel_size=3, padding=0, stride=1),
# nn.InstanceNorm2d(128),
# nn.PReLU(),
# nn.ReflectionPad2d(1),
# nn.Conv2d(128, self.feature_channel * 2, kernel_size=3, padding=0, stride=1),
# nn.InstanceNorm2d(self.feature_channel * 2),
# nn.PReLU()]
# self.layer_mask_head = nn.Sequential(*model)
# self.layer_mix = nn.Conv2d(in_channels=self.feature_channel * 6, out_channels=self.feature_channel * 4, kernel_size=1, stride=1, padding=0)
def addcoords(self, x):
bs, _, h, w = x.shape
xx_ones = torch.ones([bs, h, 1], dtype=x.dtype, device=x.device)
xx_range = torch.arange(w, dtype=x.dtype, device=x.device).unsqueeze(0).repeat([bs, 1]).unsqueeze(1)
xx_channel = torch.matmul(xx_ones, xx_range).unsqueeze(1)
yy_ones = torch.ones([bs, 1, w], dtype=x.dtype, device=x.device)
yy_range = torch.arange(h, dtype=x.dtype, device=x.device).unsqueeze(0).repeat([bs, 1]).unsqueeze(-1)
yy_channel = torch.matmul(yy_range, yy_ones).unsqueeze(1)
xx_channel = xx_channel.float() / (w - 1)
yy_channel = yy_channel.float() / (h - 1)
xx_channel = 2 * xx_channel - 1
yy_channel = 2 * yy_channel - 1
rr_channel = torch.sqrt(torch.pow(xx_channel, 2) + torch.pow(yy_channel, 2))
concat = torch.cat((x, xx_channel, yy_channel, rr_channel), dim=1)
return concat
def forward(self,
ref_img,
real_img,
seg_map,
ref_seg_map,
temperature=0.01,
detach_flag=False,
WTA_scale_weight=1,
alpha=1,
return_corr=False):
coor_out = {}
batch_size = ref_img.shape[0]
image_height = ref_img.shape[2]
image_width = ref_img.shape[3]
feature_height = int(image_height / self.opt.down)
feature_width = int(image_width / self.opt.down)
if self.opt.mask_noise: #add noise to mask
noise = torch.randn_like(seg_map, requires_grad=False) * 0.1
noise[seg_map == 0] = 0
seg_input = seg_map + noise
else:
seg_input = seg_map
adaptive_feature_seg = self.adaptive_model_seg(seg_input, seg_input)
adaptive_feature_img = self.adaptive_model_img(ref_img, ref_img)
adaptive_feature_seg = util.feature_normalize(adaptive_feature_seg)
adaptive_feature_img = util.feature_normalize(adaptive_feature_img)
if self.opt.isTrain and self.opt.novgg_featpair > 0:
adaptive_feature_img_pair = self.adaptive_model_img(real_img, real_img)
adaptive_feature_img_pair = util.feature_normalize(adaptive_feature_img_pair)
coor_out['loss_novgg_featpair'] = F.l1_loss(adaptive_feature_seg, adaptive_feature_img_pair) * self.opt.novgg_featpair
if self.opt.use_coordconv:
adaptive_feature_seg = self.addcoords(adaptive_feature_seg)
adaptive_feature_img = self.addcoords(adaptive_feature_img)
seg = F.interpolate(seg_map, size=adaptive_feature_seg.size()[2:], mode='nearest')
ref_seg = F.interpolate(ref_seg_map, size=adaptive_feature_img.size()[2:], mode='nearest')
if self.opt.maskmix:
cont_features = self.layer(torch.cat((adaptive_feature_seg, seg), 1))
if self.opt.noise_for_mask and ((not self.opt.isTrain) or (self.opt.isTrain and self.opt.epoch > self.opt.mask_epoch)):
noise = torch.randn_like(ref_seg, requires_grad=False) * 0.01
ref_features = self.layer(torch.cat((adaptive_feature_img, noise), 1))
else:
ref_features = self.layer(torch.cat((adaptive_feature_img, ref_seg), 1))
else:
cont_features = self.layer(adaptive_feature_seg)
ref_features = self.layer(adaptive_feature_img)
# pairwise cosine similarity
theta = self.theta(cont_features)
if self.opt.match_kernel == 1:
theta = theta.view(batch_size, self.inter_channels, -1) # 2*256*(feature_height*feature_width)
else:
theta = F.unfold(theta, kernel_size=self.opt.match_kernel, padding=int(self.opt.match_kernel // 2))
dim_mean = 1 if self.opt.PONO_C else -1
theta = theta - theta.mean(dim=dim_mean, keepdim=True) # center the feature
theta_norm = torch.norm(theta, 2, 1, keepdim=True) + sys.float_info.epsilon
theta = torch.div(theta, theta_norm)
theta_permute = theta.permute(0, 2, 1) # 2*(feature_height*feature_width)*256
phi = self.phi(ref_features)
if self.opt.match_kernel == 1:
phi = phi.view(batch_size, self.inter_channels, -1) # 2*256*(feature_height*feature_width)
else:
phi = F.unfold(phi, kernel_size=self.opt.match_kernel, padding=int(self.opt.match_kernel // 2))
phi = phi - phi.mean(dim=dim_mean, keepdim=True) # center the feature
phi_norm = torch.norm(phi, 2, 1, keepdim=True) + sys.float_info.epsilon
phi = torch.div(phi, phi_norm)
f = torch.matmul(theta_permute, phi) # 2*(feature_height*feature_width)*(feature_height*feature_width)
if detach_flag:
f = f.detach()
#f_similarity = f.unsqueeze(dim=1)
# similarity_map = torch.max(f_similarity, -1, keepdim=True)[0]
# similarity_map = similarity_map.view(batch_size, 1, feature_height, feature_width)
# f can be negative
if WTA_scale_weight == 1:
f_WTA = f
else:
f_WTA = WTA_scale.apply(f, WTA_scale_weight)
f_WTA = f_WTA / temperature
if return_corr:
return f_WTA
f_div_C = F.softmax(f_WTA.squeeze(), dim=-1) # 2*1936*1936; softmax along the horizontal line (dim=-1)
# downsample the reference color
if self.opt.warp_patch:
ref = F.unfold(ref_img, self.opt.down, stride=self.opt.down)
else:
ref = F.avg_pool2d(ref_img, self.opt.down)
channel = ref.shape[1]
ref = ref.view(batch_size, channel, -1)
ref = ref.permute(0, 2, 1)
y = torch.matmul(f_div_C, ref) # 2*1936*channel
if self.opt.warp_patch:
y = y.permute(0, 2, 1)
y = F.fold(y, 256, self.opt.down, stride=self.opt.down)
else:
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, channel, feature_height, feature_width) # 2*3*44*44
if (not self.opt.isTrain) and self.opt.show_corr:
coor_out['warp_out_bi'] = y if self.opt.warp_patch else self.upsampling_bi(y)
coor_out['warp_out'] = y if self.opt.warp_patch else self.upsampling(y)
if self.opt.warp_mask_losstype == 'direct' or self.opt.show_warpmask:
ref_seg = F.interpolate(ref_seg_map, scale_factor= 1/self.opt.down, mode='nearest')
channel = ref_seg.shape[1]
ref_seg = ref_seg.view(batch_size, channel, -1)
ref_seg = ref_seg.permute(0, 2, 1)
warp_mask = torch.matmul(f_div_C, ref_seg) # 2*1936*channel
warp_mask = warp_mask.permute(0, 2, 1).contiguous()
coor_out['warp_mask'] = warp_mask.view(batch_size, channel, feature_height, feature_width) # 2*3*44*44
elif self.opt.warp_mask_losstype == 'cycle':
f_div_C_v = F.softmax(f_WTA.transpose(1, 2), dim=-1) # 2*1936*1936; softmax along the vertical line
seg = F.interpolate(seg_map, scale_factor=1 / self.opt.down, mode='nearest')
channel = seg.shape[1]
seg = seg.view(batch_size, channel, -1)
seg = seg.permute(0, 2, 1)
warp_mask_to_ref = torch.matmul(f_div_C_v, seg) # 2*1936*channel
warp_mask = torch.matmul(f_div_C, warp_mask_to_ref) # 2*1936*channel
warp_mask = warp_mask.permute(0, 2, 1).contiguous()
coor_out['warp_mask'] = warp_mask.view(batch_size, channel, feature_height, feature_width) # 2*3*44*44
else:
warp_mask = None
if self.opt.warp_cycle_w > 0:
f_div_C_v = F.softmax(f_WTA.transpose(1, 2), dim=-1)
if self.opt.warp_patch:
y = F.unfold(y, self.opt.down, stride=self.opt.down)
y = y.permute(0, 2, 1)
warp_cycle = torch.matmul(f_div_C_v, y)
warp_cycle = warp_cycle.permute(0, 2, 1)
warp_cycle = F.fold(warp_cycle, 256, self.opt.down, stride=self.opt.down)
coor_out['warp_cycle'] = warp_cycle
else:
channel = y.shape[1]
y = y.view(batch_size, channel, -1).permute(0, 2, 1)
warp_cycle = torch.matmul(f_div_C_v, y).permute(0, 2, 1).contiguous()
coor_out['warp_cycle'] = warp_cycle.view(batch_size, channel, feature_height, feature_width)
if self.opt.two_cycle:
real_img = F.avg_pool2d(real_img, self.opt.down)
real_img = real_img.view(batch_size, channel, -1)
real_img = real_img.permute(0, 2, 1)
warp_i2r = torch.matmul(f_div_C_v, real_img).permute(0, 2, 1).contiguous() #warp input to ref
warp_i2r = warp_i2r.view(batch_size, channel, feature_height, feature_width)
warp_i2r2i = torch.matmul(f_div_C, warp_i2r.view(batch_size, channel, -1).permute(0, 2, 1))
coor_out['warp_i2r'] = warp_i2r
coor_out['warp_i2r2i'] = warp_i2r2i.permute(0, 2, 1).contiguous().view(batch_size, channel, feature_height, feature_width)
return coor_out
|
CoCosNet/models/networks/correspondence.py/0
|
{
"file_path": "CoCosNet/models/networks/correspondence.py",
"repo_id": "CoCosNet",
"token_count": 9137
}
| 215 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from collections import OrderedDict
import torch
import torchvision.utils as vutils
import torch.nn.functional as F
import data
import numpy as np
from util.util import masktorgb
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
opt = TestOptions().parse()
torch.manual_seed(0)
dataloader = data.create_dataloader(opt)
dataloader.dataset[0]
model = Pix2PixModel(opt)
model.eval()
save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output')
# test
for i, data_i in enumerate(dataloader):
print('{} / {}'.format(i, len(dataloader)))
if i * opt.batchSize >= opt.how_many:
break
imgs_num = data_i['label'].shape[0]
#data_i['stage1'] = torch.ones_like(data_i['stage1'])
out = model(data_i, mode='inference')
if opt.save_per_img:
root = save_root + '/test_per_img/'
if not os.path.exists(root + opt.name):
os.makedirs(root + opt.name)
imgs = out['fake_image'].data.cpu()
try:
imgs = (imgs + 1) / 2
for i in range(imgs.shape[0]):
if opt.dataset_mode == 'deepfashion':
name = data_i['path'][i].split('Dataset/DeepFashion/')[-1].replace('/', '_')
else:
name = os.path.basename(data_i['path'][i])
vutils.save_image(imgs[i:i+1], root + opt.name + '/' + name,
nrow=1, padding=0, normalize=False)
except OSError as err:
print(err)
else:
if not os.path.exists(save_root + '/test/' + opt.name):
os.makedirs(save_root + '/test/' + opt.name)
if opt.dataset_mode == 'deepfashion':
label = data_i['label'][:,:3,:,:]
elif opt.dataset_mode == 'celebahqedge':
label = data_i['label'].expand(-1, 3, -1, -1).float()
else:
label = masktorgb(data_i['label'].cpu().numpy())
label = torch.from_numpy(label).float() / 128 - 1
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), out['fake_image'].data.cpu()), 0)
try:
imgs = (imgs + 1) / 2
vutils.save_image(imgs, save_root + '/test/' + opt.name + '/' + str(i) + '.png',
nrow=imgs_num, padding=0, normalize=False)
except OSError as err:
print(err)
|
CoCosNet/test.py/0
|
{
"file_path": "CoCosNet/test.py",
"repo_id": "CoCosNet",
"token_count": 1185
}
| 216 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self, encoder):
super(Model, self).__init__()
self.encoder = encoder
def forward(self, code_inputs=None, nl_inputs=None, cls=False):
if code_inputs is not None:
outputs = self.encoder(code_inputs,attention_mask=code_inputs.ne(1))[0]
outputs = (outputs * code_inputs.ne(1)[:,:,None]).sum(1)/code_inputs.ne(1).sum(1)[:,None]
return torch.nn.functional.normalize(outputs, p=2, dim=1)
else:
outputs = self.encoder(nl_inputs,attention_mask=nl_inputs.ne(1))[0]
outputs = (outputs * nl_inputs.ne(1)[:,:,None]).sum(1)/nl_inputs.ne(1).sum(1)[:,None]
return torch.nn.functional.normalize(outputs, p=2, dim=1)
|
CodeBERT/CodeExecutor/downstream/model_unixcoder.py/0
|
{
"file_path": "CodeBERT/CodeExecutor/downstream/model_unixcoder.py",
"repo_id": "CodeBERT",
"token_count": 422
}
| 217 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/code-to-code-trans/evaluator/CodeBLEU
# -*- coding:utf-8 -*-
import argparse
import os
from evaluator.CodeBLEU import bleu, weighted_ngram_match, syntax_match, dataflow_match
def get_codebleu(refs, hyp, lang, params='0.25,0.25,0.25,0.25'):
if not isinstance(refs, list):
refs = [refs]
alpha, beta, gamma, theta = [float(x) for x in params.split(',')]
# preprocess inputs
pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()] for file in refs]
hypothesis = [x.strip() for x in open(hyp, 'r', encoding='utf-8').readlines()]
for i in range(len(pre_references)):
assert len(hypothesis) == len(pre_references[i])
references = []
for i in range(len(hypothesis)):
ref_for_instance = []
for j in range(len(pre_references)):
ref_for_instance.append(pre_references[j][i])
references.append(ref_for_instance)
assert len(references) == len(pre_references) * len(hypothesis)
# calculate ngram match (BLEU)
tokenized_hyps = [x.split() for x in hypothesis]
tokenized_refs = [[x.split() for x in reference] for reference in references]
ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)
# calculate weighted ngram match
root_dir = os.path.dirname(__file__)
keywords = [x.strip() for x in open(root_dir + '/keywords/' + lang + '.txt', 'r', encoding='utf-8').readlines()]
def make_weights(reference_tokens, key_word_list):
return {token: 1 if token in key_word_list else 0.2 for token in reference_tokens}
tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)] \
for reference_tokens in reference] for reference in tokenized_refs]
weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)
# calculate syntax match
syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, lang)
# calculate dataflow match
dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, lang)
print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'. \
format(ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
code_bleu_score = alpha * ngram_match_score \
+ beta * weighted_ngram_match_score \
+ gamma * syntax_match_score \
+ theta * dataflow_match_score
return code_bleu_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--refs', type=str, nargs='+', required=True,
help='reference files')
parser.add_argument('--hyp', type=str, required=True,
help='hypothesis file')
parser.add_argument('--lang', type=str, required=True,
choices=['java', 'js', 'c_sharp', 'php', 'go', 'python', 'ruby'],
help='programming language')
parser.add_argument('--params', type=str, default='0.25,0.25,0.25,0.25',
                        help='alpha, beta, gamma and theta')
args = parser.parse_args()
code_bleu_score = get_codebleu(args.refs, args.hyp, args.lang, args.params)
print('CodeBLEU score: ', code_bleu_score)
|
CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/calc_code_bleu.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/calc_code_bleu.py",
"repo_id": "CodeBERT",
"token_count": 1452
}
| 218 |
import os
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
import numpy as np
from utils import MyTokenizer
from transformers import (
RobertaConfig,
RobertaModel,
RobertaTokenizer,
BartConfig,
BartForConditionalGeneration,
BartTokenizer,
T5Config,
T5ForConditionalGeneration,
T5Tokenizer,
)
import logging
logger = logging.getLogger(__name__)
class ReviewerModel(T5ForConditionalGeneration):
def __init__(self, config):
super().__init__(config)
self.cls_head = nn.Linear(self.config.d_model, 2, bias=True)
self.init()
def init(self):
nn.init.xavier_uniform_(self.lm_head.weight)
factor = self.config.initializer_factor
self.cls_head.weight.data.normal_(mean=0.0, \
std=factor * ((self.config.d_model) ** -0.5))
self.cls_head.bias.data.zero_()
def forward(
self, *argv, **kwargs
):
r"""
Doc from Huggingface transformers:
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small')
>>> # training
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
"""
if "cls" in kwargs:
assert (
"input_ids" in kwargs and \
"labels" in kwargs and \
"attention_mask" in kwargs
)
return self.cls(
input_ids=kwargs["input_ids"],
labels=kwargs["labels"],
attention_mask=kwargs["attention_mask"],
)
if "input_labels" in kwargs:
assert (
"input_ids" in kwargs and \
"input_labels" in kwargs and \
"decoder_input_ids" in kwargs and \
"attention_mask" in kwargs and \
"decoder_attention_mask" in kwargs
), "Please give these arg keys."
input_ids = kwargs["input_ids"]
input_labels = kwargs["input_labels"]
decoder_input_ids = kwargs["decoder_input_ids"]
attention_mask = kwargs["attention_mask"]
decoder_attention_mask = kwargs["decoder_attention_mask"]
if "encoder_loss" not in kwargs:
encoder_loss = True
else:
encoder_loss = kwargs["encoder_loss"]
return self.review_forward(input_ids, input_labels, decoder_input_ids, attention_mask, decoder_attention_mask, encoder_loss)
return super().forward(*argv, **kwargs)
def cls(
self,
input_ids,
labels,
attention_mask,
):
encoder_outputs = self.encoder( \
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=False,
return_dict=False
)
hidden_states = encoder_outputs[0]
first_hidden = hidden_states[:, 0, :]
first_hidden = nn.Dropout(0.3)(first_hidden)
logits = self.cls_head(first_hidden)
loss_fct = CrossEntropyLoss()
        if labels is not None:
loss = loss_fct(logits, labels)
return loss
return logits
def review_forward(
self,
input_ids,
input_labels,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
encoder_loss=True
):
encoder_outputs = self.encoder( \
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=False,
return_dict=False
)
hidden_states = encoder_outputs[0]
decoder_inputs = self._shift_right(decoder_input_ids)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_inputs,
attention_mask=decoder_attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
output_attentions=False,
return_dict=False
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings: # this is True default
sequence_output = sequence_output * (self.model_dim ** -0.5)
if encoder_loss:
# print(self.encoder.get_input_embeddings().weight.shape)
cls_logits = nn.functional.linear(hidden_states, self.encoder.get_input_embeddings().weight)
# cls_logits = self.cls_head(hidden_states)
lm_logits = self.lm_head(sequence_output)
if decoder_input_ids is not None:
lm_loss_fct = CrossEntropyLoss(ignore_index=0) # Warning: PAD_ID should be 0
loss = lm_loss_fct(lm_logits.view(-1, lm_logits.size(-1)), decoder_input_ids.view(-1))
if encoder_loss and input_labels is not None:
cls_loss_fct = CrossEntropyLoss(ignore_index=-100)
loss += cls_loss_fct(cls_logits.view(-1, cls_logits.size(-1)), input_labels.view(-1))
return loss
return cls_logits, lm_logits
def get_model_size(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
model_size = sum([np.prod(p.size()) for p in model_parameters])
return "{}M".format(round(model_size / 1e6))
def build_or_load_gen_model(args):
config_class, model_class, tokenizer_class = T5Config, ReviewerModel, RobertaTokenizer
config = config_class.from_pretrained(args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path, config=config)
tokenizer.special_dict = {
f"<e{i}>" : tokenizer.get_vocab()[f"<e{i}>"] for i in range(99, -1, -1)
}
tokenizer.mask_id = tokenizer.get_vocab()["<mask>"]
tokenizer.bos_id = tokenizer.get_vocab()["<s>"]
tokenizer.pad_id = tokenizer.get_vocab()["<pad>"]
tokenizer.eos_id = tokenizer.get_vocab()["</s>"]
tokenizer.msg_id = tokenizer.get_vocab()["<msg>"]
tokenizer.keep_id = tokenizer.get_vocab()["<keep>"]
tokenizer.add_id = tokenizer.get_vocab()["<add>"]
tokenizer.del_id = tokenizer.get_vocab()["<del>"]
tokenizer.start_id = tokenizer.get_vocab()["<start>"]
tokenizer.end_id = tokenizer.get_vocab()["<end>"]
logger.info(
"Finish loading model [%s] from %s",
get_model_size(model),
args.model_name_or_path,
)
if args.load_model_path is not None:
model_path = os.path.join(args.load_model_path, "pytorch_model.bin")
logger.info("Reload model from {}".format(model_path))
try:
model.load_state_dict(torch.load(model_path, map_location="cpu"))
except RuntimeError:
saved = model.cls_head
model.cls_head = None
model.load_state_dict(torch.load(model_path, map_location="cpu"))
model.cls_head = saved
model.to(args.local_rank)
return config, model, tokenizer
|
CodeBERT/CodeReviewer/code/models.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/models.py",
"repo_id": "CodeBERT",
"token_count": 3886
}
| 219 |
import argparse
import torch
from configs import add_args
from models import ReviewerModel, build_or_load_gen_model
MAX_SOURCE_LENGTH=512
def pad_assert(tokenizer, source_ids):
source_ids = source_ids[:MAX_SOURCE_LENGTH - 2]
source_ids = [tokenizer.bos_id] + source_ids + [tokenizer.eos_id]
pad_len = MAX_SOURCE_LENGTH - len(source_ids)
source_ids += [tokenizer.pad_id] * pad_len
assert len(source_ids) == MAX_SOURCE_LENGTH, "Not equal length."
return source_ids
def encode_diff(tokenizer, diff):
difflines = diff.split("\n")[1:] # remove start @@
difflines = [line for line in difflines if len(line.strip()) > 0]
map_dic = {"-": 0, "+": 1, " ": 2}
def f(s):
if s in map_dic:
return map_dic[s]
else:
return 2
labels = [f(line[0]) for line in difflines]
difflines = [line[1:].strip() for line in difflines]
inputstr = ""
for label, line in zip(labels, difflines):
if label == 1:
inputstr += "<add>" + line
elif label == 0:
inputstr += "<del>" + line
else:
inputstr += "<keep>" + line
source_ids = tokenizer.encode(inputstr, max_length=MAX_SOURCE_LENGTH, truncation=True)[1:-1]
source_ids = pad_assert(tokenizer, source_ids)
return source_ids
parser = argparse.ArgumentParser()
args = add_args(parser)
args.model_name_or_path = "microsoft/codereviewer"
config, model, tokenizer = build_or_load_gen_model(args)
model.to("cuda")
model.eval()
code_diff = """@@ -11,6 +11,8 @@\n \n invoiceDtoCopy.setState(InvoiceState.OPEN);\n _invoiceAggregateRepository.updateInvoiceState(invoiceCopy, InvoiceState.OPEN);\n+ _erpIntegrationService.createAndSendInvoiceEvent(invoiceCopy);\n+\n }\n }\n \n"""
inputs = torch.tensor([encode_diff(tokenizer, code_diff)], dtype=torch.long).to("cuda")
inputs_mask = inputs.ne(tokenizer.pad_id)
preds = model.generate(inputs,
attention_mask=inputs_mask,
use_cache=True,
num_beams=5,
early_stopping=True,
max_length=100,
num_return_sequences=2
)
preds = list(preds.cpu().numpy())
pred_nls = [tokenizer.decode(id[2:], skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in preds]
print(pred_nls[0])
|
CodeBERT/CodeReviewer/code/test_model.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/test_model.py",
"repo_id": "CodeBERT",
"token_count": 1115
}
| 220 |
git clone https://github.com/tree-sitter/tree-sitter-go
git clone https://github.com/tree-sitter/tree-sitter-javascript
git clone https://github.com/tree-sitter/tree-sitter-python
git clone https://github.com/tree-sitter/tree-sitter-ruby
git clone https://github.com/tree-sitter/tree-sitter-php
git clone https://github.com/tree-sitter/tree-sitter-java
git clone https://github.com/tree-sitter/tree-sitter-c-sharp
python build.py
|
CodeBERT/GraphCodeBERT/refinement/parser/build.sh/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/refinement/parser/build.sh",
"repo_id": "CodeBERT",
"token_count": 147
}
| 221 |
# LongCoder
This repo will provide the code for reproducing the experiments on LCC datasets in [LongCoder: A Long-Range Pre-trained Language Model for Code Completion](https://arxiv.org/abs/2306.14893). LongCoder is a sparse and efficient pre-trained Transformer model for long code modeling.
## 1. Dependency
- pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
- pip install --upgrade transformers fuzzywuzzy tree_sitter datasets
## 2. Dataset
In this repo, the LCC dataset is downloaded automatically when you run the fine-tuning script. If you want to download the LCC datasets yourself, you can find them at the following links (a minimal loading example is sketched after the list):
```
https://huggingface.co/datasets/microsoft/LCC_python
https://huggingface.co/datasets/microsoft/LCC_java
https://huggingface.co/datasets/microsoft/LCC_csharp
```
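For example, with the Hugging Face `datasets` library (the split and field names below are not guaranteed — inspect the returned object or the dataset card for the exact schema):
```python
from datasets import load_dataset

# Downloads the C# portion of LCC from the Hugging Face Hub.
lcc = load_dataset("microsoft/LCC_csharp")
print(lcc)                    # shows the available splits and features
example = lcc["train"][0]     # assumes a "train" split exists
print(list(example.keys()))   # inspect the field names consumed by run.py
```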
## 3. Fine-Tune Setting
Here we provide the fine-tuning settings for code completion on the LCC dataset in the C# programming language, whose results are reported in the paper.
Note that this setup requires 8 V100-32G GPUs; you can adjust the batch size or source length based on your requirements.
```shell
lang=csharp #csharp, python, java
lr=2e-4
batch_size=16
beam_size=5
source_length=3968
target_length=128
global_length=64
window_size=512
epochs=10
output_dir=saved_models/$lang
mkdir -p $output_dir
python run.py \
--do_train \
--do_eval \
--lang $lang \
--output_dir $output_dir \
--model_name_or_path microsoft/longcoder-base \
--filename microsoft/LCC_$lang \
--max_source_length $source_length \
--max_target_length $target_length \
--max_global_length $global_length \
--window_size $window_size \
--beam_size $beam_size \
--train_batch_size $batch_size \
--eval_batch_size $batch_size \
--learning_rate $lr \
--num_train_epochs $epochs 2>&1| tee $output_dir/train.log
```
## 4. Evaluating LongCoder
```shell
lang=csharp #csharp, python, java
batch_size=16
beam_size=5
source_length=3968
target_length=128
global_length=64
window_size=512
epochs=10
output_dir=saved_models/$lang
reload_model=$output_dir/checkpoint-best-acc/model.bin
python run.py \
--do_test \
--lang $lang \
--load_model_path $reload_model \
--output_dir $output_dir \
--model_name_or_path microsoft/longcoder-base \
--filename microsoft/LCC_$lang \
--max_source_length $source_length \
--max_target_length $target_length \
--max_global_length $global_length \
--window_size $window_size \
--beam_size $beam_size \
--train_batch_size $batch_size \
--eval_batch_size $batch_size \
--num_train_epochs $epochs 2>&1| tee $output_dir/test.log
```
## Reference
If you use this code or LongCoder, please consider citing us.
<pre><code>@article{longcoder,
title={LongCoder: A Long-Range Pre-trained Language Model for Code Completion},
author={Daya Guo and Canwen Xu and Nan Duan and Jian Yin and Julian McAuley},
journal={arXiv preprint arXiv:2306.14893},
year={2023}
}</code></pre>
|
CodeBERT/LongCoder/README.md/0
|
{
"file_path": "CodeBERT/LongCoder/README.md",
"repo_id": "CodeBERT",
"token_count": 1014
}
| 222 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.dropout = nn.Dropout(0.1)
self.out_proj = nn.Linear(config.hidden_size, 2)
def forward(self, x):
x = x.reshape(-1,x.size(-1)*2)
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config = config
self.tokenizer = tokenizer
self.classifier = RobertaClassificationHead(config)
self.args = args
def forward(self, input_ids=None,labels=None):
input_ids = input_ids.view(-1,self.args.block_size)
outputs = self.encoder(input_ids,attention_mask=input_ids.ne(1))[0]
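        # mean-pool hidden states over non-padding tokens (pad token id assumed to be 1, as in RoBERTa)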
outputs = (outputs * input_ids.ne(1)[:,:,None]).sum(1)/input_ids.ne(1).sum(1)[:,None]
outputs = outputs.reshape(-1,2,outputs.size(-1))
outputs = torch.nn.functional.normalize(outputs, p=2, dim=-1)
cos_sim = (outputs[:,0]*outputs[:,1]).sum(-1)
if labels is not None:
loss = ((cos_sim-labels.float())**2).mean()
return loss,cos_sim
else:
return cos_sim
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/model.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/model.py",
"repo_id": "CodeBERT",
"token_count": 820
}
| 223 |
# Code Search
## Data Download
#### 1. AdvTest dataset
```bash
mkdir dataset && cd dataset
wget https://github.com/microsoft/CodeXGLUE/raw/main/Text-Code/NL-code-search-Adv/dataset.zip
unzip dataset.zip && rm -r dataset.zip && mv dataset AdvTest && cd AdvTest
wget https://zenodo.org/record/7857872/files/python.zip
unzip python.zip && python preprocess.py && rm -r python && rm -r *.pkl && rm python.zip
cd ../..
```
#### 2. CosQA dataset
```bash
cd dataset
mkdir cosqa && cd cosqa
wget https://github.com/Jun-jie-Huang/CoCLR/raw/main/data/search/code_idx_map.txt
wget https://github.com/Jun-jie-Huang/CoCLR/raw/main/data/search/cosqa-retrieval-dev-500.json
wget https://github.com/Jun-jie-Huang/CoCLR/raw/main/data/search/cosqa-retrieval-test-500.json
wget https://github.com/Jun-jie-Huang/CoCLR/raw/main/data/search/cosqa-retrieval-train-19604.json
cd ../..
```
#### 3. CSN dataset
```bash
cd dataset
wget https://github.com/microsoft/CodeBERT/raw/master/GraphCodeBERT/codesearch/dataset.zip
unzip dataset.zip && rm -r dataset.zip && mv dataset CSN && cd CSN
bash run.sh
cd ../..
```
## Dependency
- pip install torch
- pip install transformers
## Zero-Shot Setting
We first provide scripts for zero-shot code search. The score between a natural-language query and a code snippet is the cosine similarity of their UniXcoder hidden states; a minimal sketch of this scoring is shown below.
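The snippet is a rough illustration rather than the exact implementation in `run.py`; it assumes the code and query embeddings have already been mean-pooled over tokens:
```python
import torch
import torch.nn.functional as F

def cosine_scores(nl_emb: torch.Tensor, code_emb: torch.Tensor) -> torch.Tensor:
    """nl_emb: [num_queries, hidden], code_emb: [num_codes, hidden] -> [num_queries, num_codes]."""
    nl_emb = F.normalize(nl_emb, p=2, dim=-1)      # L2-normalize so the dot product equals cosine similarity
    code_emb = F.normalize(code_emb, p=2, dim=-1)
    return nl_emb @ code_emb.t()

# Ranking: for each query, sort code snippets by descending score.
# ranks = cosine_scores(nl_emb, code_emb).argsort(dim=-1, descending=True)
```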
#### 1. AdvTest dataset
```bash
python run.py \
--output_dir saved_models/AdvTest \
--model_name_or_path microsoft/unixcoder-base \
--do_zero_shot \
--do_test \
--test_data_file dataset/AdvTest/test.jsonl \
--codebase_file dataset/AdvTest/test.jsonl \
--num_train_epochs 2 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
#### 2. CosQA dataset
```bash
python run.py \
--output_dir saved_models/cosqa \
--model_name_or_path microsoft/unixcoder-base \
--do_zero_shot \
--do_test \
--test_data_file dataset/cosqa/cosqa-retrieval-test-500.json \
--codebase_file dataset/cosqa/code_idx_map.txt \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
#### 3. CSN dataset
```bash
lang=python
python run.py \
--output_dir saved_models/CSN/$lang \
--model_name_or_path microsoft/unixcoder-base \
--do_zero_shot \
--do_test \
--test_data_file dataset/CSN/$lang/test.jsonl \
--codebase_file dataset/CSN/$lang/codebase.jsonl \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
## Fine-Tune Setting
Here we provide fine-tune settings for code search, whose results are reported in the paper.
#### 1. AdvTest dataset
```shell
# Training
python run.py \
--output_dir saved_models/AdvTest \
--model_name_or_path microsoft/unixcoder-base \
--do_train \
--train_data_file dataset/AdvTest/train.jsonl \
--eval_data_file dataset/AdvTest/valid.jsonl \
--codebase_file dataset/AdvTest/valid.jsonl \
--num_train_epochs 2 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
# Evaluating
python run.py \
--output_dir saved_models/AdvTest \
--model_name_or_path microsoft/unixcoder-base \
--do_test \
--test_data_file dataset/AdvTest/test.jsonl \
--codebase_file dataset/AdvTest/test.jsonl \
--num_train_epochs 2 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
#### 2. CosQA dataset
```bash
# Training
python run.py \
--output_dir saved_models/cosqa \
--model_name_or_path microsoft/unixcoder-base \
--do_train \
--train_data_file dataset/cosqa/cosqa-retrieval-train-19604.json \
--eval_data_file dataset/cosqa/cosqa-retrieval-dev-500.json \
--codebase_file dataset/cosqa/code_idx_map.txt \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
# Evaluating
python run.py \
--output_dir saved_models/cosqa \
--model_name_or_path microsoft/unixcoder-base \
--do_eval \
--do_test \
--eval_data_file dataset/cosqa/cosqa-retrieval-dev-500.json \
--test_data_file dataset/cosqa/cosqa-retrieval-test-500.json \
--codebase_file dataset/cosqa/code_idx_map.txt \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
#### 3. CSN dataset
```bash
# Training
lang=python
python run.py \
--output_dir saved_models/CSN/$lang \
--model_name_or_path microsoft/unixcoder-base \
--do_train \
--train_data_file dataset/CSN/$lang/train.jsonl \
--eval_data_file dataset/CSN/$lang/valid.jsonl \
--codebase_file dataset/CSN/$lang/codebase.jsonl \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
# Evaluating
python run.py \
--output_dir saved_models/CSN/$lang \
--model_name_or_path microsoft/unixcoder-base \
--do_eval \
--do_test \
--eval_data_file dataset/CSN/$lang/valid.jsonl \
--test_data_file dataset/CSN/$lang/test.jsonl \
--codebase_file dataset/CSN/$lang/codebase.jsonl \
--num_train_epochs 10 \
--code_length 256 \
--nl_length 128 \
--train_batch_size 64 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456
```
|
CodeBERT/UniXcoder/downstream-tasks/code-search/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-search/README.md",
"repo_id": "CodeBERT",
"token_count": 2425
}
| 224 |
# On the Advance of Making Language Models Better Reasoners
[[Paper]](https://arxiv.org/abs/2206.02336)
## News
- [August, 2022] Data release: `GSM8K` and `StrategyQA`, generated by `code-davinci-002`.
## Dataset Details
Each subfolder in the `/data` folder corresponds to a reasoning benchmark. You can find more details about each benchmark in the `README.md` file of its subfolder.
Generally, these subfolders contain ``train.jsonl`` and ``test.jsonl`` files.
Each line of these files follows the same format:
```javascript
{
// context: the prompt sequence we provide to the language model.
// {Qi}/{Ei}/{Ai} represents the question/chain-of-thought/answer of the i-th exemplar.
// {Q} represents the question for inference.
"context": "Question:\n{Q1}\n{E1}\n#### {A1}\n\n{...}Question:\n{Qk}\n{Ek}\n#### {Ak}\n\nQuestion:\n{Q}\nAnswer:\n",
// samples: multiple output sequences sampled from the language model, given the prompt sequence as input
"samples": [
"{E}\n#### {A}\n\n",
"{E'}\n#### {A'}\n\n",
"{E''}\n#### {A'''}\n\n"
...
],
// {E*}/{A*} represents the ground truth chain-of-thought/answer of {Q}.
// if the dataset doesn't provide ground truth chain-of-thoughts, {E*} will be "No chain-of-thought provided.".
"metadata": {
"question": "{Q}",
"ground_truth": "{E*}#### {A*}"
}
}
```
Currently, all data we release in this repository are generated by the `code-davinci-002` model provided by OpenAI.
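As an illustration, here is a small sketch of reading such a file and pulling out the predicted and ground-truth answers; the field names follow the schema above, while the helper names are hypothetical:
```python
import json
from collections import Counter

def load_examples(path):
    """Yield (question, sampled_answers, ground_truth_answer) for each line of a train/test.jsonl file."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            ex = json.loads(line)
            # every sample ends with "#### {answer}"; keep only the answer span
            answers = [s.split("####")[-1].strip() for s in ex["samples"] if "####" in s]
            gt = ex["metadata"]["ground_truth"].split("####")[-1].strip()
            yield ex["metadata"]["question"], answers, gt

def majority_vote(answers):
    """Simple self-consistency baseline: pick the most frequent sampled answer."""
    return Counter(answers).most_common(1)[0][0] if answers else None
```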
## Usage
Given the `train.jsonl` and `test.jsonl` files generated by large-scale pretrained language models, you can use the code provided in the `code` folder to reproduce our results. Here we take the `gsm8k` dataset as an example.
### Prerequisites
- Install dependencies according to the `environment` properties of `code/verifier_data_prepare.yaml` and `verifier_train.yaml`.
- Register a [wandb](https://wandb.ai/site) account and get a wandb API key.
- Create a new folder (denoted as `{EXEC_DIR}`) and initialize this folder as follows:
```shell
$ {EXEC_DIR}
.
├── train_dir
│ └── train.jsonl
├── test_dir
│ └── test.jsonl
├── train_preprocessed // this is an empty folder
├── test_preprocessed // this is an empty folder
└── exec // this is an empty folder
```
### Data Pre-Processing
In the `code/src` folder, run these two commands:
```shell
python verifier_data_prepare.py \
    --generator_result_file {EXEC_DIR}/train_dir \
    --output_dir {EXEC_DIR}/train_preprocessed \
    --split train \
    --random_seed 233 \
    --dataset_name GSM8K

python verifier_data_prepare.py \
    --generator_result_file {EXEC_DIR}/test_dir \
    --output_dir {EXEC_DIR}/test_preprocessed \
    --split dev \
    --random_seed 233 \
    --dataset_name GSM8K
```
You can find the detailed parameter specifications in `code/verifier_data_prepare.yaml`.
### Training and Evaluation
In the ``code/src`` folder, run these commands:
```shell
export WANDB_API_KEY={your_wandb_api_key_here}
export WANDB_PROJECT=deberta-verifier
export WANDB_RUN_ID=gsm8k-codedavinci002
export WANDB_TAGS=deberta_verifier
export NCCL_DEBUG=INFO
deepspeed --num_gpus=8 run_ner.py \
--task_type NER \
--dataset_name GSM8K \
--train_data {EXEC_DIR}/train_preprocessed \
--test_data {EXEC_DIR}/test_preprocessed \
--output_dir {EXEC_DIR}/exec \
--max_seq_length 512 \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 64 \
--lr_scheduler_type constant \
--seed 233 \
--logging_steps 10 \
--overwrite_output_dir \
--alpha 0.1 \
--deepspeed ds_config.json
```
You can find the detailed parameter specifications in `code/verifier_train.yaml`.
### Logs
All the training/evaluation logs will be uploaded to your wandb account.
Key logged metrics include:
- `eval_weighted_voting_top1_accuracy@100`: solve rate of DIVERSE (our approach);
- `eval_voting_top1_accuracy@100`: solve rate of DIVERSE w/o verifier (i.e., each candidate is weighted equally);
- `eval_verifier_top1_accuracy@100`: solve rate of DIVERSE w/o voting (i.e., selecting the candidate with highest verifier score).
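To make the distinction concrete, here is a small sketch (not the repository's implementation) of how one question's candidates could be scored under the three schemes, given their final answers and hypothetical per-candidate verifier probabilities:
```python
from collections import defaultdict
def top1_answers(answers, verifier_scores):
    # answers: final answer per candidate; verifier_scores: floats in [0, 1]
    votes, weighted = defaultdict(int), defaultdict(float)
    for ans, score in zip(answers, verifier_scores):
        votes[ans] += 1          # plain voting: every candidate counts equally
        weighted[ans] += score   # weighted voting: candidates weighted by verifier score
    best_idx = max(range(len(answers)), key=lambda i: verifier_scores[i])
    return {
        'voting_top1': max(votes, key=votes.get),
        'weighted_voting_top1': max(weighted, key=weighted.get),
        'verifier_top1': answers[best_idx],  # verifier only: highest-scored single candidate
    }
```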
## Citation
If our work is useful for you, please consider citing our paper:
```bibtex
@article{li2022advance,
title={On the Advance of Making Language Models Better Reasoners},
author={Li, Yifei and Lin, Zeqi and Zhang, Shizhuo and Fu, Qiang and Chen, Bei and Lou, Jian-Guang and Chen, Weizhu},
journal={arXiv preprint arXiv:2206.02336},
year={2022}
}
```
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit [https://cla.opensource.microsoft.com](https://cla.opensource.microsoft.com).
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
## License
Please note that this repo is under [MIT License](./LICENSE).
|
CodeT/DIVERSE/README.md/0
|
{
"file_path": "CodeT/DIVERSE/README.md",
"repo_id": "CodeT",
"token_count": 1828
}
| 225 |
https://openai.com/blog/grade-school-math/
|
CodeT/DIVERSE/data/gsm8k/README.md/0
|
{
"file_path": "CodeT/DIVERSE/data/gsm8k/README.md",
"repo_id": "CodeT",
"token_count": 15
}
| 226 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import itertools
import functools
from utils import Tools, FilePathBuilder, CONSTANTS
from collections import defaultdict
class RepoWindowMaker:
def __init__(self, repo, window_size, slice_size):
self.repo = repo
self.window_size = window_size
self.slice_size = slice_size
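        # anchor a window every window_size // slice_size lines (at least 1), so roughly slice_size overlapping windows cover each line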
self.slice_step = 1 if window_size // slice_size == 0 else window_size // slice_size
self.source_code_files = Tools.iterate_repository(repo)
    def _build_windows_for_a_file(self, fpath_tuple, code):
code_windows = []
code_lines = code.splitlines()
delta_size = self.window_size // 2
for line_no in range(0, len(code_lines), self.slice_step): # line_no starts from 0
start_line_no = max(0, line_no - delta_size)
end_line_no = min(len(code_lines), line_no + self.window_size - delta_size)
window_lines = [i for i in code_lines[start_line_no:end_line_no]]
if not window_lines: # all empty lines
continue
window_text = '\n'.join(window_lines)
code_windows.append({
'context': window_text,
'metadata': {
'fpath_tuple': fpath_tuple,
'line_no': line_no,
'start_line_no': start_line_no,
'end_line_no': end_line_no,
'window_size': self.window_size,
'repo': self.repo,
'slice_size': self.slice_size,
}
})
return code_windows
def _merge_windows_with_same_context(self, code_windows):
merged_code_windows = defaultdict(list)
for code_window in code_windows:
context = code_window['context']
metadata = code_window['metadata']
merged_code_windows[context].append(metadata)
json_lines = []
for context, metadata_list in merged_code_windows.items():
json_lines.append({
'context': context,
'metadata': metadata_list
})
return json_lines
def build_windows(self):
all_code_windows = []
for fpath_tuple, code in self.source_code_files.items():
            all_code_windows += self._build_windows_for_a_file(fpath_tuple, code)
merged_code_windows = self._merge_windows_with_same_context(all_code_windows)
print(f'build {len(merged_code_windows)} windows for {self.repo} with window size {self.window_size} and slice {self.slice_size}')
output_path = FilePathBuilder.repo_windows_path(self.repo, self.window_size, self.slice_size)
Tools.dump_pickle(merged_code_windows, output_path)
class BaselineWindowMaker:
'''the retrieve-and-generate approach'''
def __init__(self, benchmark, repo, window_size, tasks):
self.benchmark = benchmark
self.repo = repo
self.window_size = window_size
self.tasks = tasks
self.source_code = Tools.iterate_repository(repo)
def build_window(self):
code_windows = []
for task in self.tasks:
if task['metadata']['task_id'].split('/')[0] != self.repo:
continue
fpath_tuple = tuple(task['metadata']['fpath_tuple'])
line_no = task['metadata']['line_no']
original_code = self.source_code[fpath_tuple]
code_lines = original_code.splitlines()
context_start_lineno = task['metadata']['context_start_lineno']
start_line_no = max(context_start_lineno, line_no - self.window_size)
window_lines = [i for i in code_lines[start_line_no:line_no]]
code_windows.append({
'context': '\n'.join(window_lines),
'metadata': {
'fpath_tuple': fpath_tuple,
'line_no': line_no, # line_no starts from 0
'task_id': task['metadata']['task_id'],
'start_line_no': start_line_no,
'end_line_no': line_no,
'window_size': self.window_size,
'context_start_lineno': context_start_lineno,
'repo': self.repo
}
})
print(f'build {len(code_windows)} baseline windows for {self.repo} with window size {self.window_size}')
output_path = FilePathBuilder.search_first_window_path(self.benchmark, CONSTANTS.rg, self.repo, self.window_size)
Tools.dump_pickle(code_windows, output_path)
class GroundTruthWindowMaker:
def __init__(self, benchmark, repo, window_size, tasks):
self.benchmark = benchmark
self.repo = repo
self.window_size = window_size
self.tasks = tasks
self.source_code = Tools.iterate_repository(repo)
def build_window(self):
code_windows = []
delta_size = self.window_size // 2
for task in self.tasks:
if task['metadata']['task_id'].split('/')[0] != self.repo:
continue
fpath_tuple = tuple(task['metadata']['fpath_tuple'])
line_no = task['metadata']['line_no']
original_code = self.source_code[fpath_tuple]
code_lines = original_code.splitlines()
context_start_lineno = task['metadata']['context_start_lineno']
start_line_no = max(context_start_lineno, line_no - delta_size)
end_line_no = min(len(code_lines), line_no + self.window_size - delta_size)
window_lines = [i for i in code_lines[start_line_no:end_line_no]]
code_windows.append({
'context': '\n'.join(window_lines),
'metadata': {
'fpath_tuple': fpath_tuple,
'line_no': line_no, # line_no starts from 0
'task_id': task['metadata']['task_id'],
'start_line_no': start_line_no,
'end_line_no': end_line_no,
'window_size': self.window_size,
'context_start_lineno': context_start_lineno,
'repo': self.repo
}
})
print(f'build {len(code_windows)} ground truth windows for {self.repo} with window size {self.window_size}')
output_path = FilePathBuilder.search_first_window_path(self.benchmark, CONSTANTS.rg, self.repo, self.window_size)
Tools.dump_pickle(code_windows, output_path)
class PredictionWindowMaker:
def __init__(self, repo, window_size, prediction_path, window_path_builder):
self.repo = repo
self.window_size = window_size
self.prediction_path = prediction_path
self.source_code = Tools.iterate_repository(repo)
self.predictions = Tools.load_jsonl(prediction_path)
self.window_path_builder = window_path_builder
def build_window(self, type='centered'):
code_windows = []
delta_size = self.window_size // 2
for prediction in self.predictions:
if prediction['metadata']['task_id'].split('/')[0] != self.repo:
continue
fpath_tuple = tuple(prediction['metadata']['fpath_tuple'])
line_no = prediction['metadata']['line_no'] # line_no in prediction file starts from 0
original_code = self.source_code[fpath_tuple]
code_lines = original_code.splitlines()
context_start_lineno = prediction['metadata']['context_start_lineno']
start_line_no = max(context_start_lineno, line_no - delta_size)
for sample in [prediction['choices'][i]['text'] for i in range(len(prediction['choices']))]:
# TODO actually only one sample is generated
sample_lines = [i for i in sample.splitlines() if i.strip()]
new_code_lines = code_lines[:line_no] + sample_lines
end_line_no = min(len(new_code_lines), line_no + self.window_size - delta_size)
window_lines = [i for i in new_code_lines[start_line_no:end_line_no] if i.strip()]
if not window_lines: # all empty lines
continue
code_windows.append({
'context': '\n'.join(window_lines),
'metadata': {
'fpath_tuple': fpath_tuple,
'line_no': line_no, # line_no starts from 0
'prediction': sample,
'task_id': prediction['metadata']['task_id'],
'start_line_no': start_line_no,
'end_line_no': end_line_no,
'window_size': self.window_size,
'context_start_lineno': context_start_lineno,
'repo': self.repo
}
})
print(f'build {len(code_windows)} prediction windows for {self.repo} with window size {self.window_size}')
output_path = self.window_path_builder(self.prediction_path, self.repo, self.window_size)
Tools.dump_pickle(code_windows, output_path)
class MakeWindowWrapper:
def __init__(self, benchmark, repos, window_sizes, slice_sizes):
self.repos = repos
self.window_sizes = window_sizes
self.slice_sizes = slice_sizes
self.benchmark = benchmark
if benchmark == CONSTANTS.line_benchmark:
self.task_file_path = FilePathBuilder.random_line_completion_benchmark
elif benchmark == CONSTANTS.api_benchmark:
self.task_file_path = FilePathBuilder.api_completion_benchmark
elif benchmark == CONSTANTS.short_line_benchmark:
self.task_file_path = FilePathBuilder.short_random_line_completion_benchmark
elif benchmark == CONSTANTS.short_api_benchmark:
self.task_file_path = FilePathBuilder.short_api_completion_benchmark
def window_for_repo_files(self):
for window_size, slice_size in itertools.product(self.window_sizes, self.slice_sizes):
for repo in self.repos:
repo_window_maker = RepoWindowMaker(repo, window_size, slice_size)
repo_window_maker.build_windows()
def window_for_baseline_and_ground(self):
tasks = Tools.load_jsonl(self.task_file_path)
for window_size in self.window_sizes:
for repo in self.repos:
baseline_window_maker = BaselineWindowMaker(self.benchmark, repo, window_size, tasks)
ground_window_maker = GroundTruthWindowMaker(self.benchmark, repo, window_size, tasks)
baseline_window_maker.build_window()
ground_window_maker.build_window()
def window_for_prediction(self, mode, prediction_path_template):
for window_size, slice_size in itertools.product(self.window_sizes, self.slice_sizes):
prediction_path = prediction_path_template.format(window_size=window_size, slice_size=slice_size)
for repo in self.repos:
window_path_builder = functools.partial(FilePathBuilder.gen_first_window_path, self.benchmark, mode)
pred_window_maker = PredictionWindowMaker(repo, window_size, prediction_path, window_path_builder)
pred_window_maker.build_window()
|
CodeT/RepoCoder/make_window.py/0
|
{
"file_path": "CodeT/RepoCoder/make_window.py",
"repo_id": "CodeT",
"token_count": 5371
}
| 227 |
import os
import time
import configparser
from pathlib import Path
API_KEYS_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'openaiapirc')
class PromptFile:
context_source_filename = ""
default_context_filename = "current_context.txt"
default_file_path = os.path.join(os.path.dirname(__file__), "..", default_context_filename)
default_config_path = os.path.join(os.path.dirname(__file__), "..", "current_context.config")
def __init__(self, file_name, config):
        self.context_source_filename = "{}-context.txt".format(config['shell']) # feel free to set your own default context path here
        self.config = config
        self.file_path = self.default_file_path
        self.config_path = self.default_config_path
# loading in one of the saved contexts
if file_name != self.default_context_filename:
self.load_context(file_name, True)
def has_config(self):
"""
Check if the prompt file has a corresponding config file
"""
return os.path.isfile(self.config_path)
def read_config(self):
"""
Read the prompt config and return a dictionary
"""
if self.has_config() == False:
self.set_config(self.config)
return self.config
with open(self.config_path, 'r') as f:
lines = f.readlines()
config = {
'engine': lines[0].split(':')[1].strip(),
'temperature': float(lines[1].split(':')[1].strip()),
'max_tokens': int(lines[2].split(':')[1].strip()),
'shell': lines[3].split(':')[1].strip(),
'multi_turn': lines[4].split(':')[1].strip(),
'token_count': int(lines[5].split(':')[1].strip())
}
self.config = config
return self.config
def set_config(self, config):
"""
Set the prompt headers with the new config
"""
self.config = config
with open(self.config_path, 'w') as f:
f.write('engine: {}\n'.format(self.config['engine']))
f.write('temperature: {}\n'.format(self.config['temperature']))
f.write('max_tokens: {}\n'.format(self.config['max_tokens']))
f.write('shell: {}\n'.format(self.config['shell']))
f.write('multi_turn: {}\n'.format(self.config['multi_turn']))
f.write('token_count: {}\n'.format(self.config['token_count']))
def show_config(self):
print('\n')
        # render the config dictionary as a list of '#'-prefixed lines
lines = []
for key, value in self.config.items():
lines.append('# {}: {}\n'.format(key, value))
print(''.join(lines))
def add_input_output_pair(self, user_query, prompt_response):
"""
Add lines to file_name and update the token_count
"""
with open(self.file_path, 'a') as f:
f.write(user_query)
f.write(prompt_response)
if self.config['multi_turn'] == 'on':
self.config['token_count'] += len(user_query.split()) + len(prompt_response.split())
self.set_config(self.config)
def read_prompt_file(self, input):
"""
Get the updated prompt file
Checks for token overflow and appends the current input
Returns: the prompt file after appending the input
"""
input_tokens_count = len(input.split())
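        # note: token counts here are approximated by whitespace word counts against a 2048-token budget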
need_to_refresh = (self.config['token_count'] + input_tokens_count > 2048)
if need_to_refresh:
# delete first 2 lines of prompt context file
with open(self.file_path, 'r') as f:
lines = f.readlines()
prompt = lines[2:] # drop first 2 lines of prompt
with open(self.file_path, 'w') as f:
f.writelines(prompt)
# get input from prompt file
with open(self.file_path, 'r') as f:
lines = f.readlines()
return ''.join(lines)
def get_token_count(self):
"""
Get the actual token count
"""
token_count = 0
if self.has_config():
with open(self.config_path, 'r') as f:
lines = f.readlines()
token_count = int(lines[5].split(':')[1].strip())
true_token_count = 0
with open(self.file_path, 'r') as f:
lines = f.readlines()
# count the number of words in the prompt file
for line in lines:
true_token_count += len(line.split())
if true_token_count != token_count:
self.config['token_count'] = true_token_count
self.set_config(self.config)
return true_token_count
def clear(self):
"""
Clear the prompt file, while keeping the config
Note: saves a copy to the deleted folder
"""
config = self.read_config()
filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
with open(self.file_path, 'r') as f:
lines = f.readlines()
filename = os.path.join(os.path.dirname(__file__), "..", "deleted", filename)
with Path(filename).open('w') as f:
f.writelines(lines)
# delete the prompt file
with open(self.file_path, 'w') as f:
f.write('')
print("\n# Context has been cleared, temporarily saved to {}".format(filename))
self.set_config(config)
def clear_last_interaction(self):
"""
Clear the last interaction from the prompt file
"""
with open(self.file_path, 'r') as f:
lines = f.readlines()
if len(lines) > 1:
lines.pop()
lines.pop()
with open(self.file_path, 'w') as f:
f.writelines(lines)
print("\n# Unlearned interaction")
def save_to(self, save_name):
"""
Save the prompt file to a new location with the config
"""
if not save_name.endswith('.txt'):
save_name = save_name + '.txt'
save_path = os.path.join(os.path.dirname(__file__), "..", "contexts", save_name)
# first write the config
with open(self.config_path, 'r') as f:
lines = f.readlines()
lines = ['## ' + line for line in lines]
with Path(save_path).open('w') as f:
f.writelines(lines)
# then write the prompt file
with open(self.file_path, 'r') as f:
lines = f.readlines()
with Path(save_path).open('a') as f:
f.writelines(lines)
print('\n# Context saved to {}'.format(save_name))
def start_multi_turn(self):
"""
Turn on context mode
"""
self.config['multi_turn'] = 'on'
self.set_config(self.config)
print("\n# Multi turn mode is on")
def stop_multi_turn(self):
"""
Turn off context mode
"""
self.config['multi_turn'] = 'off'
self.set_config(self.config)
print("\n# Multi turn mode is off")
def default_context(self):
"""
Go to default context
"""
self.load_context(self.context_source_filename)
def load_context(self, filename, initialize=False):
"""
Loads a context file into current_context
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
filepath = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", filename))
# check if the file exists
if filepath.exists():
with filepath.open('r') as f:
lines = f.readlines()
# read in the engine name from openaiapirc
config = configparser.ConfigParser()
config.read(API_KEYS_LOCATION)
ENGINE = config['openai']['engine'].strip('"').strip("'")
config = {
'engine': ENGINE,
'temperature': float(lines[1].split(':')[1].strip()),
'max_tokens': int(lines[2].split(':')[1].strip()),
'shell': lines[3].split(':')[1].strip(),
'multi_turn': lines[4].split(':')[1].strip(),
'token_count': int(lines[5].split(':')[1].strip())
}
# use new config if old config doesn't exist
if initialize == False or self.has_config() == False:
self.set_config(config)
else:
self.config = self.read_config()
lines = lines[6:]
# write to the current prompt file if we are in multi-turn mode
if initialize == False or self.config['multi_turn'] == "off":
with open(self.file_path, 'w') as f:
f.writelines(lines)
if initialize == False:
print('\n# Context loaded from {}'.format(filename))
else:
print("\n# File not found")
return False
|
Codex-CLI/src/prompt_file.py/0
|
{
"file_path": "Codex-CLI/src/prompt_file.py",
"repo_id": "Codex-CLI",
"token_count": 4429
}
| 228 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: Unittests for Python SDK of the Cognitive Face API.
"""
try:
from . import config
except ImportError:
raise Exception(
'Please setup unittest configuration `config.py` properly by '
'referring to `config.sample.py` so as to perform the unittests.')
import cognitive_face as CF
from . import util
def setUpModule():
# pylint: disable=invalid-name
"""Setup for the whole unitests.
- Set Subscription Key.
- Set Base URL.
    - Setup needed data for unittests.
"""
print("setUpModule Begin.")
CF.Key.set(config.KEY)
CF.BaseUrl.set(config.BASE_URL)
util.DataStore.setup_face()
util.DataStore.setup_face_list()
util.DataStore.setup_large_face_list()
util.DataStore.setup_large_person_group()
util.DataStore.setup_person_group()
print("setUpModule End.")
def tearDownModule():
# pylint: disable=invalid-name
"""TearDown for the whole unittests.
- Remove all the created persisted data.
"""
print("tearDownModule Begin.")
CF.util.clear_face_lists()
CF.util.clear_person_groups()
CF.util.clear_large_face_lists()
CF.util.clear_large_person_groups()
print("tearDownModule End.")
|
Cognitive-Face-Python/cognitive_face/tests/__init__.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/tests/__init__.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 480
}
| 229 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: face.py
Description: Face model for Python SDK Sample.
"""
import wx
import util
class Rect(object):
"""Face Rectangle."""
def __init__(self, rect):
super(Rect, self).__init__()
self.set_rect(rect)
def set_rect(self, rect):
"""Set rectangle."""
self.left = int(rect['left'])
self.top = int(rect['top'])
self.width = int(rect['width'])
self.height = int(rect['height'])
class Attribute(object):
"""Attributes for face."""
def __init__(self, attr):
super(Attribute, self).__init__()
self.set_attr(attr)
def set_attr(self, attr):
"""Set the attribute value."""
self.gender = attr['gender']
self.age = int(attr['age'])
if not attr['hair']['hairColor']:
if attr['hair']['invisible']:
self.hair = 'Invisible'
else:
self.hair = 'Bald'
else:
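            # pick the hair color with the highest confidence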
self.hair = max(
attr['hair']['hairColor'],
key=lambda x: x['confidence'])['color']
self.facial_hair = sum(attr['facialHair'].values()) > 0 and 'Yes' \
or 'No'
self.makeup = any(attr['makeup'].values())
self.emotion = util.key_with_max_value(attr['emotion'])
self.occlusion = any(attr['occlusion'].values())
self.exposure = attr['exposure']['exposureLevel']
self.head_pose = "Pitch: {}, Roll:{}, Yaw:{}".format(
attr['headPose']['pitch'], attr['headPose']['roll'],
attr['headPose']['yaw'])
if not attr['accessories']:
self.accessories = 'NoAccessories'
else:
self.accessories = ' '.join(
[str(x['type']) for x in attr['accessories']])
class Face(object):
"""Face Model for each face."""
def __init__(self, res, path, size=util.MAX_THUMBNAIL_SIZE):
super(Face, self).__init__()
self.path = path
img = util.rotate_image(path)
self.bmp = img.ConvertToBitmap()
self.name = None
if res.get('faceId'):
self.id = res['faceId']
if res.get('persistedFaceId'):
self.persisted_id = res['persistedFaceId']
if res.get('faceRectangle'):
self.rect = Rect(res['faceRectangle'])
self.bmp = self.bmp.GetSubBitmap(
wx.Rect(self.rect.left, self.rect.top, self.rect.width,
self.rect.height))
if res.get('faceAttributes'):
self.attr = Attribute(res['faceAttributes'])
self.bmp = util.scale_image(
self.bmp.ConvertToImage(), size=size).ConvertToBitmap()
def set_name(self, name):
"""Set the name for the face."""
self.name = name
|
Cognitive-Face-Python/sample/model/face.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/model/face.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1376
}
| 230 |
export CUDA_VISIBLE_DEVICES=2
python t5_run_eval.py \
--model_name_or_path ./checkpoint/Com/ControlExp_finetune_set1_seed1/checkpoint-50000 \
--subtask Com \
--validation_file test \
--ebatch_size 16 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_ControlExp_test.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_ControlExp_test.sh",
"repo_id": "ContextualSP",
"token_count": 84
}
| 231 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import argparse
import json
import os
import random
from datetime import datetime
from pprint import pprint
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, BatchSampler
from pretrained_models import *
# from tensorboardX import SummaryWriter
# from torch.utils.tensorboard import SummaryWriter
from experiments.exp_def import TaskDefs
from mt_dnn.inference import eval_model, extract_encoding
from data_utils.log_wrapper import create_logger
from data_utils.task_def import EncoderModelType
from data_utils.utils import set_environment
from mt_dnn.batcher import (
SingleTaskDataset,
MultiTaskDataset,
Collater,
MultiTaskBatchSampler,
DistMultiTaskBatchSampler,
DistSingleTaskBatchSampler,
TaskIterBatchSampler
)
from mt_dnn.batcher import DistTaskDataset
from mt_dnn.adapter_diff_model import MTDNNModel
from typing import Optional
from dataclasses import dataclass, field
from transformers import (
HfArgumentParser,
MultiLingAdapterArguments
)
@dataclass
class ModelArguments:
update_bert_opt: int = field(
default=0, metadata={"help": "BERT freeze or not"}
)
multi_gpu_on: bool = field(
default=False, metadata={"help": "distributed training"}
)
mem_cum_type: str = field(
        default='simple', metadata={"help": "bilinear/simple/default"}
)
answer_num_turn: int = field(
default=5, metadata={"help": "answer_num_turn"}
)
answer_mem_drop_p: float = field(
default=0.1, metadata={"help": "answer_mem_drop_p"}
)
answer_att_hidden_size: int = field(
default=128, metadata={"help": "answer_att_hidden_size"}
)
answer_att_type: str = field(
        default='bilinear', metadata={"help": "bilinear/simple/default"}
)
answer_rnn_type: str = field(
default='gru', metadata={"help": "rnn/gru/lstm"}
)
answer_sum_att_type: str = field(
        default='bilinear', metadata={"help": "bilinear/simple/default"}
)
answer_merge_opt: int = field(
default=1, metadata={"help": "answer_merge_opt"}
)
answer_mem_type: int = field(
default=1, metadata={"help": "answer_mem_type"}
)
max_answer_len: int = field(
default=10, metadata={"help": "max_answer_len"}
)
answer_dropout_p: float = field(
default=0.1, metadata={"help": "answer_dropout_p"}
)
answer_weight_norm_on: bool = field(
default=False, metadata={"help": "answer_weight_norm_on"}
)
dump_state_on: bool = field(
default=False, metadata={"help": "dump_state_on"}
)
answer_opt: int = field(
default=1, metadata={"help": "0,1"}
)
pooler_actf: str = field(
default='tanh', metadata={"help": "tanh/relu/gelu"}
)
mtl_opt: int = field(
default=0, metadata={"help": "mtl_opt"}
)
ratio: float = field(
default=0, metadata={"help": "ratio"}
)
mix_opt: int = field(
default=0, metadata={"help": "mix_opt"}
)
max_seq_len: int = field(
default=512, metadata={"help": "max_seq_len"}
)
init_ratio: int = field(
default=1, metadata={"help": "init_ratio"}
)
encoder_type: int = field(
default=EncoderModelType.BERT, metadata={"help": "encoder_type"}
)
num_hidden_layers: int = field(
default=-1, metadata={"help": "num_hidden_layers"}
)
# BERT pre-training
bert_model_type: str = field(
default='bert-base-uncased', metadata={"help": "bert_model_type"}
)
do_lower_case: bool = field(
default=True, metadata={"help": "do_lower_case"}
)
masked_lm_prob: float = field(
default=0.15, metadata={"help": "masked_lm_prob"}
)
short_seq_prob: float = field(
default=0.2, metadata={"help": "short_seq_prob"}
)
max_predictions_per_seq: int = field(
default=128, metadata={"help": "max_predictions_per_seq"}
)
# bin samples
bin_on: bool = field(
default=False, metadata={"help": "bin_on"}
)
bin_size: int = field(
default=64, metadata={"help": "bin_size"}
)
bin_grow_ratio: float = field(
        default=0.5, metadata={"help": "bin_grow_ratio"}
)
# dist training
local_rank: int = field(
default=-1, metadata={"help": "For distributed training: local_rank"}
)
world_size: int = field(
default=1, metadata={"help": "For distributed training: world_size"}
)
master_addr: str = field(
default='localhost', metadata={"help": "master_addr"}
)
master_port: str = field(
default='6600', metadata={"help": "master_port"}
)
backend: str = field(
default='nccl', metadata={"help": "backend"}
)
@dataclass
class DataArguments:
log_file: str = field(
default='mt-dnn-train.log', metadata={"help": "path for log file."}
)
tensorboard: bool = field(
default=False, metadata={"help": "tensorboard"}
)
tensorboard_logdir: str = field(
default='tensorboard_logdir', metadata={"help": "tensorboard_logdir"}
)
data_dir: str = field(
default='data/canonical_data/bert_uncased_lower', metadata={"help": "data_dir"}
)
data_sort_on: bool = field(
default=False, metadata={"help": "data_sort_on"}
)
name: str = field(
default='farmer', metadata={"help": "name"}
)
task_def: str = field(
default='experiments/glue/glue_task_def.yml', metadata={"help": "task_def"}
)
train_datasets: str = field(
default='mnli,mrpc', metadata={"help": "train_datasets"}
)
test_datasets: str = field(
default='mnli_matched,mnli_mismatched', metadata={"help": "test_datasets"}
)
glue_format_on: bool = field(
default=False, metadata={"help": "glue_format_on"}
)
mkd_opt: int = field(
default=0, metadata={"help": ">0 to turn on knowledge distillation, requires 'softlabel' column in input data"}
)
do_padding: bool = field(
default=False, metadata={"help": "do_padding"}
)
@dataclass
class TrainingArguments:
cuda: bool = field(
default=torch.cuda.is_available(), metadata={"help": "whether to use GPU acceleration."}
)
init_checkpoint: str = field(
default='bert-base-uncased', metadata={"help": "init_checkpoint"}
)
log_per_updates: int = field(
default=500, metadata={"help": "log_per_updates"}
)
save_per_updates: int = field(
default=10000, metadata={"help": "save_per_updates"}
)
save_per_updates_on: bool = field(
default=True, metadata={"help": "save_per_updates_on"}
)
epochs: int = field(
default=5, metadata={"help": "epochs"}
)
batch_size: int = field(
default=8, metadata={"help": "batch_size"}
)
batch_size_eval: int = field(
default=8, metadata={"help": "batch_size_eval"}
)
optimizer: str = field(
default='adamax', metadata={"help": "supported optimizer: adamax, sgd, adadelta, adam"}
)
grad_clipping: float = field(
default=0, metadata={"help": "grad_clipping"}
)
global_grad_clipping: float = field(
default=1.0, metadata={"help": "global_grad_clipping"}
)
weight_decay: float = field(
default=0, metadata={"help": "weight_decay"}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "learning_rate"}
)
momentum: float = field(
default=0, metadata={"help": "momentum"}
)
warmup: float = field(
default=0.1, metadata={"help": "warmup"}
)
warmup_schedule: str = field(
default='warmup_linear', metadata={"help": "warmup_schedule"}
)
adam_eps: float = field(
default=1e-6, metadata={"help": "adam_eps"}
)
vb_dropout: bool = field(
default=False, metadata={"help": "vb_dropout"}
)
dropout_p: float = field(
default=0.1, metadata={"help": "dropout_p"}
)
dropout_w: float = field(
default=0.000, metadata={"help": "dropout_w"}
)
bert_dropout_p: float = field(
default=0.1, metadata={"help": "bert_dropout_p"}
)
# loading
model_ckpt: str = field(
default='checkpoints/model_0.pt', metadata={"help": "model_ckpt"}
)
resume: bool = field(
default=False, metadata={"help": "resume"}
)
# scheduler
scheduler_type: int = field(
default=0, metadata={"help": "0: linear, 1: cosine, 2 constant"}
)
output_dir: str = field(
default='checkpoint', metadata={"help": "output_dir"}
)
seed: int = field(
default=2018, metadata={"help": "random seed for data shuffling, embedding init, etc."}
)
grad_accumulation_step: int = field(
default=21018, metadata={"help": "grad_accumulation_step"}
)
ite_batch_num: int = field(
default=500, metadata={"help": "ite_batch_num"}
)
# fp 16
fp16: bool = field(
default=False, metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"}
)
fp16_opt_level: str = field(
default='O1', metadata={"help": "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html"}
)
# adv training
adv_train: bool = field(
default=False, metadata={"help": "adv_train"}
)
# the current release only includes smart perturbation
adv_opt: int = field(
default=0, metadata={"help": "adv_opt"}
)
adv_norm_level: int = field(
default=0, metadata={"help": "adv_norm_level"}
)
adv_p_norm: str = field(
default='inf', metadata={"help": "adv_p_norm"}
)
adv_alpha: float = field(
default=1, metadata={"help": "adv_alpha"}
)
adv_k: int = field(
default=1, metadata={"help": "adv_k"}
)
adv_step_size: float = field(
default=1e-5, metadata={"help": "adv_step_size"}
)
adv_noise_var: float = field(
default=1e-5, metadata={"help": "adv_noise_var"}
)
adv_epsilon: float = field(
default=1e-6, metadata={"help": "adv_epsilon"}
)
encode_mode: bool = field(
default=False, metadata={"help": "only encode test data"}
)
debug: bool = field(
default=False, metadata={"help": "print debug info"}
)
# transformer cache
transformer_cache: str = field(
default='.cache', metadata={"help": "transformer_cache"}
)
@dataclass
class DUMTLArguments(MultiLingAdapterArguments):
train_adapter_fusion: bool = field(
default=False, metadata={"help": "Train an adapter fusion for target task."}
)
finetune: bool = field(
default=False, metadata={"help": "Finetuning on target task."}
)
load_adapter_fusion: Optional[str] = field(
default="", metadata={"help": "Pre-trained adapter fusion module to be loaded from Hub."}
)
diff_structure_init: str = field(
default=None,
metadata={"help": "The initial differentitated adapter path."},
)
adapter_diff: bool = field(
default=True,
metadata={"help": "Differentitated adapter training mode."},
)
adapter_cache_path: str = field(
default='checkpoint',
metadata={"help": "The initial differentitated adapter path."},
)
min_intra_simiarity: int = field(
default=2, metadata={"help": "math.cos(math.pi / min_intra_simiarity)"}
)
max_entropy_threshold: int = field(
default=3, metadata={"help": "math.cos(math.pi / max_entropy_threshold)"}
)
max_interference_degree: float = field(
default=0.5, metadata={"help": "max_interference_degree"}
)
hfparser = HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments, DUMTLArguments)
)
model_args, data_args, training_args, adapter_args = hfparser.parse_args_into_dataclasses()
output_dir = training_args.output_dir
data_dir = data_args.data_dir
data_args.train_datasets = data_args.train_datasets.split(",")
data_args.test_datasets = data_args.test_datasets.split(",")
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.abspath(output_dir)
set_environment(training_args.seed, training_args.cuda)
log_path = data_args.log_file
logger = create_logger(__name__, to_disk=True, log_file=log_path)
task_defs = TaskDefs(data_args.task_def)
encoder_type = model_args.encoder_type
def dump(path, data):
with open(path, "w") as f:
json.dump(data, f)
def evaluation(
model,
datasets,
data_list,
task_defs,
output_dir="checkpoints",
epoch=0,
n_updates=-1,
with_label=False,
tensorboard=None,
glue_format_on=False,
test_on=False,
device=None,
logger=None,
):
# eval on rank 1
print_message(logger, "Evaluation")
test_prefix = "Test" if test_on else "Dev"
if n_updates > 0:
updates_str = "updates"
else:
updates_str = "epoch"
updates = model.updates if n_updates > 0 else epoch
for idx, dataset in enumerate(datasets):
prefix = dataset.split("_")[0]
model._switch_model_task_mode(prefix)
task_def = task_defs.get_task_def(prefix)
label_dict = task_def.label_vocab
test_data = data_list[idx]
if test_data is not None:
with torch.no_grad():
(
test_metrics,
test_predictions,
test_scores,
test_golds,
test_ids,
) = eval_model(
model,
test_data,
metric_meta=task_def.metric_meta,
device=device,
with_label=with_label,
label_mapper=label_dict,
task_type=task_def.task_type,
)
for key, val in test_metrics.items():
if tensorboard:
tensorboard.add_scalar(
"{}/{}/{}".format(test_prefix, dataset, key),
val,
global_step=updates,
)
if isinstance(val, str):
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: {5}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
elif isinstance(val, float):
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: {5:.3f}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
else:
test_metrics[key] = str(val)
print_message(
logger,
"Task {0} -- {1} {2} -- {3} {4}: \n{5}".format(
dataset, updates_str, updates, test_prefix, key, val
),
level=1,
)
if model_args.local_rank in [-1, 0]:
score_file = os.path.join(
output_dir,
"{}_{}_scores_{}_{}.json".format(
dataset, test_prefix.lower(), updates_str, updates
),
)
results = {
"metrics": test_metrics,
"predictions": test_predictions,
"uids": test_ids,
"scores": test_scores,
}
dump(score_file, results)
if glue_format_on:
from experiments.glue.glue_utils import submit
official_score_file = os.path.join(
output_dir,
"{}_{}_scores_{}.tsv".format(
dataset, test_prefix.lower(), updates_str
),
)
submit(official_score_file, results, label_dict)
def initialize_distributed(logger):
"""Initialize torch.distributed."""
model_args.rank = int(os.getenv("RANK", "0"))
model_args.world_size = int(os.getenv("WORLD_SIZE", "1"))
    batch_size_per_gpu = int(training_args.batch_size / model_args.world_size)
    print_message(logger, "Batch Size Per GPU: {}".format(batch_size_per_gpu))
device = model_args.rank % torch.cuda.device_count()
if model_args.local_rank is not None:
device = model_args.local_rank
torch.cuda.set_device(device)
device = torch.device("cuda", model_args.local_rank)
# Call the init process
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6600")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(
backend=model_args.backend,
world_size=model_args.world_size,
rank=model_args.rank,
init_method=init_method,
)
return device
def print_message(logger, message, level=0):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
do_logging = True
else:
do_logging = False
else:
do_logging = True
if do_logging:
if level == 1:
logger.warning(message)
else:
logger.info(message)
def main():
# set up dist
device = torch.device("cuda")
if model_args.local_rank > -1:
device = initialize_distributed(logger)
elif torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
opt = [(k, eval(f'training_args.{k}')) for k in vars(training_args)]
opt.extend([(k, eval(f'model_args.{k}')) for k in vars(model_args)])
opt.extend([(k, eval(f'data_args.{k}')) for k in vars(data_args)])
opt.extend([(k, eval(f'adapter_args.{k}')) for k in vars(adapter_args)])
opt = dict(opt)
# update data dir
opt["data_dir"] = data_dir
batch_size = training_args.batch_size
print_message(logger, "Launching the MT-DNN training")
# return
tasks = {}
task_def_list = []
dropout_list = []
printable = model_args.local_rank in [-1, 0]
train_datasets = []
for dataset in data_args.train_datasets:
prefix = dataset.split("_")[0]
if prefix in tasks:
continue
task_id = len(tasks)
tasks[prefix] = task_id
task_def = task_defs.get_task_def(prefix)
task_def_list.append(task_def)
train_path = os.path.join(data_dir, "{}_train.json".format(dataset))
print_message(logger, "Loading {} as task {}".format(train_path, task_id))
train_data_set = SingleTaskDataset(
train_path,
True,
maxlen=model_args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
train_datasets.append(train_data_set)
train_collater = Collater(
dropout_w=training_args.dropout_w,
encoder_type=encoder_type,
soft_label=data_args.mkd_opt > 0,
max_seq_len=model_args.max_seq_len,
do_padding=data_args.do_padding,
)
multi_task_train_dataset = MultiTaskDataset(train_datasets)
if model_args.local_rank != -1:
multi_task_batch_sampler = DistMultiTaskBatchSampler(
train_datasets,
training_args.batch_size,
model_args.mix_opt,
model_args.ratio,
rank=model_args.local_rank,
world_size=model_args.world_size,
)
else:
multi_task_batch_sampler = TaskIterBatchSampler(
train_datasets,
training_args.batch_size,
model_args.mix_opt,
model_args.ratio,
bin_on=model_args.bin_on,
bin_size=model_args.bin_size,
bin_grow_ratio=model_args.bin_grow_ratio,
ite_batch_num=training_args.ite_batch_num
)
multi_task_train_data = DataLoader(
multi_task_train_dataset,
batch_sampler=multi_task_batch_sampler,
collate_fn=train_collater.collate_fn,
pin_memory=training_args.cuda,
)
id_task_map = dict([(v, k) for k, v in tasks.items()])
opt["task_def_list"] = task_def_list
dev_data_list = []
test_data_list = []
heldout_eval_data_list = []
test_collater = Collater(
is_train=False,
encoder_type=encoder_type,
max_seq_len=model_args.max_seq_len,
do_padding=data_args.do_padding,
)
for dataset in data_args.test_datasets:
prefix = dataset.split("_")[0]
task_def = task_defs.get_task_def(prefix)
task_id = tasks[prefix]
task_type = task_def.task_type
data_type = task_def.data_type
dev_path = os.path.join(data_dir, "{}_dev.json".format(dataset))
dev_data = None
if os.path.exists(dev_path):
dev_data_set = SingleTaskDataset(
dev_path,
False,
maxlen=model_args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
if model_args.local_rank != -1:
dev_data_set = DistTaskDataset(dev_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(
dev_data_set,
training_args.batch_size_eval,
rank=model_args.local_rank,
world_size=model_args.world_size,
)
dev_data = DataLoader(
dev_data_set,
batch_sampler=single_task_batch_sampler,
collate_fn=test_collater.collate_fn,
pin_memory=training_args.cuda,
)
else:
dev_data = DataLoader(
dev_data_set,
batch_size=training_args.batch_size_eval,
collate_fn=test_collater.collate_fn,
pin_memory=training_args.cuda,
)
dev_data_list.append(dev_data)
tmp_heldout_eval_data_list = []
if os.path.exists(dev_path):
for hs in [0, 10]:
dev_data_set = SingleTaskDataset(
dev_path,
True,
maxlen=model_args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
heldout_start=hs
)
# if model_args.local_rank != -1:
# dev_data_set = DistTaskDataset(dev_data_set, task_id)
# single_task_batch_sampler = DistSingleTaskBatchSampler(
# dev_data_set,
# training_args.batch_size_eval,
# rank=model_args.local_rank,
# world_size=model_args.world_size,
# )
# heldout_eval_data = DataLoader(
# dev_data_set,
# batch_sampler=single_task_batch_sampler,
# collate_fn=test_collater.collate_fn,
# pin_memory=training_args.cuda,
# )
# else:
# heldout_eval_data = DataLoader(
# dev_data_set,
# batch_size=training_args.batch_size_eval,
# collate_fn=test_collater.collate_fn,
# pin_memory=training_args.cuda,
# )
tmp_heldout_eval_data_list.append(dev_data_set)
heldout_eval_data_list.append(tmp_heldout_eval_data_list)
test_path = os.path.join(data_dir, "{}_test.json".format(dataset))
test_data = None
if os.path.exists(test_path):
test_data_set = SingleTaskDataset(
test_path,
False,
maxlen=model_args.max_seq_len,
task_id=task_id,
task_def=task_def,
printable=printable,
)
if model_args.local_rank != -1:
test_data_set = DistTaskDataset(test_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(
test_data_set,
training_args.batch_size_eval,
rank=model_args.local_rank,
world_size=model_args.world_size,
)
test_data = DataLoader(
test_data_set,
batch_sampler=single_task_batch_sampler,
collate_fn=test_collater.collate_fn,
pin_memory=training_args.cuda,
)
else:
test_data = DataLoader(
test_data_set,
batch_size=training_args.batch_size_eval,
collate_fn=test_collater.collate_fn,
pin_memory=training_args.cuda,
)
test_data_list.append(test_data)
heldout_eval_data_list = [[hs[0] for hs in heldout_eval_data_list], [hs[1] for hs in heldout_eval_data_list]]
heldout_eval_data_list1 = MultiTaskDataset(heldout_eval_data_list[0])
heldout_eval_data_list2 = MultiTaskDataset(heldout_eval_data_list[1])
heldout_eval_dataset_list = [heldout_eval_data_list1, heldout_eval_data_list2]
tmp_heldout_eval_data_list = []
# TODO
for hi, heldout_datasets in enumerate(heldout_eval_data_list):
multi_task_batch_sampler = MultiTaskBatchSampler(
heldout_datasets,
training_args.batch_size,
model_args.mix_opt,
model_args.ratio,
bin_on=model_args.bin_on,
bin_size=model_args.bin_size,
bin_grow_ratio=model_args.bin_grow_ratio,
heldout=True
)
multi_task_heldout_data = DataLoader(
heldout_eval_dataset_list[hi],
batch_sampler=multi_task_batch_sampler,
collate_fn=train_collater.collate_fn,
pin_memory=training_args.cuda,
)
tmp_heldout_eval_data_list.append(multi_task_heldout_data)
heldout_eval_data_list = tmp_heldout_eval_data_list
# for data_loader in heldout_eval_data_list1:
# for (batch_meta, batch_data) in data_loader:
# batch_meta, batch_data = Collater.patch_data(device, batch_meta, batch_data)
# print(id_task_map[batch_meta["task_id"]])
# print(len(batch_data))
# exit(0)
print_message(logger, "#" * 20)
print_message(logger, opt)
print_message(logger, "#" * 20)
# div number of grad accumulation.
num_all_batches = (
training_args.epochs * len(multi_task_train_data) // training_args.grad_accumulation_step
)
print_message(logger, "############# Gradient Accumulation Info #############")
print_message(
logger, "number of step: {}".format(training_args.epochs * len(multi_task_train_data))
)
print_message(
logger,
"number of grad grad_accumulation step: {}".format(training_args.grad_accumulation_step),
)
print_message(logger, "adjusted number of step: {}".format(num_all_batches))
print_message(logger, "############# Gradient Accumulation Info #############")
init_model = training_args.init_checkpoint
state_dict = None
if os.path.exists(init_model):
if (
encoder_type == EncoderModelType.BERT
or encoder_type == EncoderModelType.DEBERTA
or encoder_type == EncoderModelType.ELECTRA
):
state_dict = torch.load(init_model, map_location=device)
config = state_dict["config"]
elif (
encoder_type == EncoderModelType.ROBERTA
or encoder_type == EncoderModelType.XLM
):
model_path = "{}/model.pt".format(init_model)
state_dict = torch.load(model_path, map_location=device)
arch = state_dict["args"].arch
arch = arch.replace("_", "-")
if encoder_type == EncoderModelType.XLM:
arch = "xlm-{}".format(arch)
# convert model arch
from data_utils.roberta_utils import update_roberta_keys
from data_utils.roberta_utils import patch_name_dict
state = update_roberta_keys(
state_dict["model"], nlayer=state_dict["args"].encoder_layers
)
state = patch_name_dict(state)
literal_encoder_type = EncoderModelType(opt["encoder_type"]).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[
literal_encoder_type
]
config = config_class.from_pretrained(arch).to_dict()
state_dict = {"state": state}
else:
if opt["encoder_type"] not in EncoderModelType._value2member_map_:
raise ValueError("encoder_type is out of pre-defined types")
literal_encoder_type = EncoderModelType(opt["encoder_type"]).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(
init_model, cache_dir=training_args.transformer_cache
).to_dict()
config["attention_probs_dropout_prob"] = training_args.bert_dropout_p
config["hidden_dropout_prob"] = training_args.bert_dropout_p
config["multi_gpu_on"] = model_args.multi_gpu_on
if model_args.num_hidden_layers > 0:
config["num_hidden_layers"] = model_args.num_hidden_layers
# opt.update(config)
model = MTDNNModel(
opt,
device=device,
state_dict=state_dict,
num_train_step=num_all_batches,
adapter_args=adapter_args,
adapter=True,
task_name='-'.join(list(tasks.keys())),
id_task_map=id_task_map,
heldout_eval_dataset=heldout_eval_data_list
)
if training_args.resume and training_args.model_ckpt:
print_message(logger, "loading model from {}".format(training_args.model_ckpt))
model.load(training_args.model_ckpt)
#### model meta str
headline = "############# Model Arch of MT-DNN #############"
### print network
print_message(logger, "\n{}\n{}\n".format(headline, model.network))
# dump config
config_file = os.path.join(output_dir, "config.json")
with open(config_file, "w", encoding="utf-8") as writer:
writer.write("{}\n".format(json.dumps(opt)))
writer.write("\n{}\n{}\n".format(headline, model.network))
print_message(logger, "Total number of params: {}".format(model.total_param))
# tensorboard
tensorboard = None
# if args.tensorboard:
# args.tensorboard_logdir = os.path.join(args.output_dir, args.tensorboard_logdir)
# tensorboard = SummaryWriter(log_dir=args.tensorboard_logdir)
if training_args.encode_mode:
for idx, dataset in enumerate(data_args.test_datasets):
prefix = dataset.split("_")[0]
test_data = test_data_list[idx]
with torch.no_grad():
encoding = extract_encoding(model, test_data, use_cuda=training_args.cuda)
torch.save(
encoding, os.path.join(output_dir, "{}_encoding.pt".format(dataset))
)
return
training_args.ite_batch_num = 5
diff_operation = True
differentiate_detect_step = training_args.ite_batch_num * len(id_task_map)
differentiate_start_step = 0
differentiate_rate_threshold = len(id_task_map)
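    # differentiation is checked every ite_batch_num * num_tasks parameter updates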
for epoch in range(0, training_args.epochs):
print_message(logger, "At epoch {}".format(epoch), level=1)
start = datetime.now()
for i, (batch_meta, batch_data) in enumerate(multi_task_train_data):
batch_meta, batch_data = Collater.patch_data(device, batch_meta, batch_data)
task_id = batch_meta["task_id"]
if id_task_map[task_id] != model.current_task:
model._switch_model_task_mode(id_task_map[task_id])
                print(f'>>> Switch to {model.current_task} Task Succeeded !!!')
model.update(batch_meta, batch_data)
if (model.updates) % (training_args.log_per_updates) == 0 or model.updates == 1:
                remaining_time = str(
(datetime.now() - start)
/ (i + 1)
* (len(multi_task_train_data) - i - 1)
).split(".")[0]
if training_args.adv_train and training_args.debug:
debug_info = " adv loss[%.5f] emb val[%.8f] eff_perturb[%.8f] " % (
model.adv_loss.avg,
model.emb_val.avg,
model.eff_perturb.avg,
)
else:
debug_info = " "
print_message(
logger,
"Task [{0:2}] updates[{1:6}] train loss[{2:.5f}]{3}remaining[{4}]".format(
task_id,
model.updates,
model.train_loss.avg,
debug_info,
                        remaining_time,
),
)
if data_args.tensorboard:
tensorboard.add_scalar(
"train/loss", model.train_loss.avg, global_step=model.updates
)
# Differentiation Operation
if model.updates > differentiate_start_step and model.updates % differentiate_detect_step == 0 and diff_operation:
model._differentiate_operate()
                print(f'>>> Differentiation Operation Succeeded !!!')
current_diff_rate = model._calculate_differentiated_rate()
print(f'>>> current_diff_rate: {current_diff_rate}')
if current_diff_rate >= differentiate_rate_threshold:
diff_operation = False
model._switch_model_task_mode(model.current_task)
# exit(0)
if (
training_args.save_per_updates_on
and (
(model.local_updates)
% (training_args.save_per_updates * training_args.grad_accumulation_step)
== 0
)
and model_args.local_rank in [-1, 0]
):
ckpt_dir = f'checkpoint-{epoch}-{model.updates}'
ckpt_dir = os.path.join(output_dir, ckpt_dir)
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
model_file = os.path.join(
ckpt_dir, "model_{}_{}.pt".format(epoch, model.updates)
)
evaluation(
model,
data_args.test_datasets,
dev_data_list,
task_defs,
ckpt_dir,
epoch,
n_updates=training_args.save_per_updates,
with_label=True,
tensorboard=tensorboard,
glue_format_on=data_args.glue_format_on,
test_on=False,
device=device,
logger=logger,
)
evaluation(
model,
data_args.test_datasets,
test_data_list,
task_defs,
ckpt_dir,
epoch,
n_updates=training_args.save_per_updates,
with_label=False,
tensorboard=tensorboard,
glue_format_on=data_args.glue_format_on,
test_on=True,
device=device,
logger=logger,
)
print_message(logger, "Saving mt-dnn model to {}".format(model_file))
model.save(model_file)
ckpt_dir = f'checkpoint-{epoch}'
ckpt_dir = os.path.join(output_dir, ckpt_dir)
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
evaluation(
model,
data_args.test_datasets,
dev_data_list,
task_defs,
ckpt_dir,
epoch,
with_label=True,
tensorboard=tensorboard,
glue_format_on=data_args.glue_format_on,
test_on=False,
device=device,
logger=logger,
)
evaluation(
model,
data_args.test_datasets,
test_data_list,
task_defs,
ckpt_dir,
epoch,
with_label=False,
tensorboard=tensorboard,
glue_format_on=data_args.glue_format_on,
test_on=True,
device=device,
logger=logger,
)
print_message(logger, "[new test scores at {} saved.]".format(epoch))
if model_args.local_rank in [-1, 0]:
model_file = os.path.join(ckpt_dir, "model_{}.pt".format(epoch))
model.save(model_file)
if data_args.tensorboard:
tensorboard.close()
if __name__ == "__main__":
main()
|
ContextualSP/adaptershare/adapter_diff_train.py/0
|
{
"file_path": "ContextualSP/adaptershare/adapter_diff_train.py",
"repo_id": "ContextualSP",
"token_count": 19113
}
| 232 |
# Copyright (c) Microsoft. All rights reserved.
import tqdm
import unicodedata
PAD = "PADPAD"
UNK = "UNKUNK"
STA = "BOSBOS"
END = "EOSEOS"
PAD_ID = 0
UNK_ID = 1
STA_ID = 2
END_ID = 3
class Vocabulary(object):
INIT_LEN = 4
def __init__(self, neat=False):
self.neat = neat
if not neat:
self.tok2ind = {PAD: PAD_ID, UNK: UNK_ID, STA: STA_ID, END: END_ID}
self.ind2tok = {PAD_ID: PAD, UNK_ID: UNK, STA_ID: STA, END_ID: END}
else:
self.tok2ind = {}
self.ind2tok = {}
def __len__(self):
return len(self.tok2ind)
def __iter__(self):
return iter(self.tok2ind)
def __contains__(self, key):
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return key in self.tok2ind
def __getitem__(self, key):
if type(key) == int:
return (
self.ind2tok.get(key, -1) if self.neat else self.ind2tok.get(key, UNK)
)
if type(key) == str:
return (
self.tok2ind.get(key, None)
if self.neat
else self.tok2ind.get(key, self.tok2ind.get(UNK))
)
def __setitem__(self, key, item):
if type(key) == int and type(item) == str:
self.ind2tok[key] = item
elif type(key) == str and type(item) == int:
self.tok2ind[key] = item
else:
raise RuntimeError("Invalid (key, item) types.")
def add(self, token):
if token not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[token] = index
self.ind2tok[index] = token
def get_vocab_list(self, with_order=True):
if with_order:
words = [self[k] for k in range(0, len(self))]
else:
words = [k for k in self.tok2ind.keys() if k not in {PAD, UNK, STA, END}]
return words
def toidx(self, tokens):
return [self[tok] for tok in tokens]
def copy(self):
"""Deep copy"""
new_vocab = Vocabulary(self.neat)
for w in self:
new_vocab.add(w)
return new_vocab
    @staticmethod
    def build(words, neat=False):
vocab = Vocabulary(neat)
for w in words:
vocab.add(w)
return vocab
|
ContextualSP/adaptershare/data_utils/vocab.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/vocab.py",
"repo_id": "ContextualSP",
"token_count": 1248
}
| 233 |
#!/usr/bin/env bash
###############################
# Data prepro pipeline for MT-DNN.
# By xiaodong
###############################
## dump original data into tsv
python experiments/glue/glue_prepro.py
declare -a PLMS=('bert-base-uncased' 'roberta-base' 'microsoft/deberta-base' 't5-base')
# prepro GLUE data for all PLMs.
for plm in "${PLMS[@]}"
do
echo "Prepro GLUE data for $plm"
python prepro_std.py --model $plm --root_dir data/canonical_data --task_def experiments/glue/glue_task_def.yml --workers 32
done
|
ContextualSP/adaptershare/experiments/glue/prepro.sh/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/glue/prepro.sh",
"repo_id": "ContextualSP",
"token_count": 193
}
| 234 |
import collections
import json
def load_xnli(file, header=True):
lang_dict = collections.defaultdict(list)
label_dict = {}
cnt = 0
label_map = {"contradiction": 0, "neutral": 1, "entailment": 2}
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
# if blocks[1] == '-': continue
lab = label_map[blocks[1]]
if lab is None:
import pdb
pdb.set_trace()
uid = str(cnt)
label_dict[uid] = lab
lang_dict[blocks[0]].append(uid)
cnt += 1
print(cnt)
return lang_dict, label_dict
fin = "data/XNLI/xnli.dev.tsv"
fout = "data/XNLI/xnli_dev_cat.json"
lang_dict, label_dict = load_xnli(fin)
data = {"lang_map": lang_dict, "label_map": label_dict}
with open(fout, "w") as f:
json.dump(data, f)
# cnt = 0
# for key, val in lang_dict.items():
# cnt += len(val)
# for uid in val:
# assert uid in label_dict
# print(cnt)
|
ContextualSP/adaptershare/experiments/xnli/extract_cat.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/xnli/extract_cat.py",
"repo_id": "ContextualSP",
"token_count": 565
}
| 235 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import os
import torch
import torch.nn as nn
from pretrained_models import MODEL_CLASSES
from module.dropout_wrapper import DropoutWrapper
from module.san import SANClassifier, MaskLmHeader
from module.san_model import SanModel
from module.pooler import Pooler
from torch.nn.modules.normalization import LayerNorm
from data_utils.task_def import EncoderModelType, TaskType
import tasks
from experiments.exp_def import TaskDef
from transformers import AdapterConfig
def generate_decoder_opt(enable_san, max_opt):
opt_v = 0
if enable_san and max_opt < 2:
opt_v = max_opt
return opt_v
class SANBertNetwork(nn.Module):
def __init__(self, opt, bert_config=None, initial_from_local=False, adapter=False, adapter_args=None, task_name='adapter'):
super(SANBertNetwork, self).__init__()
self.dropout_list = nn.ModuleList()
if opt["encoder_type"] not in EncoderModelType._value2member_map_:
raise ValueError("encoder_type is out of pre-defined types")
self.encoder_type = opt["encoder_type"]
self.preloaded_config = None
literal_encoder_type = EncoderModelType(self.encoder_type).name.lower()
config_class, model_class, _ = MODEL_CLASSES[literal_encoder_type]
if not initial_from_local:
# self.bert = model_class.from_pretrained(opt['init_checkpoint'], config=self.preloaded_config)
self.bert = model_class.from_pretrained(
opt["init_checkpoint"], cache_dir=opt["transformer_cache"]
)
else:
self.preloaded_config = config_class.from_dict(opt) # load config from opt
self.preloaded_config.output_hidden_states = (
True # return all hidden states
)
self.bert = model_class(self.preloaded_config)
hidden_size = self.bert.config.hidden_size
if opt.get("dump_feature", False):
self.config = opt
return
if opt["update_bert_opt"] > 0:
for p in self.bert.parameters():
p.requires_grad = False
if adapter:
if adapter_args.adapter_diff:
base_model_layers = len(self.bert.encoder.layer)
active_adapters = []
for i in range(base_model_layers):
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
leave_out=[j for j in range(base_model_layers) if i!=j]
)
self.bert.add_adapter(f'{task_name}-L{str(i)}', config=adapter_config)
active_adapters.append(f'{task_name}-L{str(i)}')
self.bert.train_adapter(active_adapters)
else:
if not adapter_args.train_adapter_fusion:
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter:
self.bert.load_adapter(
adapter_args.load_adapter,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
self.bert.add_adapter(task_name, config=adapter_config)
# Set the adapters to be used in every forward pass
self.bert.set_active_adapters(task_name)
# Freeze all model weights except of those of this adapter
self.bert.train_adapter([task_name])
task_def_list = opt["task_def_list"]
self.task_def_list = task_def_list
self.decoder_opt = []
self.task_types = []
for task_id, task_def in enumerate(task_def_list):
self.decoder_opt.append(
generate_decoder_opt(task_def.enable_san, opt["answer_opt"])
)
self.task_types.append(task_def.task_type)
# create output header
self.scoring_list = nn.ModuleList()
self.dropout_list = nn.ModuleList()
for task_id in range(len(task_def_list)):
task_def: TaskDef = task_def_list[task_id]
lab = task_def.n_class
decoder_opt = self.decoder_opt[task_id]
task_type = self.task_types[task_id]
task_dropout_p = (
opt["dropout_p"] if task_def.dropout_p is None else task_def.dropout_p
)
dropout = DropoutWrapper(task_dropout_p, opt["vb_dropout"])
self.dropout_list.append(dropout)
task_obj = tasks.get_task_obj(task_def)
if task_obj is not None:
# Move this to task_obj
self.pooler = Pooler(
hidden_size, dropout_p=opt["dropout_p"], actf=opt["pooler_actf"]
)
out_proj = task_obj.train_build_task_layer(
decoder_opt, hidden_size, lab, opt, prefix="answer", dropout=dropout
)
elif task_type == TaskType.Span:
assert decoder_opt != 1
out_proj = nn.Linear(hidden_size, 2)
elif task_type == TaskType.SpanYN:
assert decoder_opt != 1
out_proj = nn.Linear(hidden_size, 2)
elif task_type == TaskType.SeqenceLabeling:
out_proj = nn.Linear(hidden_size, lab)
# elif task_type == TaskType.MaskLM:
# if opt["encoder_type"] == EncoderModelType.ROBERTA:
# # TODO: xiaodl
# out_proj = MaskLmHeader(self.bert.embeddings.word_embeddings.weight)
# else:
# out_proj = MaskLmHeader(self.bert.embeddings.word_embeddings.weight)
elif task_type == TaskType.SeqenceGeneration:
                # use the original LM head of the generation model
out_proj = None
elif task_type == TaskType.ClozeChoice:
self.pooler = Pooler(
hidden_size, dropout_p=opt["dropout_p"], actf=opt["pooler_actf"]
)
out_proj = nn.Linear(hidden_size, lab)
else:
if decoder_opt == 1:
out_proj = SANClassifier(
hidden_size,
hidden_size,
lab,
opt,
prefix="answer",
dropout=dropout,
)
else:
out_proj = nn.Linear(hidden_size, lab)
self.scoring_list.append(out_proj)
self.config = opt
def embed_encode(self, input_ids, token_type_ids=None, attention_mask=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
embedding_output = self.bert.embeddings(input_ids, token_type_ids)
return embedding_output
def encode(
self,
input_ids,
token_type_ids,
attention_mask,
inputs_embeds=None,
y_input_ids=None,
):
if self.encoder_type == EncoderModelType.T5:
outputs = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
)
last_hidden_state = outputs.last_hidden_state
all_hidden_states = outputs.hidden_states # num_layers + 1 (embeddings)
elif self.encoder_type == EncoderModelType.T5G:
outputs = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=y_input_ids,
)
# return logits from LM header
last_hidden_state = outputs.logits
            all_hidden_states = (
                outputs.encoder_last_hidden_state
            )  # encoder's last hidden state stands in for the full hidden-state tuple here
else:
outputs = self.bert(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
)
last_hidden_state = outputs.last_hidden_state
all_hidden_states = outputs.hidden_states # num_layers + 1 (embeddings)
return last_hidden_state, all_hidden_states
def forward(
self,
input_ids,
token_type_ids,
attention_mask,
premise_mask=None,
hyp_mask=None,
task_id=0,
y_input_ids=None,
fwd_type=0,
embed=None,
):
if fwd_type == 3:
generated = self.bert.generate(
input_ids=input_ids,
attention_mask=attention_mask,
max_length=self.config["max_answer_len"],
num_beams=self.config["num_beams"],
repetition_penalty=self.config["repetition_penalty"],
length_penalty=self.config["length_penalty"],
early_stopping=True,
)
return generated
elif fwd_type == 2:
assert embed is not None
last_hidden_state, all_hidden_states = self.encode(
None, token_type_ids, attention_mask, embed, y_input_ids
)
elif fwd_type == 1:
return self.embed_encode(input_ids, token_type_ids, attention_mask)
else:
last_hidden_state, all_hidden_states = self.encode(
input_ids, token_type_ids, attention_mask, y_input_ids=y_input_ids
)
decoder_opt = self.decoder_opt[task_id]
task_type = self.task_types[task_id]
task_obj = tasks.get_task_obj(self.task_def_list[task_id])
if task_obj is not None:
pooled_output = self.pooler(last_hidden_state)
logits = task_obj.train_forward(
last_hidden_state,
pooled_output,
premise_mask,
hyp_mask,
decoder_opt,
self.dropout_list[task_id],
self.scoring_list[task_id],
)
return logits
elif task_type == TaskType.Span:
assert decoder_opt != 1
last_hidden_state = self.dropout_list[task_id](last_hidden_state)
logits = self.scoring_list[task_id](last_hidden_state)
start_scores, end_scores = logits.split(1, dim=-1)
start_scores = start_scores.squeeze(-1)
end_scores = end_scores.squeeze(-1)
return start_scores, end_scores
elif task_type == TaskType.SpanYN:
assert decoder_opt != 1
last_hidden_state = self.dropout_list[task_id](last_hidden_state)
logits = self.scoring_list[task_id](last_hidden_state)
start_scores, end_scores = logits.split(1, dim=-1)
start_scores = start_scores.squeeze(-1)
end_scores = end_scores.squeeze(-1)
return start_scores, end_scores
elif task_type == TaskType.SeqenceLabeling:
pooled_output = last_hidden_state
pooled_output = self.dropout_list[task_id](pooled_output)
pooled_output = pooled_output.contiguous().view(-1, pooled_output.size(2))
logits = self.scoring_list[task_id](pooled_output)
return logits
elif task_type == TaskType.MaskLM:
last_hidden_state = self.dropout_list[task_id](last_hidden_state)
logits = self.scoring_list[task_id](last_hidden_state)
return logits
elif task_type == TaskType.SeqenceGeneration:
logits = last_hidden_state.view(-1, last_hidden_state.size(-1))
return logits
elif task_type == TaskType.ClozeChoice:
pooled_output = self.pooler(last_hidden_state)
pooled_output = self.dropout_list[task_id](pooled_output)
logits = self.scoring_list[task_id](pooled_output)
return logits
else:
if decoder_opt == 1:
max_query = hyp_mask.size(1)
assert max_query > 0
assert premise_mask is not None
assert hyp_mask is not None
hyp_mem = last_hidden_state[:, :max_query, :]
logits = self.scoring_list[task_id](
last_hidden_state, hyp_mem, premise_mask, hyp_mask
)
            else:
                # pool the sequence representation before the linear classifier
                pooled_output = self.pooler(last_hidden_state)
                pooled_output = self.dropout_list[task_id](pooled_output)
                logits = self.scoring_list[task_id](pooled_output)
return logits
|
ContextualSP/adaptershare/mt_dnn/matcher.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/matcher.py",
"repo_id": "ContextualSP",
"token_count": 6851
}
| 236 |
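To illustrate the span head used for TaskType.Span above: the output projection maps each token to two logits, which are split into per-token start and end scores. A minimal, self-contained sketch with made-up dimensions (not the model's actual configuration):

import torch
import torch.nn as nn

batch, seq_len, hidden = 2, 8, 16            # toy sizes
last_hidden_state = torch.randn(batch, seq_len, hidden)

out_proj = nn.Linear(hidden, 2)              # mirrors nn.Linear(hidden_size, 2)
logits = out_proj(last_hidden_state)         # (batch, seq_len, 2)

start_scores, end_scores = logits.split(1, dim=-1)
start_scores = start_scores.squeeze(-1)      # (batch, seq_len)
end_scores = end_scores.squeeze(-1)          # (batch, seq_len)

print(start_scores.shape, end_scores.shape)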
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel
from models.nn_layers import RelationalEncoder
from models.nn_utils import *
from collections import defaultdict
from typing import Dict, List
from utils.data_iter import MetaIndex
class SpiderAlignmentModel(nn.Module):
def __init__(self, bert_version: str, dropout_prob: float) -> None:
super().__init__()
self.bert = BertModel.from_pretrained(bert_version)
self.hidden_size = get_bert_hidden_size(bert_version)
# self.rat_encoder = RelationalEncoder(num_layers=2, hidden_size=self.hidden_size, num_relations=len(SchemaRelation), num_heads=8, dropout_prob=dropout_prob)
self.linear_out_tbl = nn.Linear(self.hidden_size, 2)
self.linear_out_col = nn.Linear(self.hidden_size, 2)
self.linear_out_val = nn.Linear(self.hidden_size, 2)
self.align_pointer = AttentivePointer(self.hidden_size)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, **inputs) -> Dict:
bert_outputs = self.bert(
inputs['input_token_ids'],
token_type_ids=inputs['input_token_types'],
attention_mask=inputs['input_token_ids'].ne(0))["last_hidden_state"]
bert_outputs = self.dropout(bert_outputs)
batched_tbl_logits, batched_col_logits, batched_val_logits = [], [], []
batched_align_weights = []
for batch_idx in range(len(bert_outputs)):
meta_index: MetaIndex = inputs['meta_index'][batch_idx]
question_outputs = bert_outputs[batch_idx][
meta_index.question_encode_indices + [meta_index.question_sep_index]]
tbl_outputs = bert_outputs[batch_idx][meta_index.tbl_encode_indices]
col_outputs = bert_outputs[batch_idx][meta_index.col_encode_indices]
val_outputs = bert_outputs[batch_idx][meta_index.val_encode_indices]
alignment_outputs, alignment_weights = self.align_pointer.forward(
torch.cat((tbl_outputs, col_outputs, val_outputs), dim=0).unsqueeze(0),
question_outputs.unsqueeze(0),
question_outputs.unsqueeze(0))
# Use attentive outputs to do identification
# tbl_outputs, col_outputs, val_outputs = meta_index.split(alignment_outputs.squeeze(0))
batched_tbl_logits += [self.linear_out_tbl(tbl_outputs)]
batched_col_logits += [self.linear_out_col(col_outputs)]
batched_val_logits += [self.linear_out_val(val_outputs)]
batched_align_weights += [alignment_weights.squeeze(0)[:, :-1]]
return {
'table_logits': batched_tbl_logits,
'column_logits': batched_col_logits,
'value_logits': batched_val_logits,
'alignment_weights': batched_align_weights
}
def compute_loss(self, **inputs):
outputs = self.forward(**inputs)
table_logits, column_logits, value_logits = outputs['table_logits'], outputs['column_logits'], outputs[
'value_logits']
total_loss = 0
identify_loss = self._calculate_identification_loss(table_logits, column_logits, value_logits, **inputs)
total_loss += identify_loss
outputs['identify_loss'] = identify_loss
alignment_loss_weight = inputs['align_loss_weight'] if 'align_loss_weight' in inputs else 0.0
if alignment_loss_weight > 1e-3:
align_loss = self._calculate_alignment_loss(table_logits, column_logits, value_logits,
outputs['alignment_weights'], **inputs)
total_loss += align_loss * alignment_loss_weight
outputs['align_loss'] = align_loss
outputs['loss'] = total_loss
return outputs
def _calculate_identification_loss(self, tbl_logits, col_logits, val_logits, **inputs):
tbl_labels, col_labels, val_labels = inputs['table_labels'], inputs['column_labels'], inputs['value_labels']
assert len(tbl_logits) == len(col_logits)
assert len(tbl_labels) == len(tbl_logits)
assert len(col_labels) == len(col_logits)
total_loss = 0
criterion = LabelSmoothingLoss(0.05) if inputs['label_smoothing'] else nn.CrossEntropyLoss()
for batch_idx in range(len(tbl_labels)):
total_loss += criterion(tbl_logits[batch_idx], tbl_labels[batch_idx])
total_loss += criterion(col_logits[batch_idx], col_labels[batch_idx])
if len(val_labels[batch_idx]) > 0:
total_loss += criterion(val_logits[batch_idx], val_labels[batch_idx])
return total_loss / len(tbl_labels) / 3
def _calculate_alignment_loss(self, tbl_logits, col_logits, val_logits, align_weights, **inputs):
assert len(tbl_logits) == len(col_logits)
assert len(tbl_logits) == len(align_weights)
total_alignment_loss = 0
for batch_idx in range(len(tbl_logits)):
meta_index: MetaIndex = inputs['meta_index'][batch_idx]
            tbl_labels = inputs['table_labels'][batch_idx]
            col_labels = inputs['column_labels'][batch_idx]
            val_labels = inputs['value_labels'][batch_idx]
with torch.no_grad():
masking_inputs = self._generate_masking_inputs(
input_token_ids=inputs['input_token_ids'][batch_idx].detach(),
input_token_types=inputs['input_token_types'][batch_idx].detach(),
meta_index=meta_index,
example=inputs['example'][batch_idx])
masking_scores = None
if 'masking_infer_func' not in inputs:
masking_scores = self._run_masking_outputs(
input_token_ids=masking_inputs['input_token_ids'],
input_token_types=masking_inputs['input_token_types'],
meta_index=meta_index,
batch_size=len(tbl_logits))
else:
masking_scores = inputs['masking_infer_func'](masking_inputs)
masking_rewards = self._calculate_masking_rewards(
labels={'tbl': tbl_labels, 'col': col_labels, 'val': val_labels, },
base_scores={'tbl': F.softmax(tbl_logits[batch_idx], dim=-1),
'col': F.softmax(col_logits[batch_idx], dim=-1),
'val': F.softmax(val_logits[batch_idx], dim=-1)},
masking_scores=masking_scores,
masking_spans=masking_inputs['masking_spans'],
question_length=meta_index.num_question_tokens)
tbl_align_weights, col_align_weights, val_align_weights = meta_index.split(align_weights[batch_idx], dim=0)
question_length = meta_index.num_question_tokens
total_alignment_loss += F.binary_cross_entropy(
tbl_align_weights,
tbl_labels.to(torch.float).repeat_interleave(question_length).view(-1, question_length),
weight=masking_rewards['tbl'])
total_alignment_loss += F.binary_cross_entropy(col_align_weights,
col_labels.to(torch.float).repeat_interleave(
question_length).view(-1, question_length),
weight=masking_rewards['col'])
if len(val_align_weights) > 0:
total_alignment_loss += F.binary_cross_entropy(val_align_weights,
val_labels.to(torch.float).repeat_interleave(
question_length).view(-1, question_length),
weight=masking_rewards['val'])
return total_alignment_loss / len(tbl_logits) / 3
@staticmethod
def _generate_masking_inputs(input_token_ids: torch.Tensor, input_token_types: torch.Tensor, meta_index: MetaIndex,
example: Dict):
all_masking_input_token_ids, all_masking_spans = [], []
for i, j, _ in example['masking_ngrams']:
p_start, p_end = meta_index.question_spans[i][1], meta_index.question_spans[j][2]
masking_input_token_ids = input_token_ids.clone()
            masking_input_token_ids[p_start:p_end + 1] = 100  # 100 == [UNK] token id in the BERT vocab
all_masking_input_token_ids += [masking_input_token_ids]
all_masking_spans += [(i, j)]
return {
'input_token_ids': torch.stack(all_masking_input_token_ids, dim=0),
'input_token_types': torch.stack([input_token_types for _ in all_masking_spans]),
'meta_index': [meta_index for _ in all_masking_spans],
'masking_spans': all_masking_spans
}
def _run_masking_outputs(self, input_token_ids: torch.Tensor, input_token_types: torch.Tensor,
meta_index: MetaIndex, batch_size: int):
index = 0
batched_tbl_scores, batched_col_scores, batched_val_scores = [], [], []
while index < len(input_token_ids):
bert_outputs = self.bert(
input_token_ids[index:index + batch_size],
attention_mask=input_token_ids[index:index + batch_size].ne(0),
token_type_ids=input_token_types[index:index + batch_size])["last_hidden_state"]
tbl_outputs = bert_outputs[:, meta_index.tbl_encode_indices]
tbl_scores = F.softmax(self.linear_out_tbl(tbl_outputs), dim=-1)
col_outputs = bert_outputs[:, meta_index.col_encode_indices]
col_scores = F.softmax(self.linear_out_col(col_outputs), dim=-1)
val_outputs = bert_outputs[:, meta_index.val_encode_indices]
val_scores = F.softmax(self.linear_out_val(val_outputs), dim=-1)
index += batch_size
batched_tbl_scores.append(tbl_scores)
batched_col_scores.append(col_scores)
batched_val_scores.append(val_scores)
return {
'tbl': torch.cat(batched_tbl_scores, dim=0),
'col': torch.cat(batched_col_scores, dim=0),
'val': torch.cat(batched_val_scores, dim=0)
}
def _calculate_masking_rewards(self,
labels: Dict[str, torch.LongTensor],
base_scores: Dict[str, torch.Tensor],
masking_scores: Dict[str, torch.Tensor],
masking_spans: List[Tuple[int, int]],
question_length: int,
default_weight: float = 0.01,
):
masking_rewards = {}
for e_type in ['tbl', 'col', 'val']:
e_labels, e_base_scores, e_masking_scores = labels[e_type], base_scores[e_type], masking_scores[e_type]
reward = torch.zeros((len(e_labels), question_length), device=e_labels.device)
for idx in range(len(e_labels)):
label = e_labels[idx].item()
if label == 0:
reward[idx] = default_weight
continue
ngram_rewards = defaultdict(list)
for m_i, (start, end) in enumerate(masking_spans):
score_diff = (e_base_scores[idx, label] - e_masking_scores[m_i, idx, label]).clamp(0, 1).item()
for j in range(start, end + 1):
ngram_rewards[j].append(score_diff)
for q_idx in range(question_length):
reward[idx, q_idx] = sum(ngram_rewards[q_idx]) / len(
ngram_rewards[q_idx]) if q_idx in ngram_rewards else 0.0
masking_rewards[e_type] = reward
return masking_rewards
|
ContextualSP/awakening_latent_grounding/models/spider_align.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/spider_align.py",
"repo_id": "ContextualSP",
"token_count": 6237
}
| 237 |
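The alignment loss above weights each question position with an occlusion-style reward: mask an n-gram, re-score the entity, and credit the masked positions with the resulting drop in confidence (see _calculate_masking_rewards). A minimal numeric sketch of that reward computation for a single positively-labelled entity; the values and span choices are illustrative only:

import torch

question_length = 5
base_score = torch.tensor(0.9)                 # P(label) with the full question visible
masking_spans = [(0, 1), (2, 2), (3, 4)]       # inclusive (start, end) spans that were masked
masking_scores = torch.tensor([0.3, 0.85, 0.9])  # P(label) after masking each span

rewards = torch.zeros(question_length)
counts = torch.zeros(question_length)
for (start, end), masked in zip(masking_spans, masking_scores):
    drop = (base_score - masked).clamp(0, 1)   # how much confidence the masking removed
    for j in range(start, end + 1):
        rewards[j] += drop
        counts[j] += 1

rewards = rewards / counts.clamp(min=1)        # average the drops over spans covering each position
print(rewards)  # positions 0-1 get a large reward, the rest stay near 0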
from utils.nlp_utils import ValueMatch, is_adjective
import torch
import nltk
from collections import OrderedDict, defaultdict
from typing import Any, List, Dict, Tuple
from dataclasses import dataclass
from utils.data_types import *
from utils.data_iter import MetaIndex
from utils.schema_linker import *
from fuzzywuzzy import fuzz
import os
def reduce_alignment_matrix(alignment_matrix: torch.Tensor, mappings: List[int], max_length: int) -> torch.Tensor:
new_alignment_matrix = torch.zeros((alignment_matrix.size(0), max_length), device=alignment_matrix.device)
for i in range(alignment_matrix.size(0)):
for j in range(alignment_matrix.size(1)):
new_alignment_matrix[i][mappings[j]] += alignment_matrix[i][j]
return new_alignment_matrix
def reduce_alignment_matrix_question_first(alignment_matrix: torch.Tensor, mappings: List[int],
max_length: int) -> torch.Tensor:
assert len(alignment_matrix) >= max_length
new_alignment_matrix = torch.zeros((max_length, alignment_matrix.size(1)), device=alignment_matrix.device)
for i in range(alignment_matrix.size(0)):
for j in range(alignment_matrix.size(1)):
new_alignment_matrix[mappings[i]][j] += alignment_matrix[i][j] / mappings.count(mappings[i])
return new_alignment_matrix
def evaluate_linking(gold_align_labels: List[AlignmentLabel], pred_align_labels: List[AlignmentLabel],
enable_eval_types: List[SQLTokenType]):
assert len(gold_align_labels) == len(pred_align_labels)
eval_result = {}
for eval_type in enable_eval_types:
eval_result[eval_type] = defaultdict(int)
for gold_label, pred_label in zip(gold_align_labels, pred_align_labels):
if gold_label.align_type == eval_type:
if gold_label == pred_label:
eval_result[eval_type]['tp'] += 1
else:
eval_result[eval_type]['fn'] += 1
if pred_label.align_type == eval_type:
if gold_label != pred_label:
eval_result[eval_type]['fp'] += 1
return eval_result
def get_precision_recall_and_f1(eval_result: Dict) -> Dict:
    metrics = {}
    for eval_type in eval_result:
        tp = eval_result[eval_type]['tp']
        fp = eval_result[eval_type]['fp']
        fn = eval_result[eval_type]['fn']
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0
        metrics[eval_type] = {'P': precision, 'R': recall, 'F1': f1}
    return metrics
def get_spider_alignments_from_labeling(nl_alignments: List[Dict], question: Utterance, schema: SpiderSchema) -> List[
AlignmentLabel]:
assert len(question.tokens) == len(nl_alignments)
align_labels = []
for q_idx in range(len(nl_alignments)):
align_obj = nl_alignments[q_idx]
if align_obj is None:
align_labels.append(AlignmentLabel(question.tokens[q_idx], SQLTokenType.null, None, 1.0))
elif align_obj['type'] == 'tbl':
align_labels.append(AlignmentLabel(token=question.tokens[q_idx], align_type=SQLTokenType.table,
align_value=schema.table_names_original[align_obj['id']].lower(),
confidence=1.0))
elif align_obj['type'] == 'col':
col_full_name = schema.get_column_full_name(align_obj['id'])
align_labels.append(
AlignmentLabel(token=question.tokens[q_idx], align_type=SQLTokenType.column, align_value=col_full_name,
confidence=1.0))
elif align_obj['type'] == 'val':
col_full_name = schema.get_column_full_name(align_obj['id'])
align_labels.append(AlignmentLabel(token=question.tokens[q_idx], align_type=SQLTokenType.value,
align_value="VAL_{}".format(col_full_name), confidence=1.0))
else:
raise NotImplementedError()
return align_labels
def get_spider_alignments_from_prediction(alignment_weights: torch.Tensor, question: Utterance, schema: SpiderSchema,
values: List[ValueMatch], meta_index: MetaIndex, threshold: float = 0.1) -> \
List[AlignmentLabel]:
alignment_weights[meta_index.num_tables] = 0.0 # Set column * to 0
alignment_weights = alignment_weights.transpose(0, 1)
assert len(alignment_weights) == len(question.tokens)
align_labels = []
for q_idx in range(len(alignment_weights)):
max_idx = torch.argmax(alignment_weights[q_idx], dim=-1).item()
confidence = alignment_weights[q_idx, max_idx]
if confidence < threshold:
align_label = AlignmentLabel(question.tokens[q_idx], SQLTokenType.null, None, 1 - confidence)
align_labels.append(align_label)
continue
if max_idx < meta_index.num_tables:
align_labels.append(
AlignmentLabel(question.tokens[q_idx], SQLTokenType.table, schema.table_names_original[max_idx].lower(),
confidence))
elif max_idx < meta_index.num_tables + meta_index.num_columns:
align_labels.append(AlignmentLabel(question.tokens[q_idx], SQLTokenType.column,
schema.get_column_full_name(max_idx - meta_index.num_tables),
confidence))
elif max_idx < meta_index.num_tables + meta_index.num_columns + meta_index.num_values:
value_idx = max_idx - meta_index.num_tables - meta_index.num_columns
align_labels.append(
AlignmentLabel(question.tokens[q_idx], SQLTokenType.value, 'VAL_{}'.format(values[value_idx].column),
confidence))
else:
raise NotImplementedError()
return align_labels
def post_process_alignment_labels(pred_align_labels: List[AlignmentLabel], gold_align_labels: List[AlignmentLabel]):
new_pred_align_labels = []
for i, (pred_align_label, gold_align_label) in enumerate(zip(pred_align_labels, gold_align_labels)):
if gold_align_label.align_type == SQLTokenType.value and pred_align_label.align_type in [SQLTokenType.column,
SQLTokenType.table]:
new_label = AlignmentLabel(pred_align_label.token, align_type=SQLTokenType.null, align_value=None,
confidence=pred_align_label.confidence)
new_pred_align_labels += [new_label]
continue
if gold_align_label.align_type == SQLTokenType.null and pred_align_label.token.token.lower() in ['with', 'any',
'without']:
new_label = AlignmentLabel(pred_align_label.token, align_type=SQLTokenType.null, align_value=None,
confidence=pred_align_label.confidence)
new_pred_align_labels += [new_label]
continue
if gold_align_label.align_type == SQLTokenType.null and pred_align_label.align_type == SQLTokenType.column and is_adjective(
pred_align_label.token.token.lower()):
new_label = AlignmentLabel(pred_align_label.token, align_type=SQLTokenType.null, align_value=None,
confidence=pred_align_label.confidence)
new_pred_align_labels += [new_label]
continue
new_pred_align_labels.append(pred_align_label)
return new_pred_align_labels
def get_wtq_alignments_from_labeling(nl_alignments: List[Dict], question: Utterance, schema: WTQSchema) -> List[AlignmentLabel]:
assert len(question.tokens) == len(nl_alignments)
align_labels = []
for q_idx in range(len(question.tokens)):
align_obj = nl_alignments[q_idx]
if align_obj[0] == 'None':
align_labels += [AlignmentLabel(question.tokens[q_idx], SQLTokenType.null, None, 1.0)]
elif align_obj[0] == 'Keyword':
align_labels += [AlignmentLabel(question.tokens[q_idx], SQLTokenType.keyword, align_obj[1][0], 1.0)]
elif align_obj[0] == 'Column':
col_id = schema.internal_name_to_id[align_obj[1]]
align_labels += [AlignmentLabel(question.tokens[q_idx], SQLTokenType.column,
schema.column_headers[schema.internal_to_header[col_id]], 1.0)]
elif align_obj[0] == 'Literal':
align_labels += [AlignmentLabel(question.tokens[q_idx], SQLTokenType.value, None, 1.0)]
else:
raise NotImplementedError("not supported alignment type: {}".format(align_obj))
return align_labels
def get_wtq_alignments_from_prediction(alignment_weights: torch.Tensor, question: Utterance, schema: WTQSchema,
meta_index: MetaIndex, threshold: float = 0.1, question_first=False) -> List[AlignmentLabel]:
if question_first:
alignment_weights = reduce_alignment_matrix_question_first(alignment_weights, question.get_piece2token(),
len(question.tokens))
else:
alignment_weights = reduce_alignment_matrix(alignment_weights, question.get_piece2token(),
len(question.tokens)).transpose(0, 1)
assert len(alignment_weights) == len(question.tokens)
align_labels = []
align_span = []
for q_idx in range(len(alignment_weights)):
max_idx = torch.argmax(alignment_weights[q_idx], dim=-1)
confidence = alignment_weights[q_idx, max_idx]
if confidence < threshold:
align_label = AlignmentLabel(question.tokens[q_idx], SQLTokenType.null, None, 1 - confidence)
align_labels.append(align_label)
continue
assert max_idx < meta_index.num_columns
col_idx = meta_index.lookup_entity_id('col', int(max_idx))
align_label = AlignmentLabel(question.tokens[q_idx], SQLTokenType.column, schema.column_headers[col_idx],
confidence)
align_labels.append(align_label)
i = 0
while i < len(align_labels):
if align_labels[i].align_type == SQLTokenType.column:
j = i + 1
while j < len(align_labels):
if align_labels[j].align_value == align_labels[i].align_value and \
align_labels[j].align_type == SQLTokenType.column:
j += 1
else:
break
align_span.append((i, j))
i = j
continue
else:
i += 1
table_id = schema.table_id
# load table from the same directory
data_dir = os.getenv("PT_DATA_DIR", default="data/squall")
table_content = json.load(open("{}/json/{}.json".format(data_dir, table_id), 'r'))
all_values = []
for internal_columns in table_content['contents']:
for column in internal_columns:
all_values += column['data']
for span in align_span:
find = 0
for _label in align_labels[span[0]: span[1]]:
if _label.align_value != align_labels[span[0]].align_value:
find = 1
break
if find == 1:
continue
span_strs = [(span[0], span[1], " ".join([x.token.token for x in align_labels[span[0]: span[1]]]))]
if span[0] - 1 >= 0:
span_strs.append(
(span[0] - 1, span[1], " ".join([x.token.token for x in align_labels[span[0] - 1: span[1]]])))
if span[0] - 2 >= 0:
span_strs.append(
(span[0] - 2, span[1], " ".join([x.token.token for x in align_labels[span[0] - 2: span[1]]])))
if span[1] + 1 < len(question.tokens):
span_strs.append(
(span[0], span[1] + 1, " ".join([x.token.token for x in align_labels[span[0]: span[1] + 1]])))
if span[1] + 2 < len(question.tokens):
span_strs.append(
(span[0], span[1] + 2, " ".join([x.token.token for x in align_labels[span[0]: span[1] + 2]])))
for value in all_values:
if not isinstance(value, str):
continue
for sp1, sp2, span_str in span_strs:
if value is not None and \
(fuzz.ratio(value.lower(), span_str.lower()) > 80):
if value.lower() in table_content['headers']:
continue
for label in align_labels[span[0]:span[1]]:
label.align_type = SQLTokenType.null
break
for sp1, sp2, span_str in span_strs:
if span_str in table_content['headers'][2:]:
for idx in range(sp1, sp2):
align_labels[idx] = AlignmentLabel(question.tokens[idx], SQLTokenType.column, span_str, 1)
return align_labels
@dataclass
class SpiderCase:
schema: SpiderSchema
question: Utterance
goal_sql: SQLExpression
enc_input_tokens: List[str]
correct_dict: Dict[str, bool]
identification_dict: Dict[str, Any]
alignment_dict: Dict[str, torch.Tensor]
gold_alignments: List[AlignmentLabel]
pred_alignments: List[AlignmentLabel]
metrics: Dict[str, Dict[str, float]]
values: List[ValueMatch]
def to_string(self):
out_strs = []
tokens = [token.token for token in self.question.tokens]
out_strs.append(
"Q: {}, Table = {}, Column = {}, Value = {}, All = {}".format(self.question.text, self.correct_dict['tbl'],
self.correct_dict['col'],
self.correct_dict['val'],
self.correct_dict['all']))
out_strs.append("Input tokens: {}".format(" ".join(self.enc_input_tokens)))
out_strs.append(self.schema.to_string('\n'))
out_strs.append("Gold SQL: {}".format(self.goal_sql.sql))
if 'tbl' in self.identification_dict:
for i, (tbl_id, gold_label, pred_label, pred_score) in enumerate(self.identification_dict['tbl']):
tbl_name = self.schema.table_names_original[tbl_id]
if gold_label == 0 and pred_label == 0:
continue
out_strs.append(
"T {}: gold = {}, pred = {} / {:.3f}, Correct = {}".format(tbl_name, gold_label, pred_label,
pred_score, gold_label == pred_label))
if 'tbl' in self.alignment_dict:
align_vector = self.alignment_dict['tbl'][i]
assert len(align_vector) == len(tokens)
align_strs = ["{}/{:.3f}".format(token, weight.item()) for token, weight in
zip(tokens, align_vector)]
out_strs += ["Alignment: {}".format(" ".join(align_strs))]
if 'col' in self.identification_dict:
for i, (col_id, gold_label, pred_label, pred_score) in enumerate(self.identification_dict['col']):
if gold_label == 0 and pred_label == 0:
continue
col_name = self.schema.get_column_full_name(col_id)
out_strs.append(
"C {}: gold = {}, pred = {} / {:.3f}, Correct = {}".format(col_name, gold_label, pred_label,
pred_score, gold_label == pred_label))
if 'col' in self.alignment_dict:
align_vector = self.alignment_dict['col'][i]
assert len(align_vector) == len(tokens)
align_strs = ["{}/{:.3f}".format(token, weight.item()) for token, weight in
zip(tokens, align_vector)]
out_strs += ["Alignment: {}".format(" ".join(align_strs))]
if 'val' in self.identification_dict:
for i, (val_id, gold_label, pred_label, pred_score) in enumerate(self.identification_dict['val']):
if gold_label == 0 and pred_label == 0:
continue
col_name = self.values[val_id].column
                val_str = "{}[{}:{}]".format(self.values[val_id].value, self.values[val_id].start, self.values[val_id].end)
out_strs.append(
"V {}_{}: gold = {}, pred = {} / {:.3f}, Correct = {}".format(col_name, val_str, gold_label,
pred_label, pred_score,
gold_label == pred_label))
if 'val' in self.alignment_dict:
align_vector = self.alignment_dict['val'][i]
assert len(align_vector) == len(tokens)
align_strs = ["{}/{:.3f}".format(token, weight.item()) for token, weight in
zip(tokens, align_vector)]
out_strs += ["Alignment: {}".format(" ".join(align_strs))]
out_strs.append('Gold Align: {}'.format(" ".join([str(align_label) for align_label in self.gold_alignments])))
out_strs.append('Pred Align: {}'.format(" ".join([str(align_label) for align_label in self.pred_alignments])))
for align_type in [SQLTokenType.table, SQLTokenType.column]:
out_strs.append(
"{} P = {:.3f}, R = {:.3f}, F1 = {:.3f}".format(str(align_type), self.metrics[align_type]['P'],
self.metrics[align_type]['R'],
self.metrics[align_type]['F1']))
return '\n'.join(out_strs)
class SpiderEvaluator:
def __init__(self) -> None:
self.statistics = defaultdict(int)
self.cases: List[SpiderCase] = []
self.align_results = {SQLTokenType.table: defaultdict(int), SQLTokenType.column: defaultdict(int),
SQLTokenType.value: defaultdict(int)}
def add_batch(self, inputs, outputs):
batch_size = len(inputs['input_token_ids'])
self.statistics['total_count'] += batch_size
self.statistics['total_loss'] += outputs['loss'].item() * batch_size
for i in range(batch_size):
example = inputs['example'][i]
question: Utterance = Utterance.from_json(example['question'])
meta_index: MetaIndex = inputs['meta_index'][i]
schema: SpiderSchema = SpiderSchema.from_json(example['schema'])
gold_sql: SQLExpression = SQLExpression.from_json(example['sql'])
values: List[ValueMatch] = [ValueMatch.from_json(x) for x in example['values']]
gold_tbl_labels = inputs['table_labels'][i]
pred_tbl_labels = torch.argmax(outputs['table_logits'][i], dim=-1)
pred_tbl_scores = torch.softmax(outputs['table_logits'][i], dim=-1)
tbl_correct = pred_tbl_labels.equal(gold_tbl_labels)
tbl_identify_results = []
for j in range(len(gold_tbl_labels)):
tbl_identify_results += [(j, gold_tbl_labels[j].item(), pred_tbl_labels[j].item(),
pred_tbl_scores[j, pred_tbl_labels[j]].item())]
gold_col_labels = inputs['column_labels'][i]
pred_col_labels = torch.argmax(outputs['column_logits'][i], dim=-1)
pred_col_scores = torch.softmax(outputs['column_logits'][i], dim=-1)
col_correct = pred_col_labels.equal(gold_col_labels)
col_identify_results = []
for j in range(len(gold_col_labels)):
col_identify_results += [(j, gold_col_labels[j].item(), pred_col_labels[j].item(),
pred_col_scores[j, pred_col_labels[j]].item())]
val_correct = True
gold_val_labels = inputs['value_labels'][i]
val_identify_results = []
if len(gold_val_labels) > 0:
pred_val_labels = torch.argmax(outputs['value_logits'][i], dim=-1)
pred_val_scores = torch.softmax(outputs['value_logits'][i], dim=-1)
val_correct = pred_val_labels.equal(gold_val_labels)
for j in range(len(gold_val_labels)):
val_identify_results += [(j, gold_val_labels[j].item(), pred_val_labels[j].item(),
pred_val_scores[j, pred_val_labels[j]].item())]
align_weights = {}
if 'alignment_weights' in outputs:
tbl_align_weights, col_align_weights, val_align_weights = meta_index.split(
outputs['alignment_weights'][i], dim=0)
align_weights['tbl'] = tbl_align_weights
align_weights['col'] = col_align_weights
align_weights['val'] = val_align_weights
gold_align_labels = get_spider_alignments_from_labeling(example['align_labels'], question, schema)
# pred_align_labels = get_spider_alignments_from_prediction(outputs['alignment_weights'][i], question, schema, values, meta_index, threshold=0.15)
# pred_align_labels = post_process_alignment_labels(pred_align_labels, gold_align_labels)
identify_logits = {SQLTokenType.table: outputs['table_logits'][i],
SQLTokenType.column: outputs['column_logits'][i],
SQLTokenType.value: outputs['value_logits'][i]}
align_weights2 = {SQLTokenType.table: align_weights['tbl'], SQLTokenType.column: align_weights['col'],
SQLTokenType.value: align_weights['val']}
pred_align_labels = greedy_link_spider(identify_logits, align_weights2, question, schema, values,
threshold=0.3)
align_result = evaluate_linking(gold_align_labels, pred_align_labels,
[SQLTokenType.table, SQLTokenType.column, SQLTokenType.value])
metrics = get_precision_recall_and_f1(align_result)
for align_type in align_result:
for key in align_result[align_type]:
self.align_results[align_type][key] += align_result[align_type][key]
eval_case = SpiderCase(
schema=schema,
question=question,
goal_sql=gold_sql,
enc_input_tokens=inputs['input_tokens'][i],
correct_dict={'tbl': tbl_correct, 'col': col_correct, 'val': val_correct,
'all': tbl_correct & col_correct},
identification_dict={'tbl': tbl_identify_results, 'col': col_identify_results,
'val': val_identify_results},
alignment_dict=align_weights,
gold_alignments=gold_align_labels,
pred_alignments=pred_align_labels,
metrics=metrics,
values=values)
self.cases += [eval_case]
self.statistics['tbl_correct'] += tbl_correct
self.statistics['col_correct'] += col_correct
self.statistics['val_correct'] += val_correct
self.statistics['overall_correct'] += tbl_correct & col_correct # & val_correct
def get_metrics(self, saved_file: str = None):
metrics = OrderedDict()
total_count = self.statistics['total_count']
metrics['avg loss'] = self.statistics['total_loss'] / total_count
metrics['table accuracy'] = self.statistics['tbl_correct'] / total_count
metrics['column accuracy'] = self.statistics['col_correct'] / total_count
metrics['value accuracy'] = self.statistics['val_correct'] / total_count
metrics['overall accuracy'] = self.statistics['overall_correct'] / total_count
align_metrics = get_precision_recall_and_f1(self.align_results)
align_f1_string = ["acc_{:.3f}".format(metrics['overall accuracy'])]
total_f1 = 0
for align_type in self.align_results:
total_f1 += align_metrics[align_type]['F1']
metrics[str(align_type)] = " P = {:.3f}, R = {:.3f}, F1 = {:.3f}".format(align_metrics[align_type]['P'],
align_metrics[align_type]['R'],
align_metrics[align_type]['F1'])
align_f1_string += ["{}_{:.3f}".format(align_type.abbr, align_metrics[align_type]['F1'])]
align_f1_string = ".".join(align_f1_string)
metrics['average F1'] = total_f1 / len(self.align_results)
if saved_file is not None:
with open(saved_file.replace(".txt", ".{}.txt".format(align_f1_string)), 'w', encoding='utf-8') as fw:
for case in self.cases:
fw.write(case.to_string() + '\n\n')
return metrics
@dataclass
class WTQCase:
schema: WTQSchema
gold_sql: SQLExpression
pred_sql: SQLExpression
question: Utterance
enc_input_tokens: List[str]
correct_dict: Dict[str, bool]
identification_dict: Dict[str, Any]
alignment_dict: Dict[str, torch.Tensor]
gold_alignments: List[AlignmentLabel]
pred_alignments: List[AlignmentLabel]
metrics: Dict[str, Dict[str, float]]
def to_string(self):
out_strs = []
tokens = self.question.get_piece2token()
sql_correct = self.pred_sql is not None and self.pred_sql == self.gold_sql
out_strs.append("Q: {}, Column = {}, All = {}, SQL = {}".format(self.question.text, self.correct_dict['col'],
self.correct_dict['all'], sql_correct))
out_strs.append("Gold SQL: {}".format(self.gold_sql.sql))
if self.pred_sql is not None:
out_strs.append("Pred SQL: {}".format(self.pred_sql.sql))
out_strs.append("Encode Tokens: {}".format(" ".join(self.enc_input_tokens)))
out_strs.append(self.schema.to_string())
for i, (col_id, gold_label, pred_label, pred_score) in enumerate(self.identification_dict['col']):
if gold_label == 0 and pred_label == 0:
continue
out_strs.append(
"{}: gold = {}, pred = {} / {:.3f}, Correct = {}".format(col_id, gold_label, pred_label, pred_score,
gold_label == pred_label))
# if 'col' in self.alignment_dict:
# align_vector = self.alignment_dict['col'][i]
# assert len(align_vector) == len(tokens), print(align_vector.shape, tokens)
# align_strs = ["{}/{:.3f}".format(token, weight.item()) for token, weight in zip(tokens, align_vector)]
# out_strs += ["Alignment: {}".format(" ".join(align_strs))]
out_strs.append('Gold Align: {}'.format(" ".join([str(align_label) for align_label in self.gold_alignments])))
out_strs.append('Pred Align: {}'.format(" ".join([str(align_label) for align_label in self.pred_alignments])))
for align_type in [SQLTokenType.column]:
out_strs.append(
"{} P = {:.3f}, R = {:.3f}, F1 = {:.3f}".format(str(align_type), self.metrics[align_type]['P'],
self.metrics[align_type]['R'],
self.metrics[align_type]['F1']))
return '\n'.join(out_strs)
class WTQEvaluator:
def __init__(self) -> None:
self.statistics = defaultdict(int)
self.cases: List[WTQCase] = []
self.align_results = {SQLTokenType.column: defaultdict(int)}
def add_batch(self, inputs, outputs):
batch_size = len(inputs['input_token_ids'])
self.statistics['total_count'] += batch_size
self.statistics['total_loss'] += outputs['loss'].item() * batch_size
for i in range(batch_size):
example = inputs['example'][i]
gold_sql: SQLExpression = SQLExpression.from_json(example['sql'])
question: Utterance = Utterance.from_json(example['question'])
meta_index: MetaIndex = inputs['meta_index'][i]
schema: WTQSchema = WTQSchema.from_json(example['schema'])
gold_col_labels = inputs['column_labels'][i]
pred_col_labels = torch.argmax(outputs['column_logits'][i], dim=-1)
pred_col_scores = torch.softmax(outputs['column_logits'][i], dim=-1)
col_correct = pred_col_labels.equal(gold_col_labels)
col_identify_results = []
for j in range(len(gold_col_labels)):
col_name = schema.column_headers[j]
col_identify_results += [(col_name, gold_col_labels[j].item(), pred_col_labels[j].item(),
pred_col_scores[j, pred_col_labels[j]].item())]
align_weights = {}
if 'alignment_weights' in outputs:
col_align_weights = outputs['alignment_weights'][i]
align_weights['col'] = col_align_weights
gold_align_labels = get_wtq_alignments_from_labeling(example['align_labels'], question, schema)
pred_align_labels = get_wtq_alignments_from_prediction(outputs['alignment_weights'][i], question, schema,
meta_index, threshold=0.15)
align_result = evaluate_linking(gold_align_labels, pred_align_labels, [SQLTokenType.column])
metrics = get_precision_recall_and_f1(align_result)
for align_type in align_result:
for key in align_result[align_type]:
self.align_results[align_type][key] += align_result[align_type][key]
pred_sql: SQLExpression = None
if 'hypotheses' in outputs:
hypothesis: SQLTokenHypothesis = outputs['hypotheses'][i]
pred_sql = hypothesis.to_sql()
sql_correct = pred_sql is not None and pred_sql == gold_sql
eval_case = WTQCase(
schema=schema,
gold_sql=gold_sql,
pred_sql=pred_sql,
question=Utterance.from_json(example['question']),
enc_input_tokens=inputs['input_tokens'][i],
correct_dict={'col': col_correct, 'all': col_correct},
identification_dict={'col': col_identify_results},
alignment_dict=align_weights,
gold_alignments=gold_align_labels,
pred_alignments=pred_align_labels,
metrics=metrics)
self.cases += [eval_case]
self.statistics['col_correct'] += col_correct
self.statistics['overall_correct'] += col_correct
self.statistics['LF_correct'] += sql_correct
def get_metrics(self, saved_file: str = None):
metrics = OrderedDict()
total_count = self.statistics['total_count']
metrics['Average loss'] = self.statistics['total_loss'] / total_count
metrics['Column accuracy'] = self.statistics['col_correct'] / total_count
metrics['overall accuracy'] = self.statistics['overall_correct'] / total_count
metrics['SQL accuracy'] = self.statistics['LF_correct'] / total_count
align_metrics = get_precision_recall_and_f1(self.align_results)
for align_type in self.align_results:
metrics[str(align_type)] = " P = {:.3f}, R = {:.3f}, F1 = {:.3f}".format(align_metrics[align_type]['P'],
align_metrics[align_type]['R'],
align_metrics[align_type]['F1'])
if saved_file is not None:
with open(saved_file, 'w', encoding='utf-8') as fw:
fw.write('{}\n\n'.format("\n".join(
[f"{k} = {v:.4f}" if isinstance(v, float) else "{} = {}".format(k, str(v)) for k, v in
metrics.items()])))
for case in self.cases:
fw.write(case.to_string() + '\n\n')
return metrics
|
ContextualSP/awakening_latent_grounding/utils/evaluator.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/evaluator.py",
"repo_id": "ContextualSP",
"token_count": 18161
}
| 238 |
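reduce_alignment_matrix above collapses word-piece-level attention columns back to whole-word columns by summing pieces that map to the same word (the question-first variant additionally averages over repeated pieces). A small sketch of the column-sum behaviour with a toy piece-to-word mapping:

import torch

# 2 entities x 4 word pieces; pieces 1 and 2 belong to the same word
alignment = torch.tensor([[0.1, 0.2, 0.3, 0.4],
                          [0.4, 0.3, 0.2, 0.1]])
piece2word = [0, 1, 1, 2]     # maps each piece index to its word index
num_words = 3

reduced = torch.zeros(alignment.size(0), num_words)
for j, w in enumerate(piece2word):
    reduced[:, w] += alignment[:, j]

print(reduced)
# tensor([[0.1000, 0.5000, 0.4000],
#         [0.4000, 0.5000, 0.1000]])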