text
stringlengths 5
261k
| id
stringlengths 16
106
| metadata
dict | __index_level_0__
int64 0
266
|
---|---|---|---|
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/normalization_layers/'" />
|
keras-io/redirects/layers/normalization/index.html/0
|
{
"file_path": "keras-io/redirects/layers/normalization/index.html",
"repo_id": "keras-io",
"token_count": 38
}
| 143 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/'" />
|
keras-io/redirects/scikit-learn-api/index.html/0
|
{
"file_path": "keras-io/redirects/scikit-learn-api/index.html",
"repo_id": "keras-io",
"token_count": 27
}
| 144 |
def _model_page(path, title, symbol, members=()):
    """Build one docs page entry for a `keras_nlp.models` symbol.

    Args:
        path: URL fragment of the page inside its model directory.
        title: Page title shown in the docs table of contents.
        symbol: Class name under ``keras_nlp.models``.
        members: Method/property names to document after the class
            itself, in the order they should appear on the page.

    Returns:
        A ``{"path", "title", "generate"}`` dict in the format consumed
        by the docs generator.
    """
    qualified = f"keras_nlp.models.{symbol}"
    return {
        "path": path,
        "title": title,
        "generate": [qualified] + [f"{qualified}.{member}" for member in members],
    }


def _standard_model_section(dirname, title, symbol):
    """Build the docs section for a model with the standard six pages.

    Most classification-style models expose the same API surface:
    tokenizer, preprocessor, backbone, classifier, masked LM, and
    masked LM preprocessor. Models with a different page set (causal
    and seq2seq LMs, or backbones with extra members) are spelled out
    explicitly in ``MODELS_MASTER`` below.
    """
    return {
        "path": f"{dirname}/",
        "title": title,
        "toc": True,
        "children": [
            _model_page(
                f"{dirname}_tokenizer",
                f"{symbol}Tokenizer",
                f"{symbol}Tokenizer",
                ["from_preset"],
            ),
            _model_page(
                f"{dirname}_preprocessor",
                f"{symbol}Preprocessor layer",
                f"{symbol}Preprocessor",
                ["from_preset", "tokenizer"],
            ),
            _model_page(
                f"{dirname}_backbone",
                f"{symbol}Backbone model",
                f"{symbol}Backbone",
                ["from_preset", "token_embedding"],
            ),
            _model_page(
                f"{dirname}_classifier",
                f"{symbol}Classifier model",
                f"{symbol}Classifier",
                ["from_preset", "backbone", "preprocessor"],
            ),
            _model_page(
                f"{dirname}_masked_lm",
                f"{symbol}MaskedLM model",
                f"{symbol}MaskedLM",
                ["from_preset", "backbone", "preprocessor"],
            ),
            _model_page(
                f"{dirname}_masked_lm_preprocessor",
                f"{symbol}MaskedLMPreprocessor layer",
                f"{symbol}MaskedLMPreprocessor",
                ["from_preset", "tokenizer"],
            ),
        ],
    }


# Master table of contents for the "Models" section of the KerasNLP docs.
# Fixes relative to the previous version:
#   - The Bart pages previously documented the *Bert* symbols
#     (copy-paste error); they now document BartTokenizer,
#     BartPreprocessor, and BartBackbone.
#   - The FNet backbone page path was "f_net3_backbone"; it is now
#     "f_net_backbone", consistent with its sibling pages.
MODELS_MASTER = {
    "path": "models/",
    "title": "Models",
    "toc": True,
    "children": [
        _standard_model_section("albert", "Albert", "Albert"),
        {
            "path": "bart/",
            "title": "Bart",
            "toc": True,
            "children": [
                _model_page(
                    "bart_tokenizer",
                    "BartTokenizer",
                    "BartTokenizer",
                    ["from_preset"],
                ),
                _model_page(
                    "bart_preprocessor",
                    "BartPreprocessor layer",
                    "BartPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
                _model_page(
                    "bart_backbone",
                    "BartBackbone model",
                    "BartBackbone",
                    ["from_preset", "token_embedding"],
                ),
                _model_page(
                    "bart_seq_2_seq_lm",
                    "BartSeq2SeqLM model",
                    "BartSeq2SeqLM",
                    ["from_preset", "generate", "backbone", "preprocessor"],
                ),
                _model_page(
                    "bart_seq_2_seq_lm_preprocessor",
                    "BartSeq2SeqLMPreprocessor layer",
                    "BartSeq2SeqLMPreprocessor",
                    [
                        "from_preset",
                        "generate_preprocess",
                        "generate_postprocess",
                        "tokenizer",
                    ],
                ),
            ],
        },
        _standard_model_section("bert", "Bert", "Bert"),
        _standard_model_section("deberta_v3", "DebertaV3", "DebertaV3"),
        _standard_model_section("distil_bert", "DistilBert", "DistilBert"),
        {
            "path": "gemma/",
            "title": "Gemma",
            "toc": True,
            "children": [
                _model_page(
                    "gemma_tokenizer",
                    "GemmaTokenizer",
                    "GemmaTokenizer",
                    ["from_preset"],
                ),
                _model_page(
                    "gemma_preprocessor",
                    "GemmaPreprocessor layer",
                    "GemmaPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
                # Gemma's backbone additionally documents LoRA and
                # model-parallel layout helpers.
                _model_page(
                    "gemma_backbone",
                    "GemmaBackbone model",
                    "GemmaBackbone",
                    [
                        "from_preset",
                        "token_embedding",
                        "enable_lora",
                        "get_layout_map",
                    ],
                ),
                _model_page(
                    "gemma_causal_lm",
                    "GemmaCausalLM model",
                    "GemmaCausalLM",
                    ["from_preset", "generate", "backbone", "preprocessor", "score"],
                ),
                _model_page(
                    "gemma_causal_lm_preprocessor",
                    "GemmaCausalLMPreprocessor layer",
                    "GemmaCausalLMPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
            ],
        },
        {
            "path": "gpt2/",
            "title": "GPT2",
            "toc": True,
            "children": [
                _model_page(
                    "gpt2_tokenizer",
                    "GPT2Tokenizer",
                    "GPT2Tokenizer",
                    ["from_preset"],
                ),
                _model_page(
                    "gpt2_preprocessor",
                    "GPT2Preprocessor layer",
                    "GPT2Preprocessor",
                    ["from_preset", "tokenizer"],
                ),
                _model_page(
                    "gpt2_backbone",
                    "GPT2Backbone model",
                    "GPT2Backbone",
                    ["from_preset", "token_embedding"],
                ),
                _model_page(
                    "gpt2_causal_lm",
                    "GPT2CausalLM model",
                    "GPT2CausalLM",
                    ["from_preset", "generate", "backbone", "preprocessor"],
                ),
                _model_page(
                    "gpt2_causal_lm_preprocessor",
                    "GPT2CausalLMPreprocessor layer",
                    "GPT2CausalLMPreprocessor",
                    [
                        "from_preset",
                        "generate_preprocess",
                        "generate_postprocess",
                        "tokenizer",
                    ],
                ),
            ],
        },
        _standard_model_section("f_net", "FNet", "FNet"),
        {
            "path": "mistral/",
            "title": "Mistral",
            "toc": True,
            "children": [
                _model_page(
                    "mistral_tokenizer",
                    "MistralTokenizer",
                    "MistralTokenizer",
                    ["from_preset"],
                ),
                _model_page(
                    "mistral_preprocessor",
                    "MistralPreprocessor layer",
                    "MistralPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
                _model_page(
                    "mistral_backbone",
                    "MistralBackbone model",
                    "MistralBackbone",
                    ["from_preset", "token_embedding", "enable_lora"],
                ),
                _model_page(
                    "mistral_causal_lm",
                    "MistralCausalLM model",
                    "MistralCausalLM",
                    ["from_preset", "generate", "backbone", "preprocessor"],
                ),
                _model_page(
                    "mistral_causal_lm_preprocessor",
                    "MistralCausalLMPreprocessor layer",
                    "MistralCausalLMPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
            ],
        },
        {
            "path": "opt/",
            "title": "OPT",
            "toc": True,
            "children": [
                _model_page(
                    "opt_tokenizer",
                    "OPTTokenizer",
                    "OPTTokenizer",
                    ["from_preset"],
                ),
                _model_page(
                    "opt_preprocessor",
                    "OPTPreprocessor layer",
                    "OPTPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
                _model_page(
                    "opt_backbone",
                    "OPTBackbone model",
                    "OPTBackbone",
                    ["from_preset", "token_embedding"],
                ),
                _model_page(
                    "opt_causal_lm",
                    "OPTCausalLM model",
                    "OPTCausalLM",
                    ["from_preset", "generate", "backbone", "preprocessor"],
                ),
                _model_page(
                    "opt_causal_lm_preprocessor",
                    "OPTCausalLMPreprocessor layer",
                    "OPTCausalLMPreprocessor",
                    ["from_preset", "tokenizer"],
                ),
            ],
        },
        _standard_model_section("roberta", "Roberta", "Roberta"),
        _standard_model_section("xlm_roberta", "XLMRoberta", "XLMRoberta"),
    ],
}
# Docs section for the text-generation samplers. The base class page is
# spelled out (it also documents `get_next_token`); every concrete
# sampler gets a one-class page built from a (page, class) pair.
SAMPLERS_MASTER = {
    "path": "samplers/",
    "title": "Samplers",
    "toc": True,
    "children": [
        {
            "path": "samplers",
            "title": "Sampler base class",
            "generate": [
                "keras_nlp.samplers.Sampler",
                "keras_nlp.samplers.Sampler.get_next_token",
            ],
        },
    ]
    + [
        {
            "path": page,
            "title": cls,
            "generate": [f"keras_nlp.samplers.{cls}"],
        }
        for page, cls in [
            ("beam_sampler", "BeamSampler"),
            ("contrastive_sampler", "ContrastiveSampler"),
            ("greedy_sampler", "GreedySampler"),
            ("random_sampler", "RandomSampler"),
            ("top_k_sampler", "TopKSampler"),
            ("top_p_sampler", "TopPSampler"),
        ]
    ],
}
def _tokenizer_page(page, title, symbol):
    """Docs page for one tokenizer: the class plus its common methods.

    Every tokenizer documents the same six members in the same order,
    so the `generate` list is derived instead of written out by hand.
    """
    methods = [
        "tokenize",
        "detokenize",
        "get_vocabulary",
        "vocabulary_size",
        "token_to_id",
        "id_to_token",
    ]
    return {
        "path": page,
        "title": title,
        "generate": [symbol] + [f"{symbol}.{name}" for name in methods],
    }


# Docs section for the tokenizer classes and vocabulary utilities.
TOKENIZERS_MASTER = {
    "path": "tokenizers/",
    "title": "Tokenizers",
    "toc": True,
    "children": [
        _tokenizer_page(
            "tokenizer", "Tokenizer base class", "keras_nlp.tokenizers.Tokenizer"
        ),
        _tokenizer_page(
            "word_piece_tokenizer",
            "WordPieceTokenizer",
            "keras_nlp.tokenizers.WordPieceTokenizer",
        ),
        _tokenizer_page(
            "sentence_piece_tokenizer",
            "SentencePieceTokenizer",
            "keras_nlp.tokenizers.SentencePieceTokenizer",
        ),
        _tokenizer_page(
            "byte_pair_tokenizer",
            "BytePairTokenizer",
            "keras_nlp.tokenizers.BytePairTokenizer",
        ),
        _tokenizer_page(
            "byte_tokenizer", "ByteTokenizer", "keras_nlp.tokenizers.ByteTokenizer"
        ),
        _tokenizer_page(
            "unicode_codepoint_tokenizer",
            "UnicodeCodepointTokenizer",
            "keras_nlp.tokenizers.UnicodeCodepointTokenizer",
        ),
        # Vocabulary-building utilities are plain functions, not classes.
        {
            "path": "compute_word_piece_vocabulary",
            "title": "compute_word_piece_vocabulary function",
            "generate": ["keras_nlp.tokenizers.compute_word_piece_vocabulary"],
        },
        {
            "path": "compute_sentence_piece_proto",
            "title": "compute_sentence_piece_proto function",
            "generate": ["keras_nlp.tokenizers.compute_sentence_piece_proto"],
        },
    ],
}
# Docs section for the preprocessing layers. Every page documents a
# single class and is titled "<Class> layer", so the children are
# derived from (page, class) pairs.
PREPROCESSING_LAYERS_MASTER = {
    "path": "preprocessing_layers/",
    "title": "Preprocessing Layers",
    "toc": True,
    "children": [
        {
            "path": page,
            "title": f"{cls} layer",
            "generate": [f"keras_nlp.layers.{cls}"],
        }
        for page, cls in [
            ("start_end_packer", "StartEndPacker"),
            ("multi_segment_packer", "MultiSegmentPacker"),
            ("random_swap", "RandomSwap"),
            ("random_deletion", "RandomDeletion"),
            ("masked_lm_mask_generator", "MaskedLMMaskGenerator"),
        ]
    ],
}
def _layer_page(page, cls, document_call=False):
    """Docs page for one modeling layer.

    When ``document_call`` is true the page also documents the layer's
    ``call`` method (used by the two Transformer blocks).
    """
    symbol = f"keras_nlp.layers.{cls}"
    generate = [symbol, f"{symbol}.call"] if document_call else [symbol]
    return {"path": page, "title": f"{cls} layer", "generate": generate}


# Docs section for the reusable modeling layers.
MODELING_LAYERS_MASTER = {
    "path": "modeling_layers/",
    "title": "Modeling Layers",
    "toc": True,
    "children": [
        _layer_page("transformer_encoder", "TransformerEncoder", document_call=True),
        _layer_page("transformer_decoder", "TransformerDecoder", document_call=True),
        _layer_page("fnet_encoder", "FNetEncoder"),
        _layer_page("position_embedding", "PositionEmbedding"),
        _layer_page("rotary_embedding", "RotaryEmbedding"),
        _layer_page("sine_position_encoding", "SinePositionEncoding"),
        _layer_page("reversible_embedding", "ReversibleEmbedding"),
        _layer_page("token_and_position_embedding", "TokenAndPositionEmbedding"),
        _layer_page("alibi_bias", "AlibiBias"),
        _layer_page("masked_lm_head", "MaskedLMHead"),
        _layer_page("cached_multi_head_attention", "CachedMultiHeadAttention"),
    ],
}
# Docs section for the text metrics. Each page documents one class and
# is titled "<Class> metric", so the children are derived from
# (page, class) pairs.
METRICS_MASTER = {
    "path": "metrics/",
    "title": "Metrics",
    "toc": True,
    "children": [
        {
            "path": page,
            "title": f"{cls} metric",
            "generate": [f"keras_nlp.metrics.{cls}"],
        }
        for page, cls in [
            ("perplexity", "Perplexity"),
            ("rouge_l", "RougeL"),
            ("rouge_n", "RougeN"),
            ("bleu", "Bleu"),
            ("edit_distance", "EditDistance"),
        ]
    ],
}
# Root entry for the KerasNLP API docs: aggregates the section tables
# defined above into one tree. The order of `children` is the order the
# sections are listed in; each child is itself a full section dict with
# its own `children`.
NLP_API_MASTER = {
    "path": "keras_nlp/",
    "title": "KerasNLP",
    "toc": True,
    "children": [
        MODELS_MASTER,
        TOKENIZERS_MASTER,
        PREPROCESSING_LAYERS_MASTER,
        MODELING_LAYERS_MASTER,
        SAMPLERS_MASTER,
        METRICS_MASTER,
    ],
}
|
keras-io/scripts/nlp_api_master.py/0
|
{
"file_path": "keras-io/scripts/nlp_api_master.py",
"repo_id": "keras-io",
"token_count": 25918
}
| 145 |
# KerasCV Models
KerasCV contains end-to-end implementations of popular model
architectures. These models can be created in two ways:
- Through the `from_preset()` constructor, which instantiates an object with
a pre-trained configuration, and (optionally) weights.
Available preset names are listed on this page.
```python
model = keras_cv.models.RetinaNet.from_preset(
"resnet50_v2_imagenet",
num_classes=20,
bounding_box_format="xywh",
)
```
- Through custom configuration controlled by the user. To do this, simply
pass the desired configuration parameters to the default constructors of the
symbols documented below.
```python
backbone = keras_cv.models.ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model = keras_cv.models.RetinaNet(
backbone=backbone,
num_classes=20,
bounding_box_format="xywh",
)
```
## Backbone presets
Each of the following preset names corresponds to a configuration and weights for
a **backbone** model.
The names below can be used with the `from_preset()` constructor for the
corresponding **backbone** model.
```python
backbone = keras_cv.models.ResNetBackbone.from_preset("resnet50_imagenet")
```
For brevity, we do not include the presets without pretrained weights in the
following table.
**Note**: All pretrained weights should be used with unnormalized pixel
intensities in the range `[0, 255]` if `include_rescaling=True` or in the range
`[0, 1]` if `include_rescaling=False`.
{{backbone_presets_table}}
## Task presets
Each of the following preset names corresponds to a configuration and weights for
a **task** model. These models are application-ready, but can be further
fine-tuned if desired.
The names below can be used with the `from_preset()` constructor for the
corresponding **task** models.
```python
object_detector = keras_cv.models.RetinaNet.from_preset(
"retinanet_resnet50_pascalvoc",
bounding_box_format="xywh",
)
```
Note that all backbone presets are also applicable to the tasks. For example,
you can directly use a `ResNetBackbone` preset with the `RetinaNet`. In this
case, fine-tuning is necessary since task-specific layers will be randomly
initialized.
```python
backbone = keras_cv.models.RetinaNet.from_preset(
"resnet50_imagenet",
bounding_box_format="xywh",
)
```
For brevity, we do not include the backbone presets in the following table.
**Note**: All pretrained weights should be used with unnormalized pixel
intensities in the range `[0, 255]` if `include_rescaling=True` or in the range
`[0, 1]` if `include_rescaling=False`.
{{task_presets_table}}
## API Documentation
{{toc}}
|
keras-io/templates/api/keras_cv/models/index.md/0
|
{
"file_path": "keras-io/templates/api/keras_cv/models/index.md",
"repo_id": "keras-io",
"token_count": 878
}
| 146 |
# Layer weight constraints
## Usage of constraints
Classes from the `keras.constraints` module allow setting constraints (eg. non-negativity)
on model parameters during training. They are per-variable projection functions
applied to the target variable after each gradient update (when using `fit()`).
The exact API will depend on the layer, but the layers `Dense`, `Conv1D`, `Conv2D` and `Conv3D` have a unified API.
These layers expose two keyword arguments:
- `kernel_constraint` for the main weights matrix
- `bias_constraint` for the bias.
```python
from keras.constraints import max_norm
model.add(Dense(64, kernel_constraint=max_norm(2.)))
```
---
## Available weight constraints
{{autogenerated}}
## Creating custom weight constraints
A weight constraint can be any callable that takes a tensor
and returns a tensor with the same shape and dtype. You would typically
implement your constraints as subclasses of `keras.constraints.Constraint`.
Here's a simple example: a constraint that forces weight tensors
to be centered around a specific value on average.
```python
from keras import ops
class CenterAround(keras.constraints.Constraint):
"""Constrains weight tensors to be centered around `ref_value`."""
def __init__(self, ref_value):
self.ref_value = ref_value
def __call__(self, w):
mean = ops.mean(w)
return w - mean + self.ref_value
def get_config(self):
return {'ref_value': self.ref_value}
```
Optionally, you can also implement the method `get_config` and the class
method `from_config` in order to support serialization -- just like with
any Keras object. Note that we don't have to implement `from_config`
in the example above since the constructor arguments of the class
and the keys in the config returned by `get_config` are the same.
In this case, the default `from_config` works fine.
|
keras-io/templates/api/layers/constraints.md/0
|
{
"file_path": "keras-io/templates/api/layers/constraints.md",
"repo_id": "keras-io",
"token_count": 535
}
| 147 |
# Automatic Speech Recognition with Transformer
**Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)<br>
**Date created:** 2021/01/13<br>
**Last modified:** 2021/01/13<br>
**Description:** Training a sequence-to-sequence Transformer for automatic speech recognition.
<div class='example_version_banner keras_3'>ⓘ This example uses Keras 3</div>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/audio/ipynb/transformer_asr.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/audio/transformer_asr.py)
---
## Introduction
Automatic speech recognition (ASR) consists of transcribing audio speech segments into text.
ASR can be treated as a sequence-to-sequence problem, where the
audio can be represented as a sequence of feature vectors
and the text as a sequence of characters, words, or subword tokens.
For this demonstration, we will use the LJSpeech dataset from the
[LibriVox](https://librivox.org/) project. It consists of short
audio clips of a single speaker reading passages from 7 non-fiction books.
Our model will be similar to the original Transformer (both encoder and decoder)
as proposed in the paper, "Attention is All You Need".
**References:**
- [Attention is All You Need](https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf)
- [Very Deep Self-Attention Networks for End-to-End Speech Recognition](https://arxiv.org/abs/1904.13377)
- [Speech Transformers](https://ieeexplore.ieee.org/document/8462506)
- [LJSpeech Dataset](https://keithito.com/LJ-Speech-Dataset/)
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from glob import glob
import tensorflow as tf
import keras
from keras import layers
```
---
## Define the Transformer Input Layer
When processing past target tokens for the decoder, we compute the sum of
position embeddings and token embeddings.
When processing audio features, we apply convolutional layers to downsample
them (via convolution strides) and process local relationships.
```python
class TokenEmbedding(layers.Layer):
    """Embeds token ids and adds a learned position embedding.

    The output is the elementwise sum of a token embedding and a position
    embedding, both of width `num_hid`.
    """

    def __init__(self, num_vocab=1000, maxlen=100, num_hid=64):
        super().__init__()
        self.emb = keras.layers.Embedding(num_vocab, num_hid)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=num_hid)

    def call(self, x):
        # Positions are 0..seq_len-1 for the current (dynamic) input length.
        seq_len = tf.shape(x)[-1]
        token_vectors = self.emb(x)
        position_ids = tf.range(start=0, limit=seq_len, delta=1)
        return token_vectors + self.pos_emb(position_ids)
class SpeechFeatureEmbedding(layers.Layer):
    """Downsamples audio feature frames 8x with three strided Conv1D layers."""

    def __init__(self, num_hid=64, maxlen=100):
        super().__init__()
        # All three convolutions share the same configuration; each stride-2
        # layer halves the time dimension.
        conv_kwargs = dict(strides=2, padding="same", activation="relu")
        self.conv1 = keras.layers.Conv1D(num_hid, 11, **conv_kwargs)
        self.conv2 = keras.layers.Conv1D(num_hid, 11, **conv_kwargs)
        self.conv3 = keras.layers.Conv1D(num_hid, 11, **conv_kwargs)

    def call(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
```
---
## Transformer Encoder Layer
```python
class TransformerEncoder(layers.Layer):
    """Post-norm Transformer encoder block: self-attention + feed-forward,
    each with dropout, a residual connection, and layer normalization.
    """

    def __init__(self, embed_dim, num_heads, feed_forward_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = keras.Sequential(
            [layers.Dense(feed_forward_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=False):
        # Self-attention sub-block with residual connection.
        attended = self.dropout1(self.att(inputs, inputs), training=training)
        normed = self.layernorm1(inputs + attended)
        # Position-wise feed-forward sub-block with residual connection.
        projected = self.dropout2(self.ffn(normed), training=training)
        return self.layernorm2(normed + projected)
```
---
## Transformer Decoder Layer
```python
class TransformerDecoder(layers.Layer):
    """Transformer decoder block: causal self-attention, cross-attention over
    the encoder output, and a position-wise feed-forward network, each with
    dropout, a residual connection, and layer normalization.
    """

    def __init__(self, embed_dim, num_heads, feed_forward_dim, dropout_rate=0.1):
        super().__init__()
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
        self.self_att = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim
        )
        self.enc_att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        # NOTE(review): `dropout_rate` is accepted but never used; the rates
        # below are hard-coded — confirm whether this is intentional.
        self.self_dropout = layers.Dropout(0.5)
        self.enc_dropout = layers.Dropout(0.1)
        self.ffn_dropout = layers.Dropout(0.1)
        self.ffn = keras.Sequential(
            [
                layers.Dense(feed_forward_dim, activation="relu"),
                layers.Dense(embed_dim),
            ]
        )

    def causal_attention_mask(self, batch_size, n_dest, n_src, dtype):
        """Masks the upper half of the dot product matrix in self attention.

        This prevents flow of information from future tokens to current token.
        1's in the lower triangle, counting from the lower right corner.
        """
        i = tf.range(n_dest)[:, None]
        j = tf.range(n_src)
        # Destination i may attend to source j iff i >= j (shifted when
        # n_src != n_dest so the triangle is anchored at the lower right).
        m = i >= j - n_src + n_dest
        mask = tf.cast(m, dtype)
        mask = tf.reshape(mask, [1, n_dest, n_src])
        # Tile the single (1, n_dest, n_src) mask across the batch dimension.
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0
        )
        return tf.tile(mask, mult)

    def call(self, enc_out, target):
        input_shape = tf.shape(target)
        batch_size = input_shape[0]
        seq_len = input_shape[1]
        causal_mask = self.causal_attention_mask(batch_size, seq_len, seq_len, tf.bool)
        # Masked self-attention over the target tokens.
        target_att = self.self_att(target, target, attention_mask=causal_mask)
        target_norm = self.layernorm1(target + self.self_dropout(target_att))
        # Cross-attention: queries from the target, keys/values from encoder.
        enc_out = self.enc_att(target_norm, enc_out)
        enc_out_norm = self.layernorm2(self.enc_dropout(enc_out) + target_norm)
        # Position-wise feed-forward.
        ffn_out = self.ffn(enc_out_norm)
        ffn_out_norm = self.layernorm3(enc_out_norm + self.ffn_dropout(ffn_out))
        return ffn_out_norm
```
---
## Complete the Transformer model
Our model takes audio spectrograms as inputs and predicts a sequence of characters.
During training, we give the decoder the target character sequence shifted to the left
as input. During inference, the decoder uses its own past predictions to predict the
next token.
```python
class Transformer(keras.Model):
    """Sequence-to-sequence Transformer for speech recognition.

    The encoder consumes audio feature frames (downsampled by
    `SpeechFeatureEmbedding`); the decoder autoregressively predicts
    character tokens.

    Args:
        num_hid: Hidden size shared by encoder and decoder layers.
        num_head: Number of attention heads.
        num_feed_forward: Hidden size of the position-wise feed-forward nets.
        source_maxlen: Maximum length of the source (audio) sequence.
        target_maxlen: Maximum length of the target (token) sequence.
        num_layers_enc: Number of encoder blocks.
        num_layers_dec: Number of decoder blocks.
        num_classes: Size of the target vocabulary.
    """

    def __init__(
        self,
        num_hid=64,
        num_head=2,
        num_feed_forward=128,
        source_maxlen=100,
        target_maxlen=100,
        num_layers_enc=4,
        num_layers_dec=1,
        num_classes=10,
    ):
        super().__init__()
        self.loss_metric = keras.metrics.Mean(name="loss")
        self.num_layers_enc = num_layers_enc
        self.num_layers_dec = num_layers_dec
        self.target_maxlen = target_maxlen
        self.num_classes = num_classes
        self.enc_input = SpeechFeatureEmbedding(num_hid=num_hid, maxlen=source_maxlen)
        self.dec_input = TokenEmbedding(
            num_vocab=num_classes, maxlen=target_maxlen, num_hid=num_hid
        )
        self.encoder = keras.Sequential(
            [self.enc_input]
            + [
                TransformerEncoder(num_hid, num_head, num_feed_forward)
                for _ in range(num_layers_enc)
            ]
        )
        # Decoder layers are stored as individual attributes so each one is
        # tracked by Keras for checkpointing.
        for i in range(num_layers_dec):
            setattr(
                self,
                f"dec_layer_{i}",
                TransformerDecoder(num_hid, num_head, num_feed_forward),
            )
        self.classifier = layers.Dense(num_classes)

    def decode(self, enc_out, target):
        """Embed the target tokens and run every decoder layer."""
        y = self.dec_input(target)
        for i in range(self.num_layers_dec):
            y = getattr(self, f"dec_layer_{i}")(enc_out, y)
        return y

    def call(self, inputs):
        source = inputs[0]
        target = inputs[1]
        x = self.encoder(source)
        y = self.decode(x, target)
        # Per-position logits over the target vocabulary.
        return self.classifier(y)

    @property
    def metrics(self):
        return [self.loss_metric]

    def train_step(self, batch):
        """Processes one batch inside model.fit()."""
        source = batch["source"]
        target = batch["target"]
        # Teacher forcing: feed target[:-1] and predict target[1:].
        dec_input = target[:, :-1]
        dec_target = target[:, 1:]
        with tf.GradientTape() as tape:
            preds = self([source, dec_input])
            one_hot = tf.one_hot(dec_target, depth=self.num_classes)
            # Mask out padding positions (token id 0) from the loss.
            mask = tf.math.logical_not(tf.math.equal(dec_target, 0))
            # Bug fix: call `self.compute_loss` instead of the global `model`,
            # so the class does not depend on an external variable name.
            loss = self.compute_loss(None, one_hot, preds, sample_weight=mask)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        self.loss_metric.update_state(loss)
        return {"loss": self.loss_metric.result()}

    def test_step(self, batch):
        """Processes one batch inside model.evaluate()."""
        source = batch["source"]
        target = batch["target"]
        dec_input = target[:, :-1]
        dec_target = target[:, 1:]
        preds = self([source, dec_input])
        one_hot = tf.one_hot(dec_target, depth=self.num_classes)
        mask = tf.math.logical_not(tf.math.equal(dec_target, 0))
        # Same fix as in train_step: use `self`, not the global `model`.
        loss = self.compute_loss(None, one_hot, preds, sample_weight=mask)
        self.loss_metric.update_state(loss)
        return {"loss": self.loss_metric.result()}

    def generate(self, source, target_start_token_idx):
        """Performs inference over one batch of inputs using greedy decoding."""
        bs = tf.shape(source)[0]
        enc = self.encoder(source)
        # Seed the decoder with the start token for every sequence.
        dec_input = tf.ones((bs, 1), dtype=tf.int32) * target_start_token_idx
        dec_logits = []
        for i in range(self.target_maxlen - 1):
            dec_out = self.decode(enc, dec_input)
            logits = self.classifier(dec_out)
            # Greedy decoding: keep only the argmax token at the last position.
            logits = tf.argmax(logits, axis=-1, output_type=tf.int32)
            last_logit = tf.expand_dims(logits[:, -1], axis=-1)
            dec_logits.append(last_logit)
            dec_input = tf.concat([dec_input, last_logit], axis=-1)
        return dec_input
```
---
## Download the dataset
Note: This requires ~3.6 GB of disk space and
takes ~5 minutes for the extraction of files.
```python
keras.utils.get_file(
os.path.join(os.getcwd(), "data.tar.gz"),
"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
extract=True,
archive_format="tar",
cache_dir=".",
)
saveto = "./datasets/LJSpeech-1.1"
wavs = glob("{}/**/*.wav".format(saveto), recursive=True)
id_to_text = {}
with open(os.path.join(saveto, "metadata.csv"), encoding="utf-8") as f:
for line in f:
id = line.strip().split("|")[0]
text = line.strip().split("|")[2]
id_to_text[id] = text
def get_data(wavs, id_to_text, maxlen=50):
    """Returns mapping of audio paths and transcription texts.

    Args:
        wavs: List of paths to wav files.
        id_to_text: Dict mapping a clip id (the wav filename up to its first
            dot) to its transcript.
        maxlen: Clips whose transcript has `maxlen` or more characters are
            dropped, to keep target sequences short.

    Returns:
        A list of `{"audio": path, "text": transcript}` dicts.
    """
    data = []
    for w in wavs:
        # Use os.path.basename so clip ids are extracted correctly on any
        # platform (splitting on "/" breaks on Windows paths), and avoid
        # shadowing the builtin `id`.
        clip_id = os.path.basename(w).split(".")[0]
        if len(id_to_text[clip_id]) < maxlen:
            data.append({"audio": w, "text": id_to_text[clip_id]})
    return data
```
<div class="k-default-codeblock">
```
Downloading data from https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
2748572632/2748572632 ━━━━━━━━━━━━━━━━━━━━ 18s 0us/step
```
</div>
---
## Preprocess the dataset
```python
class VectorizeChar:
    """Maps text to a fixed-length sequence of character indices.

    The vocabulary is: "-" (pad-adjacent filler), "#" (unknown), "<" (start),
    ">" (end), the lowercase ASCII letters, then space and basic punctuation.
    Index 0 is used for padding and index 1 for out-of-vocabulary characters.
    """

    def __init__(self, max_len=50):
        specials = ["-", "#", "<", ">"]
        letters = [chr(code) for code in range(ord("a"), ord("z") + 1)]
        self.vocab = specials + letters + [" ", ".", ",", "?"]
        self.max_len = max_len
        self.char_to_idx = {ch: i for i, ch in enumerate(self.vocab)}

    def __call__(self, text):
        # Lowercase, clip to leave room for the start/end markers, then wrap.
        wrapped = "<" + text.lower()[: self.max_len - 2] + ">"
        indices = [self.char_to_idx.get(ch, 1) for ch in wrapped]
        return indices + [0] * (self.max_len - len(wrapped))

    def get_vocabulary(self):
        return self.vocab
max_target_len = 200  # all transcripts in our data are < 200 characters
data = get_data(wavs, id_to_text, max_target_len)
vectorizer = VectorizeChar(max_target_len)
print("vocab size", len(vectorizer.get_vocabulary()))
def create_text_ds(data):
    """Build a dataset of vectorized transcripts (one int sequence per clip)."""
    texts = [_["text"] for _ in data]
    text_ds = [vectorizer(t) for t in texts]
    text_ds = tf.data.Dataset.from_tensor_slices(text_ds)
    return text_ds
def path_to_audio(path):
    """Load a wav file and return a normalized STFT magnitude spectrogram,
    zero-padded / truncated to a fixed number of frames.
    """
    # spectrogram using stft
    audio = tf.io.read_file(path)
    audio, _ = tf.audio.decode_wav(audio, 1)  # decode a single (mono) channel
    audio = tf.squeeze(audio, axis=-1)
    stfts = tf.signal.stft(audio, frame_length=200, frame_step=80, fft_length=256)
    # Compress the magnitude range with a square root (power 0.5).
    x = tf.math.pow(tf.abs(stfts), 0.5)
    # normalisation
    means = tf.math.reduce_mean(x, 1, keepdims=True)
    stddevs = tf.math.reduce_std(x, 1, keepdims=True)
    x = (x - means) / stddevs
    audio_len = tf.shape(x)[0]  # NOTE(review): unused — kept for reference
    # padding to 10 seconds
    pad_len = 2754
    paddings = tf.constant([[0, pad_len], [0, 0]])
    x = tf.pad(x, paddings, "CONSTANT")[:pad_len, :]
    return x
def create_audio_ds(data):
    """Build a dataset of spectrograms, decoding wav files in parallel."""
    flist = [_["audio"] for _ in data]
    audio_ds = tf.data.Dataset.from_tensor_slices(flist)
    audio_ds = audio_ds.map(path_to_audio, num_parallel_calls=tf.data.AUTOTUNE)
    return audio_ds
def create_tf_dataset(data, bs=4):
    """Zip audio and text datasets into batched {"source", "target"} dicts."""
    audio_ds = create_audio_ds(data)
    text_ds = create_text_ds(data)
    ds = tf.data.Dataset.zip((audio_ds, text_ds))
    ds = ds.map(lambda x, y: {"source": x, "target": y})
    ds = ds.batch(bs)
    ds = ds.prefetch(tf.data.AUTOTUNE)
    return ds
split = int(len(data) * 0.99)
train_data = data[:split]
test_data = data[split:]
ds = create_tf_dataset(train_data, bs=64)
val_ds = create_tf_dataset(test_data, bs=4)
```
<div class="k-default-codeblock">
```
vocab size 34
```
</div>
---
## Callbacks to display predictions
```python
class DisplayOutputs(keras.callbacks.Callback):
    """Callback that prints greedy-decoded predictions for one fixed batch."""

    def __init__(
        self, batch, idx_to_token, target_start_token_idx=27, target_end_token_idx=28
    ):
        """Displays a batch of outputs after every epoch

        Args:
            batch: A test batch containing the keys "source" and "target"
            idx_to_token: A List containing the vocabulary tokens corresponding to their indices
            target_start_token_idx: A start token index in the target vocabulary
            target_end_token_idx: An end token index in the target vocabulary
        """
        self.batch = batch
        self.target_start_token_idx = target_start_token_idx
        self.target_end_token_idx = target_end_token_idx
        self.idx_to_char = idx_to_token

    def on_epoch_end(self, epoch, logs=None):
        # Only display every 5th epoch to keep the training logs readable.
        if epoch % 5 != 0:
            return
        source = self.batch["source"]
        target = self.batch["target"].numpy()
        bs = tf.shape(source)[0]
        preds = self.model.generate(source, self.target_start_token_idx)
        preds = preds.numpy()
        for i in range(bs):
            target_text = "".join([self.idx_to_char[_] for _ in target[i, :]])
            prediction = ""
            for idx in preds[i, :]:
                prediction += self.idx_to_char[idx]
                # Stop once the end token has been emitted (it is included).
                if idx == self.target_end_token_idx:
                    break
            print(f"target: {target_text.replace('-','')}")
            print(f"prediction: {prediction}\n")
```
---
## Learning rate schedule
```python
class CustomSchedule(keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup followed by linear decay, computed per epoch.

    Warms up from `init_lr` to `lr_after_warmup` over `warmup_epochs`, then
    decays linearly toward `final_lr` over `decay_epochs`, never dropping
    below `final_lr`.
    """

    def __init__(
        self,
        init_lr=0.00001,
        lr_after_warmup=0.001,
        final_lr=0.00001,
        warmup_epochs=15,
        decay_epochs=85,
        steps_per_epoch=203,
    ):
        super().__init__()
        self.init_lr = init_lr
        self.lr_after_warmup = lr_after_warmup
        self.final_lr = final_lr
        self.warmup_epochs = warmup_epochs
        self.decay_epochs = decay_epochs
        self.steps_per_epoch = steps_per_epoch

    def calculate_lr(self, epoch):
        """linear warm up - linear decay"""
        warmup_lr = (
            self.init_lr
            + ((self.lr_after_warmup - self.init_lr) / (self.warmup_epochs - 1)) * epoch
        )
        decay_lr = tf.math.maximum(
            self.final_lr,
            self.lr_after_warmup
            - (epoch - self.warmup_epochs)
            * (self.lr_after_warmup - self.final_lr)
            / self.decay_epochs,
        )
        # The minimum of the two ramps selects warmup early, decay later.
        return tf.math.minimum(warmup_lr, decay_lr)

    def __call__(self, step):
        # `step` is the global batch index; convert it to a float epoch count.
        epoch = step // self.steps_per_epoch
        epoch = tf.cast(epoch, "float32")
        return self.calculate_lr(epoch)
```
---
## Create & train the end-to-end model
```python
batch = next(iter(val_ds))
# The vocabulary to convert predicted indices into characters
idx_to_char = vectorizer.get_vocabulary()
display_cb = DisplayOutputs(
batch, idx_to_char, target_start_token_idx=2, target_end_token_idx=3
) # set the arguments as per vocabulary index for '<' and '>'
model = Transformer(
num_hid=200,
num_head=2,
num_feed_forward=400,
target_maxlen=max_target_len,
num_layers_enc=4,
num_layers_dec=1,
num_classes=34,
)
loss_fn = keras.losses.CategoricalCrossentropy(
from_logits=True,
label_smoothing=0.1,
)
learning_rate = CustomSchedule(
init_lr=0.00001,
lr_after_warmup=0.001,
final_lr=0.00001,
warmup_epochs=15,
decay_epochs=85,
steps_per_epoch=len(ds),
)
optimizer = keras.optimizers.Adam(learning_rate)
model.compile(optimizer=optimizer, loss=loss_fn)
history = model.fit(ds, validation_data=val_ds, callbacks=[display_cb], epochs=1)
```
<div class="k-default-codeblock">
```
1/203 [37m━━━━━━━━━━━━━━━━━━━━ 9:20:11 166s/step - loss: 2.2387
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1700071380.331418 678094 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
203/203 ━━━━━━━━━━━━━━━━━━━━ 0s 947ms/step - loss: 1.8285target: <the relations between lee and marina oswald are of great importance in any attempt to understand oswald#s possible motivation.>
prediction: <the the he at the t the an of t te the ale t he t te ar the in the the s the s tan as t the t as re the te the ast he and t the s s the thee thed the the thes the s te te he t the of in anae o the or
```
</div>
<div class="k-default-codeblock">
```
target: <he was in consequence put out of the protection of their internal law, end quote. their code was a subject of some curiosity.>
prediction: <the the he at the t the an of t te the ale t he t te ar the in the the s the s tan as t the t as re the te the ast he and t the s s the thee thed the the thes the s te te he t the of in anae o the or
```
</div>
<div class="k-default-codeblock">
```
target: <that is why i occasionally leave this scene of action for a few days>
prediction: <the the he at the t the an of t te the ale t he t te ar the in the the s the s tan ase athe t as re the te the ast he and t the s s the thee thed the the thes the s te te he t the of in anse o the or
```
</div>
<div class="k-default-codeblock">
```
target: <it probably contributed greatly to the general dissatisfaction which he exhibited with his environment,>
prediction: <the the he at the t the an of t te the ale t he t te ar the in the the s the s tan as t the t as re the te the ast he and t the s s the thee thed the the thes the s te te he t the of in anae o the or
```
</div>
<div class="k-default-codeblock">
```
203/203 ━━━━━━━━━━━━━━━━━━━━ 428s 1s/step - loss: 1.8276 - val_loss: 1.5233
```
</div>
In practice, you should train for around 100 epochs or more.
Some of the predicted text at or around epoch 35 may look as follows:
```
target: <as they sat in the car, frazier asked oswald where his lunch was>
prediction: <as they sat in the car frazier his lunch ware mis lunch was>
target: <under the entry for may one, nineteen sixty,>
prediction: <under the introus for may monee, nin the sixty,>
```
|
keras-io/templates/examples/audio/transformer_asr.md/0
|
{
"file_path": "keras-io/templates/examples/audio/transformer_asr.md",
"repo_id": "keras-io",
"token_count": 9057
}
| 148 |
# KerasTuner
<a class="github-button" href="https://github.com/keras-team/keras-tuner" data-size="large" data-show-count="true" aria-label="Star keras-team/keras-tuner on GitHub">Star</a>
KerasTuner is an easy-to-use, scalable hyperparameter optimization framework
that solves the pain points of hyperparameter search. Easily configure your
search space with a define-by-run syntax, then leverage one of the available
search algorithms to find the best hyperparameter values for your models.
KerasTuner comes with Bayesian Optimization, Hyperband, and Random Search algorithms
built-in, and is also designed to be easy for researchers to extend in order to
experiment with new search algorithms.
---
## Quick links
* [Getting started with KerasTuner](/guides/keras_tuner/getting_started/)
* [KerasTuner developer guides](/guides/keras_tuner/)
* [KerasTuner API reference](/api/keras_tuner/)
* [KerasTuner on GitHub](https://github.com/keras-team/keras-tuner)
---
## Installation
Install the latest release:
```
pip install keras-tuner --upgrade
```
You can also check out other versions in our
[GitHub repository](https://github.com/keras-team/keras-tuner).
---
## Quick introduction
Import KerasTuner and Keras:
```python
import keras_tuner
import keras
```
Write a function that creates and returns a Keras model.
Use the `hp` argument to define the hyperparameters during model creation.
```python
def build_model(hp):
model = keras.Sequential()
model.add(keras.layers.Dense(
hp.Choice('units', [8, 16, 32]),
activation='relu'))
model.add(keras.layers.Dense(1, activation='relu'))
model.compile(loss='mse')
return model
```
Initialize a tuner (here, `RandomSearch`).
We use `objective` to specify the objective to select the best models,
and we use `max_trials` to specify the number of different models to try.
```python
tuner = keras_tuner.RandomSearch(
build_model,
objective='val_loss',
max_trials=5)
```
Start the search and get the best model:
```python
tuner.search(x_train, y_train, epochs=5, validation_data=(x_val, y_val))
best_model = tuner.get_best_models()[0]
```
To learn more about KerasTuner, check out [this starter guide](https://keras.io/guides/keras_tuner/getting_started/).
---
## Citing KerasTuner
If KerasTuner helps your research, we appreciate your citations.
Here is the BibTeX entry:
```bibtex
@misc{omalley2019kerastuner,
title = {KerasTuner},
author = {O'Malley, Tom and Bursztein, Elie and Long, James and Chollet, Fran\c{c}ois and Jin, Haifeng and Invernizzi, Luca and others},
year = 2019,
howpublished = {\url{https://github.com/keras-team/keras-tuner}}
}
```
|
keras-io/templates/keras_tuner/index.md/0
|
{
"file_path": "keras-io/templates/keras_tuner/index.md",
"repo_id": "keras-io",
"token_count": 918
}
| 149 |
build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
env_vars: {
key: "KERAS_BACKEND"
value: "tensorflow"
}
# Set timeout to 60 mins from default 180 mins
timeout_mins: 60
|
keras-nlp/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg/0
|
{
"file_path": "keras-nlp/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg",
"repo_id": "keras-nlp",
"token_count": 121
}
| 150 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GLUE benchmark script to test model performance.
To run the script, use this command:
```
python3 glue.py --model BertClassifier \
--preset bert_base_en \
--epochs 5 \
--batch_size 16 \
--learning_rate 0.001 \
--mixed_precision_policy mixed_float16
```
Disclaimer: This script only supports GLUE/mrpc (for now).
"""
import inspect
import time
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app
from absl import flags
from absl import logging
from tensorflow import keras
import keras_nlp
seed = 42
tf.random.set_seed(seed)
flags.DEFINE_string(
"task",
"mrpc",
"The name of the GLUE task to finetune on.",
)
flags.DEFINE_string(
"model", None, "The name of the classifier such as BertClassifier."
)
flags.DEFINE_string(
"preset",
None,
"The model preset, e.g., 'bert_base_en_uncased' for `BertClassifier`",
)
flags.DEFINE_float(
"learning_rate", 0.005, "The learning_rate for the optimizer."
)
flags.DEFINE_string(
"mixed_precision_policy",
"mixed_float16",
"The global precision policy to use, e.g., 'mixed_float16' or 'float32'.",
)
flags.DEFINE_integer("epochs", 2, "The number of epochs.")
flags.DEFINE_integer("batch_size", 8, "Batch Size.")
FLAGS = flags.FLAGS
def load_data():
    """Load data.

    Load GLUE/MRPC dataset, and convert the dictionary format to
    (features, label), where `features` is a tuple of all input sentences.

    Returns:
        A `(train_ds, test_ds, validation_ds)` tuple of `tf.data.Dataset`s.
    """
    feature_names = ("sentence1", "sentence2")

    def split_features(x):
        # GLUE comes with dictionary data, we convert it to a uniform format
        # (features, label), where features is a tuple consisting of all
        # features.
        features = tuple([x[name] for name in feature_names])
        label = x["label"]
        return (features, label)

    train_ds, test_ds, validation_ds = tfds.load(
        "glue/mrpc",
        split=["train", "test", "validation"],
    )
    train_ds = train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
    test_ds = test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
    validation_ds = validation_ds.map(
        split_features, num_parallel_calls=tf.data.AUTOTUNE
    )
    return train_ds, test_ds, validation_ds
def load_model(model, preset, num_classes):
    """Find and instantiate the requested KerasNLP classifier.

    Scans `keras_nlp.models` for classifier classes, optionally filtered by
    `model` (class name) and `preset` (preset name), and returns the first
    match instantiated via `from_preset`.

    Args:
        model: Class name to match (e.g. "BertClassifier"), or falsy for any.
        preset: Preset name to match (e.g. "bert_base_en"), or falsy for any.
        num_classes: Number of output classes for the classification head.

    Raises:
        ValueError: If no matching model/preset pair is found.
    """
    for name, symbol in keras_nlp.models.__dict__.items():
        if inspect.isclass(symbol) and issubclass(symbol, keras.Model):
            if model and name != model:
                continue
            if not hasattr(symbol, "from_preset"):
                continue
            # Bug fix: the loop variable used to be named `preset`, shadowing
            # the argument and making the filter below (`preset != preset`)
            # a no-op that instantiated the first preset of the first model.
            for preset_name in symbol.presets:
                if preset and preset_name != preset:
                    continue
                classifier = symbol.from_preset(preset_name, num_classes=num_classes)
                logging.info(f"\nUsing model {name} with preset {preset_name}\n")
                return classifier

    raise ValueError(f"Model {model} or preset {preset} not found.")
def main(_):
    """Entry point: fine-tune the selected classifier on GLUE/MRPC and log
    wall time, validation accuracy, and throughput."""
    keras.mixed_precision.set_global_policy(FLAGS.mixed_precision_policy)

    # Check task is supported.
    # TODO(chenmoneygithub): Add support for other glue tasks.
    if FLAGS.task != "mrpc":
        raise ValueError(
            f"For now only mrpc is supported, but received {FLAGS.task}."
        )

    logging.info(
        "Benchmarking configs...\n"
        "=========================\n"
        f"MODEL: {FLAGS.model}\n"
        f"PRESET: {FLAGS.preset}\n"
        f"TASK: glue/{FLAGS.task}\n"
        f"BATCH_SIZE: {FLAGS.batch_size}\n"
        f"EPOCHS: {FLAGS.epochs}\n"
        "=========================\n"
    )

    # Load datasets.
    train_ds, test_ds, validation_ds = load_data()
    train_ds = train_ds.batch(FLAGS.batch_size).prefetch(tf.data.AUTOTUNE)
    test_ds = test_ds.batch(FLAGS.batch_size).prefetch(tf.data.AUTOTUNE)
    validation_ds = validation_ds.batch(FLAGS.batch_size).prefetch(
        tf.data.AUTOTUNE
    )

    # Load the model.
    model = load_model(model=FLAGS.model, preset=FLAGS.preset, num_classes=2)
    # Set loss and metrics.
    loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics = [keras.metrics.SparseCategoricalAccuracy()]
    # Configure optimizer: decay the learning rate linearly to 0 over the
    # full training run.
    lr = tf.keras.optimizers.schedules.PolynomialDecay(
        FLAGS.learning_rate,
        decay_steps=train_ds.cardinality() * FLAGS.epochs,
        end_learning_rate=0.0,
    )
    optimizer = tf.keras.optimizers.experimental.AdamW(lr, weight_decay=0.01)
    # BERT-style fine-tuning convention: no weight decay on norms and biases.
    optimizer.exclude_from_weight_decay(
        var_names=["LayerNorm", "layer_norm", "bias"]
    )
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # Start training.
    logging.info("Starting Training...")

    st = time.time()
    history = model.fit(
        train_ds,
        validation_data=validation_ds,
        epochs=FLAGS.epochs,
    )

    wall_time = time.time() - st
    validation_accuracy = history.history["val_sparse_categorical_accuracy"][-1]
    # Throughput counted over train + validation batches across all epochs.
    examples_per_second = (
        FLAGS.epochs * FLAGS.batch_size * (len(train_ds) + len(validation_ds))
    ) / wall_time

    logging.info("Training Finished!")
    logging.info(f"Wall Time: {wall_time:.4f} seconds.")
    logging.info(f"Validation Accuracy: {validation_accuracy:.4f}")
    logging.info(f"examples_per_second: {examples_per_second:.4f}")
if __name__ == "__main__":
flags.mark_flag_as_required("model")
flags.mark_flag_as_required("preset")
app.run(main)
|
keras-nlp/benchmarks/glue.py/0
|
{
"file_path": "keras-nlp/benchmarks/glue.py",
"repo_id": "keras-nlp",
"token_count": 2499
}
| 151 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import random
import re
import string
import tensorflow as tf
from tensorflow import keras
def download_data():
    """Download and extract the spa-eng corpus; return the path to spa.txt."""
    text_file = keras.utils.get_file(
        fname="spa-eng.zip",
        origin=(
            "http://storage.googleapis.com/download.tensorflow.org/data/"
            + "spa-eng.zip"
        ),
        extract=True,
    )
    # `get_file` returns the archive path; the extracted data sits next to it.
    return pathlib.Path(text_file).parent / "spa-eng" / "spa.txt"
def read_data(filepath):
    """Parse the tab-separated corpus file into (english, spanish) pairs.

    Each line is "english\tspanish"; the Spanish side is wrapped with the
    "[start]"/"[end]" markers used by the decoder. The trailing empty line
    produced by the final newline is dropped.
    """
    with open(filepath) as handle:
        raw_lines = handle.read().split("\n")[:-1]
    pairs = []
    for raw in raw_lines:
        english, spanish = raw.split("\t")
        pairs.append((english, "[start] " + spanish + " [end]"))
    return pairs
def split_train_val_test(text_pairs):
    """Shuffle `text_pairs` in place and split 70/15/15 into train/val/test."""
    random.shuffle(text_pairs)
    n_val = int(0.15 * len(text_pairs))
    n_train = len(text_pairs) - 2 * n_val
    return (
        text_pairs[:n_train],
        text_pairs[n_train : n_train + n_val],
        text_pairs[n_train + n_val :],
    )
# Characters removed from Spanish text during standardization: all ASCII
# punctuation plus the inverted question mark, except "[" and "]", which are
# kept so the "[start]"/"[end]" markers survive tokenization.
strip_chars = string.punctuation + "¿"
strip_chars = strip_chars.replace("[", "")
strip_chars = strip_chars.replace("]", "")
@keras.saving.register_keras_serializable()
def custom_standardization(input_string):
    """Lowercase text and strip punctuation (keeping "[" and "]")."""
    lowercase = tf.strings.lower(input_string)
    return tf.strings.regex_replace(
        lowercase,
        "[%s]" % re.escape(strip_chars),
        "",
    )
def prepare_tokenizer(train_pairs, sequence_length, vocab_size):
    """Prepare English and Spanish tokenizer.

    The Spanish tokenizer emits `sequence_length + 1` tokens because the
    target is later shifted by one position for next-token prediction.
    """
    eng_tokenizer = keras.layers.TextVectorization(
        max_tokens=vocab_size,
        output_mode="int",
        output_sequence_length=sequence_length,
    )
    spa_tokenizer = keras.layers.TextVectorization(
        max_tokens=vocab_size,
        output_mode="int",
        output_sequence_length=sequence_length + 1,
        standardize=custom_standardization,
    )
    eng_texts, spa_texts = zip(*train_pairs)
    # Vocabularies are learned from the training split only.
    eng_tokenizer.adapt(eng_texts)
    spa_tokenizer.adapt(spa_texts)
    return eng_tokenizer, spa_tokenizer
def prepare_datasets(text_pairs, batch_size, eng_tokenizer, spa_tokenizer):
    """Transform raw text pairs to tf datasets.

    Args:
        text_pairs: List of (english, spanish) sentence pairs.
        batch_size: Batch size of the returned dataset.
        eng_tokenizer: `TextVectorization` layer for English inputs.
        spa_tokenizer: `TextVectorization` layer for Spanish targets.

    Returns:
        A `tf.data.Dataset` yielding `(inputs, labels, sample_weights)`
        triples, where `labels` is the Spanish sequence shifted by one token.
    """
    eng_texts, spa_texts = zip(*text_pairs)
    eng_texts = list(eng_texts)
    spa_texts = list(spa_texts)

    def format_dataset(eng, spa):
        """Format the dataset given input English and Spanish text.

        The output format is:
        x: a pair of English and Spanish sentence.
        y: The Spanish sentence in x shifts 1 token towards right, because
        we are predicting the next token.
        """
        eng = eng_tokenizer(eng)
        spa = spa_tokenizer(spa)
        return (
            {
                "encoder_inputs": eng,
                "decoder_inputs": spa[:, :-1],
            },
            spa[:, 1:],
            tf.cast((spa[:, 1:] != 0), "float32"),  # mask as sample weights
        )

    dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(format_dataset)
    # Bug fix: cache *before* shuffling so the tokenized batches are cached
    # once while the shuffle still reorders them every epoch. The original
    # `.shuffle(...).prefetch(...).cache()` froze the first epoch's shuffle
    # order (and made the prefetch pointless) by caching last.
    return dataset.cache().shuffle(2048).prefetch(tf.data.AUTOTUNE)
def get_dataset_and_tokenizer(sequence_length, vocab_size, batch_size):
    """Main method to get the formatted machine translation dataset.

    Returns:
        A tuple `((train_ds, val_ds, test_ds), (eng_tokenizer, spa_tokenizer))`.
    """
    filepath = download_data()
    text_pairs = read_data(filepath)
    train_pairs, val_pairs, test_pairs = split_train_val_test(text_pairs)
    # Tokenizers are adapted on the training split only.
    eng_tokenizer, spa_tokenizer = prepare_tokenizer(
        train_pairs, sequence_length, vocab_size
    )
    train_ds = prepare_datasets(
        train_pairs,
        batch_size,
        eng_tokenizer,
        spa_tokenizer,
    )
    val_ds = prepare_datasets(
        val_pairs,
        batch_size,
        eng_tokenizer,
        spa_tokenizer,
    )
    test_ds = prepare_datasets(
        test_pairs,
        batch_size,
        eng_tokenizer,
        spa_tokenizer,
    )
    return (train_ds, val_ds, test_ds), (eng_tokenizer, spa_tokenizer)
|
keras-nlp/examples/machine_translation/data.py/0
|
{
"file_path": "keras-nlp/examples/machine_translation/data.py",
"repo_id": "keras-nlp",
"token_count": 2005
}
| 152 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Keras backend module.
This module adds a temporary Keras API surface that is fully under KerasNLP
control. The goal is to allow us to write Keras 3-like code everywhere, while
still supporting Keras 2. We do this by using the `keras_core` package with
Keras 2 to backport Keras 3 numerics APIs (`keras.ops` and `keras.random`) into
Keras 2. The sub-modules exposed are as follows:
- `config`: check which version of Keras is being run.
- `keras`: The full `keras` API with compat shims for older Keras versions.
- `ops`: `keras.ops` for Keras 3 or `keras_core.ops` for Keras 2.
- `random`: `keras.random` for Keras 3 or `keras_core.random` for Keras 2.
"""
from keras_nlp.backend import config
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
|
keras-nlp/keras_nlp/backend/__init__.py/0
|
{
"file_path": "keras-nlp/keras_nlp/backend/__init__.py",
"repo_id": "keras-nlp",
"token_count": 420
}
| 153 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
@keras_nlp_export("keras_nlp.layers.PositionEmbedding")
class PositionEmbedding(keras.layers.Layer):
    """A layer which learns a position embedding for input sequences.

    This class assumes that in the input tensor, the last dimension
    corresponds to the features, and the dimension before the last
    corresponds to the sequence.

    This layer does not support masking, but can be combined with a
    `keras.layers.Embedding` for padding mask support.

    Args:
        sequence_length: The maximum length of the dynamic sequence.
        initializer: The initializer to use for the embedding weights.
            Defaults to `"glorot_uniform"`.

    Call arguments:
        inputs: The tensor inputs to compute an embedding for, with shape
            `(batch_size, sequence_length, hidden_dim)`. Only the input shape
            will be used, as the position embedding does not depend on the
            input sequence content.
        start_index: An integer or integer tensor. The starting position to
            compute the position embedding from. This is useful during cached
            decoding, where each position is predicted separately in a loop.

    Examples:

    Called directly on input.
    >>> layer = keras_nlp.layers.PositionEmbedding(sequence_length=10)
    >>> layer(np.zeros((8, 10, 16)))

    Combine with a token embedding.
    ```python
    seq_length = 50
    vocab_size = 5000
    embed_dim = 128
    inputs = keras.Input(shape=(seq_length,))
    token_embeddings = keras.layers.Embedding(
        input_dim=vocab_size, output_dim=embed_dim
    )(inputs)
    position_embeddings = keras_nlp.layers.PositionEmbedding(
        sequence_length=seq_length
    )(token_embeddings)
    outputs = token_embeddings + position_embeddings
    ```

    Reference:
     - [Devlin et al., 2019](https://arxiv.org/abs/1810.04805)
    """
    def __init__(
        self,
        sequence_length,
        initializer="glorot_uniform",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # A fixed maximum length is required so the embedding table can be
        # allocated up front in `build()`.
        if sequence_length is None:
            raise ValueError(
                "`sequence_length` must be an Integer, received `None`."
            )
        self.sequence_length = int(sequence_length)
        self.initializer = keras.initializers.get(initializer)
    def get_config(self):
        """Return the serializable config for re-instantiating this layer."""
        config = super().get_config()
        config.update(
            {
                "sequence_length": self.sequence_length,
                "initializer": keras.initializers.serialize(self.initializer),
            }
        )
        return config
    def build(self, inputs_shape):
        # One learnable embedding vector per position, sized to the feature
        # (last) dimension of the inputs.
        feature_size = inputs_shape[-1]
        self.position_embeddings = self.add_weight(
            name="embeddings",
            shape=[self.sequence_length, feature_size],
            initializer=self.initializer,
            trainable=True,
        )
        self.built = True
    def call(self, inputs, start_index=0):
        shape = ops.shape(inputs)
        feature_length = shape[-1]
        sequence_length = shape[-2]
        # trim to match the length of the input sequence, which might be less
        # than the sequence_length of the layer. `start_index` offsets the
        # slice for cached decoding, where one position is computed per call.
        position_embeddings = ops.convert_to_tensor(self.position_embeddings)
        position_embeddings = ops.slice(
            position_embeddings,
            (start_index, 0),
            (sequence_length, feature_length),
        )
        return ops.broadcast_to(position_embeddings, shape)
    def compute_output_shape(self, input_shape):
        # Position embeddings are added elementwise, so shape is unchanged.
        return input_shape
|
keras-nlp/keras_nlp/layers/modeling/position_embedding.py/0
|
{
"file_path": "keras-nlp/keras_nlp/layers/modeling/position_embedding.py",
"repo_id": "keras-nlp",
"token_count": 1684
}
| 154 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.metrics.edit_distance import EditDistance
from keras_nlp.tests.test_case import TestCase
class EditDistanceTest(TestCase):
    """Tests for the `EditDistance` metric.

    Expected values below are word-level edit distances. With
    `normalize=True` (the default), the raw distance is divided by the
    number of words in `y_true` (e.g. 4 edits / 11 reference words ~= 0.364
    in the 1-d cases below); with `normalize=False` the raw count (summed
    and averaged over updates) is reported.
    """
    def test_initialization(self):
        # A fresh metric with no updates reports a distance of zero.
        edit_distance = EditDistance()
        result = edit_distance.result()
        self.assertEqual(result, 0.0)
    def test_1d_list_input_normalize(self):
        edit_distance = EditDistance()
        y_true = "the tiny little cat was found under the big funny bed".split()
        y_pred = "the cat was found under the bed".split()
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 0.364, delta=1e-3)
    def test_2d_list_input_normalize(self):
        edit_distance = EditDistance()
        y_true = [
            "the tiny little cat was found under the big funny bed".split(),
            "it is sunny today".split(),
        ]
        y_pred = [
            "the cat was found under the bed".split(),
            "it is sunny but with a hint of cloud cover".split(),
        ]
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 0.733, delta=1e-3)
    def test_1d_list_input_normalize_false(self):
        # Unnormalized: raw edit count (4 word edits) is reported directly.
        edit_distance = EditDistance(normalize=False)
        y_true = "the tiny little cat was found under the big funny bed".split()
        y_pred = "the cat was found under the bed".split()
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 4.0, delta=1e-3)
    def test_2d_list_input_normalize_false(self):
        edit_distance = EditDistance(normalize=False)
        y_true = [
            "the tiny little cat was found under the big funny bed".split(),
            "it is sunny today".split(),
        ]
        y_pred = [
            "the cat was found under the bed".split(),
            "it is sunny but with a hint of cloud cover".split(),
        ]
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 5.5, delta=1e-3)
    def test_tensor_input(self):
        # Ragged string tensors should give the same result as nested lists.
        edit_distance = EditDistance()
        y_true = tf.strings.split(
            [
                "the tiny little cat was found under the big funny bed",
                "it is sunny today",
            ]
        )
        y_pred = tf.strings.split(
            [
                "the cat was found under the bed",
                "it is sunny but with a hint of cloud cover",
            ]
        )
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 0.733, delta=1e-3)
    @pytest.mark.tf_only  # string model output only applies to tf.
    def test_model_compile_normalize(self):
        # The metric should work when attached to a compiled keras.Model.
        inputs = keras.Input(shape=(None,), dtype="string")
        outputs = keras.layers.Identity()(inputs)
        model = keras.Model(inputs, outputs)
        model.compile(metrics=[EditDistance()])
        y_pred = x = tf.strings.split(["the cat was found under the bed"])
        y = tf.strings.split(
            ["the tiny little cat was found under the big funny bed"]
        )
        output = model.compute_metrics(x, y, y_pred, sample_weight=None)
        self.assertAlmostEqual(output["edit_distance"], 0.364, delta=1e-3)
    @pytest.mark.tf_only  # string model output only applies to tf.
    def test_model_compile_normalize_false(self):
        inputs = keras.Input(shape=(None,), dtype="string")
        outputs = keras.layers.Identity()(inputs)
        model = keras.Model(inputs, outputs)
        model.compile(metrics=[EditDistance(normalize=False)])
        y_pred = x = tf.strings.split(["the cat was found under the bed"])
        y = tf.strings.split(
            ["the tiny little cat was found under the big funny bed"]
        )
        output = model.compute_metrics(x, y, y_pred, sample_weight=None)
        self.assertAlmostEqual(output["edit_distance"], 4.0, delta=1e-3)
    def test_rank_1_tensor_input_normalize(self):
        edit_distance = EditDistance()
        y_true = tf.strings.split(
            "the tiny little cat was found under the big funny bed"
        )
        y_pred = tf.strings.split("the cat was found under the bed")
        edit_distance_val = edit_distance(y_true, y_pred)
        self.assertAlmostEqual(edit_distance_val, 0.364, delta=1e-3)
    def test_reset_state_normalize(self):
        # `reset_state()` should drop all accumulated distance back to zero.
        edit_distance = EditDistance()
        y_true = [
            "the tiny little cat was found under the big funny bed".split(),
            "it is sunny today".split(),
        ]
        y_pred = [
            "the cat was found under the bed".split(),
            "it is sunny but with a hint of cloud cover".split(),
        ]
        edit_distance.update_state(y_true, y_pred)
        edit_distance_val = edit_distance.result()
        self.assertNotEqual(edit_distance_val, 0.0)
        edit_distance.reset_state()
        edit_distance_val = edit_distance.result()
        self.assertEqual(edit_distance_val, 0.0)
    def test_update_state_normalize(self):
        # Successive `update_state` calls accumulate across batches, so the
        # result after the second update reflects both batches.
        edit_distance = EditDistance()
        y_true_1 = [
            "the tiny little cat was found under the big funny bed".split(),
            "it is sunny today".split(),
        ]
        y_pred_1 = [
            "the cat was found under the bed".split(),
            "it is sunny but with a hint of cloud cover".split(),
        ]
        edit_distance.update_state(y_true_1, y_pred_1)
        edit_distance_val = edit_distance.result()
        self.assertAlmostEqual(edit_distance_val, 0.733, delta=1e-3)
        y_true_2 = tf.strings.split(["what is your favourite show"])
        y_pred_2 = tf.strings.split(["my favourite show is silicon valley"])
        edit_distance.update_state(y_true_2, y_pred_2)
        edit_distance_val = edit_distance.result()
        self.assertAlmostEqual(edit_distance_val, 0.85, delta=1e-3)
    def test_update_state_normalize_false(self):
        edit_distance = EditDistance(normalize=False)
        y_true_1 = [
            "the tiny little cat was found under the big funny bed".split(),
            "it is sunny today".split(),
        ]
        y_pred_1 = [
            "the cat was found under the bed".split(),
            "it is sunny but with a hint of cloud cover".split(),
        ]
        edit_distance.update_state(y_true_1, y_pred_1)
        edit_distance_val = edit_distance.result()
        self.assertAlmostEqual(edit_distance_val, 5.5, delta=1e-3)
        y_true_2 = tf.strings.split(["what is your favourite show"])
        y_pred_2 = tf.strings.split(["my favourite show is silicon valley"])
        edit_distance.update_state(y_true_2, y_pred_2)
        edit_distance_val = edit_distance.result()
        self.assertAlmostEqual(edit_distance_val, 5.667, delta=1e-3)
    def test_get_config(self):
        # `get_config` must round-trip the `normalize` flag.
        rouge = EditDistance(
            normalize=False,
            dtype="float32",
            name="edit_distance_test",
        )
        config = rouge.get_config()
        expected_config_subset = {
            "normalize": False,
        }
        self.assertEqual(config, {**config, **expected_config_subset})
|
keras-nlp/keras_nlp/metrics/edit_distance_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/metrics/edit_distance_test.py",
"repo_id": "keras-nlp",
"token_count": 3337
}
| 155 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.albert.albert_masked_lm_preprocessor import (
AlbertMaskedLMPreprocessor,
)
from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer
from keras_nlp.tests.test_case import TestCase
class AlbertMaskedLMPreprocessorTest(TestCase):
    """Tests for `AlbertMaskedLMPreprocessor`.

    The fixture masks every selectable token (`mask_selection_rate=1.0`,
    `mask_token_rate=1.0`), so the expected `token_ids` contain the mask
    token id (4) in place of every word-piece between the [CLS]/[SEP]
    markers (ids 2 and 3).
    """
    def setUp(self):
        self.tokenizer = AlbertTokenizer(
            # Generated using create_albert_test_proto.py
            proto=os.path.join(
                self.get_test_data_dir(), "albert_test_vocab.spm"
            )
        )
        self.init_kwargs = {
            "tokenizer": self.tokenizer,
            # Simplify our testing by masking every available token.
            "mask_selection_rate": 1.0,
            "mask_token_rate": 1.0,
            "random_token_rate": 0.0,
            "mask_selection_length": 4,
            "sequence_length": 12,
        }
        self.input_data = ["the quick brown fox"]
    def test_preprocessor_basics(self):
        # Expected output: (features, mask labels, mask sample weights).
        self.run_preprocessor_test(
            cls=AlbertMaskedLMPreprocessor,
            init_kwargs=self.init_kwargs,
            input_data=self.input_data,
            expected_output=(
                {
                    "token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]],
                    "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                    "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]],
                    "mask_positions": [[1, 2, 3, 4]],
                },
                [[5, 10, 6, 8]],
                [[1.0, 1.0, 1.0, 1.0]],
            ),
        )
    def test_no_masking_zero_rate(self):
        # With a zero selection rate no tokens are masked: token_ids keep
        # the original ids, and mask positions/labels/weights are all zero.
        no_mask_preprocessor = AlbertMaskedLMPreprocessor(
            self.tokenizer,
            mask_selection_rate=0.0,
            mask_selection_length=4,
            sequence_length=12,
        )
        input_data = ["the quick brown fox"]
        self.assertAllClose(
            no_mask_preprocessor(input_data),
            (
                {
                    "token_ids": [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]],
                    "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                    "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]],
                    "mask_positions": [[0, 0, 0, 0]],
                },
                [[0, 0, 0, 0]],
                [[0.0, 0.0, 0.0, 0.0]],
            ),
        )
    @pytest.mark.extra_large
    def test_all_presets(self):
        # Smoke test every registered preset (slow; gated by marker).
        for preset in AlbertMaskedLMPreprocessor.presets:
            self.run_preset_test(
                cls=AlbertMaskedLMPreprocessor,
                preset=preset,
                input_data=self.input_data,
            )
|
keras-nlp/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1664
}
| 156 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.bart.bart_seq_2_seq_lm_preprocessor import (
BartSeq2SeqLMPreprocessor,
)
from keras_nlp.models.bart.bart_tokenizer import BartTokenizer
from keras_nlp.tests.test_case import TestCase
class BartSeq2SeqLMPreprocessorTest(TestCase):
    """Tests for `BartSeq2SeqLMPreprocessor`.

    Uses a tiny hand-built BPE vocabulary/merge list so the expected token
    ids can be stated exactly.
    """
    def setUp(self):
        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
        self.vocab += ["port", "<mask>"]
        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
        self.merges += ["Ġai r", "Ġa i", "pla ne"]
        self.tokenizer = BartTokenizer(
            vocabulary=self.vocab, merges=self.merges
        )
        self.init_kwargs = {
            "tokenizer": self.tokenizer,
            "encoder_sequence_length": 5,
            "decoder_sequence_length": 8,
        }
        # NOTE(review): this is a one-element `(x,)` tuple rather than a bare
        # dict — presumably the (x, y, sw) convention of the test harness;
        # confirm against `run_preprocessor_test`.
        self.input_data = (
            {
                "encoder_text": [" airplane at airport"],
                "decoder_text": [" airplane airport"],
            },
        )
    def test_preprocessor_basics(self):
        # Expected output: (features, shifted decoder labels, label weights).
        self.run_preprocessor_test(
            cls=BartSeq2SeqLMPreprocessor,
            init_kwargs=self.init_kwargs,
            input_data=self.input_data,
            expected_output=(
                {
                    "encoder_token_ids": [[0, 4, 5, 6, 2]],
                    "encoder_padding_mask": [[1, 1, 1, 1, 1]],
                    "decoder_token_ids": [[2, 0, 4, 5, 4, 7, 2, 1]],
                    "decoder_padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
                },
                [[0, 4, 5, 4, 7, 2, 1, 1]],
                [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]],
            ),
            token_id_key="decoder_token_ids",
        )
    def test_generate_preprocess(self):
        # Generation-time preprocessing should not append the end token to
        # the decoder sequence (compare with `test_preprocessor_basics`).
        preprocessor = BartSeq2SeqLMPreprocessor(**self.init_kwargs)
        input_data = {
            "encoder_text": [" airplane at airport"],
            "decoder_text": [" airplane airport"],
        }
        output = preprocessor.generate_preprocess(input_data)
        self.assertAllClose(
            output,
            {
                "encoder_token_ids": [[0, 4, 5, 6, 2]],
                "encoder_padding_mask": [[1, 1, 1, 1, 1]],
                "decoder_token_ids": [[2, 0, 4, 5, 4, 7, 1, 1]],
                "decoder_padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]],
            },
        )
    def test_generate_postprocess(self):
        # Postprocessing detokenizes ids back to text, dropping specials.
        preprocessor = BartSeq2SeqLMPreprocessor(**self.init_kwargs)
        input_data = {
            "decoder_token_ids": [0, 4, 5, 6, 2],
            "decoder_padding_mask": [1, 1, 1, 1, 1],
        }
        output = preprocessor.generate_postprocess(input_data)
        self.assertAllEqual(output, " airplane at")
    @pytest.mark.extra_large
    def test_all_presets(self):
        # Smoke test every registered preset (slow; gated by marker).
        for preset in BartSeq2SeqLMPreprocessor.presets:
            self.run_preset_test(
                cls=BartSeq2SeqLMPreprocessor,
                preset=preset,
                input_data=self.input_data,
            )
|
keras-nlp/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1872
}
| 157 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.models.bert.bert_presets import backbone_presets
from keras_nlp.models.bert.bert_presets import classifier_presets
from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
from keras_nlp.utils.python_utils import classproperty
# All preset names (backbone and classifier), comma separated.
PRESET_NAMES = ", ".join([*backbone_presets, *classifier_presets])


@keras_nlp_export("keras_nlp.models.BertTokenizer")
class BertTokenizer(WordPieceTokenizer):
    """A BERT tokenizer backed by WordPiece subword segmentation.

    This tokenizer maps raw strings to integer token-id sequences via
    `keras_nlp.tokenizers.WordPieceTokenizer`. On top of plain WordPiece
    behavior, it verifies that the special tokens required by BERT models
    are present in the vocabulary, and offers a `from_preset()` constructor
    that downloads a matching vocabulary for a named BERT preset.

    No truncation or padding is performed here; pair this tokenizer with a
    `keras_nlp.models.BertPreprocessor` layer for input packing.

    Batched string input (rank > 0) yields a `tf.RaggedTensor` whose last
    dimension is ragged; a scalar string (rank == 0) yields a dense
    `tf.Tensor` with static shape `[None]`.

    Args:
        vocabulary: A list of strings, or a path to a plain-text file
            containing one WordPiece token per line.
        lowercase: If `True`, lowercase the input text before tokenizing.

    Examples:
    ```python
    # Pretrained vocabulary.
    tokenizer = keras_nlp.models.BertTokenizer.from_preset(
        "bert_base_en_uncased",
    )
    tokenizer("The quick brown fox jumped.")

    # Custom vocabulary.
    vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    vocab += ["The", "quick", "brown", "fox", "jumped", "."]
    tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab)
    tokenizer("The quick brown fox jumped.")

    # Detokenization.
    tokenizer.detokenize(tokenizer("The quick brown fox jumped."))
    ```
    """

    def __init__(
        self,
        vocabulary=None,
        lowercase=False,
        **kwargs,
    ):
        # Special tokens every BERT vocabulary is expected to contain.
        self.cls_token = "[CLS]"
        self.sep_token = "[SEP]"
        self.pad_token = "[PAD]"
        self.mask_token = "[MASK]"
        super().__init__(
            vocabulary=vocabulary,
            lowercase=lowercase,
            **kwargs,
        )

    def set_vocabulary(self, vocabulary):
        super().set_vocabulary(vocabulary)
        if vocabulary is None:
            # No vocabulary -> no ids can be resolved yet.
            self.cls_token_id = None
            self.sep_token_id = None
            self.pad_token_id = None
            self.mask_token_id = None
            return
        # Check for necessary special tokens.
        for token in [self.cls_token, self.pad_token, self.sep_token]:
            if token not in self.vocabulary:
                raise ValueError(
                    f"Cannot find token `'{token}'` in the provided "
                    f"`vocabulary`. Please provide `'{token}'` in your "
                    "`vocabulary` or use a pretrained `vocabulary` name."
                )
        self.cls_token_id = self.token_to_id(self.cls_token)
        self.sep_token_id = self.token_to_id(self.sep_token)
        self.pad_token_id = self.token_to_id(self.pad_token)
        self.mask_token_id = self.token_to_id(self.mask_token)

    @classproperty
    def presets(cls):
        # Merge backbone and classifier presets; deep copy so callers
        # cannot mutate the registry.
        merged = {**backbone_presets, **classifier_presets}
        return copy.deepcopy(merged)
|
keras-nlp/keras_nlp/models/bert/bert_tokenizer.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/bert/bert_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 1752
}
| 158 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.token_and_position_embedding import (
TokenAndPositionEmbedding,
)
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.distil_bert.distil_bert_presets import backbone_presets
from keras_nlp.utils.python_utils import classproperty
def distilbert_kernel_initializer(stddev=0.02):
    # Truncated-normal kernel initializer shared by all DistilBERT layers.
    return keras.initializers.TruncatedNormal(stddev=stddev)
@keras_nlp_export("keras_nlp.models.DistilBertBackbone")
class DistilBertBackbone(Backbone):
    """A DistilBERT encoder network.

    This network implements a bi-directional Transformer-based encoder as
    described in ["DistilBERT, a distilled version of BERT: smaller, faster,
    cheaper and lighter"](https://arxiv.org/abs/1910.01108). It includes the
    embedding lookups and transformer layers, but not the masked language
    model or classification task networks.

    The default constructor gives a fully customizable, randomly initialized
    DistilBERT encoder with any number of layers, heads, and embedding
    dimensions. To load preset architectures and weights, use the
    `from_preset()` constructor.

    Disclaimer: Pre-trained models are provided on an "as is" basis, without
    warranties or conditions of any kind. The underlying model is provided by
    a third party and subject to a separate license, available
    [here](https://github.com/huggingface/transformers).

    Args:
        vocabulary_size: int. The size of the token vocabulary.
        num_layers: int. The number of transformer layers.
        num_heads: int. The number of attention heads for each transformer.
            The hidden size must be divisible by the number of attention
            heads.
        hidden_dim: int. The size of the transformer encoding and pooler
            layers.
        intermediate_dim: int. The output dimension of the first Dense layer
            in a two-layer feedforward network for each transformer.
        dropout: float. Dropout probability for the Transformer encoder.
        max_sequence_length: int. The maximum sequence length that this
            encoder can consume. This determines the variable shape for
            positional embeddings.
        dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to
            use for model computations and weights. Note that some
            computations, such as softmax and layer normalization, will
            always be done at float32 precision regardless of dtype.

    Examples:
    ```python
    input_data = {
        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
    }

    # Pretrained DistilBERT encoder.
    model = keras_nlp.models.DistilBertBackbone.from_preset(
        "distil_bert_base_en_uncased"
    )
    model(input_data)

    # Randomly initialized DistilBERT encoder with custom config.
    model = keras_nlp.models.DistilBertBackbone(
        vocabulary_size=30552,
        num_layers=4,
        num_heads=4,
        hidden_dim=256,
        intermediate_dim=512,
        max_sequence_length=128,
    )
    model(input_data)
    ```
    """
    def __init__(
        self,
        vocabulary_size,
        num_layers,
        num_heads,
        hidden_dim,
        intermediate_dim,
        dropout=0.1,
        max_sequence_length=512,
        dtype=None,
        **kwargs,
    ):
        # === Layers ===
        # Combined token + learned position embedding (DistilBERT has no
        # segment embedding).
        self.embeddings = TokenAndPositionEmbedding(
            vocabulary_size=vocabulary_size,
            sequence_length=max_sequence_length,
            embedding_dim=hidden_dim,
            embeddings_initializer=distilbert_kernel_initializer(),
            dtype=dtype,
            name="token_and_position_embedding",
        )
        # Keep the token_embedding property for consistency across models.
        self.token_embedding = self.embeddings.token_embedding
        self.embeddings_layer_norm = keras.layers.LayerNormalization(
            axis=-1,
            epsilon=1e-12,
            dtype=dtype,
            name="embeddings_layer_norm",
        )
        self.embeddings_dropout = keras.layers.Dropout(
            dropout,
            dtype=dtype,
            name="embeddings_dropout",
        )
        self.transformer_layers = []
        for i in range(num_layers):
            layer = TransformerEncoder(
                num_heads=num_heads,
                intermediate_dim=intermediate_dim,
                activation="gelu",
                dropout=dropout,
                layer_norm_epsilon=1e-12,
                kernel_initializer=distilbert_kernel_initializer(),
                dtype=dtype,
                name=f"transformer_layer_{i}",
            )
            self.transformer_layers.append(layer)
        # === Functional Model ===
        # Inputs are padded token ids plus a 0/1 mask of valid positions.
        token_id_input = keras.Input(
            shape=(None,), dtype="int32", name="token_ids"
        )
        padding_mask_input = keras.Input(
            shape=(None,), dtype="int32", name="padding_mask"
        )
        x = self.embeddings(token_id_input)
        x = self.embeddings_layer_norm(x)
        x = self.embeddings_dropout(x)
        for transformer_layer in self.transformer_layers:
            x = transformer_layer(x, padding_mask=padding_mask_input)
        # Output is the full sequence of hidden states (no pooling here).
        super().__init__(
            inputs={
                "token_ids": token_id_input,
                "padding_mask": padding_mask_input,
            },
            outputs=x,
            **kwargs,
        )
        # === Config ===
        self.vocabulary_size = vocabulary_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.intermediate_dim = intermediate_dim
        self.dropout = dropout
        self.max_sequence_length = max_sequence_length
        # Index of the [CLS] position, used by downstream classifiers.
        self.cls_token_index = 0
    def get_config(self):
        """Return the serializable config for re-instantiating the model."""
        config = super().get_config()
        config.update(
            {
                "vocabulary_size": self.vocabulary_size,
                "num_layers": self.num_layers,
                "num_heads": self.num_heads,
                "hidden_dim": self.hidden_dim,
                "intermediate_dim": self.intermediate_dim,
                "dropout": self.dropout,
                "max_sequence_length": self.max_sequence_length,
            }
        )
        return config
    @classproperty
    def presets(cls):
        # Deep copy so callers cannot mutate the preset registry.
        return copy.deepcopy(backbone_presets)
|
keras-nlp/keras_nlp/models/distil_bert/distil_bert_backbone.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/distil_bert/distil_bert_backbone.py",
"repo_id": "keras-nlp",
"token_count": 3065
}
| 159 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers import WordPieceTokenizer
@keras_nlp_export("keras_nlp.models.ElectraTokenizer")
class ElectraTokenizer(WordPieceTokenizer):
    """An ELECTRA tokenizer backed by WordPiece subword segmentation.

    This tokenizer maps raw strings to integer token-id sequences via
    `keras_nlp.tokenizers.WordPieceTokenizer`, and verifies that the special
    tokens required by ELECTRA models are present in the vocabulary.

    Batched string input (rank > 0) yields a `tf.RaggedTensor` whose last
    dimension is ragged; a scalar string (rank == 0) yields a dense
    `tf.Tensor` with static shape `[None]`.

    Args:
        vocabulary: A list of strings, or a path to a plain-text file
            containing one WordPiece token per line.
        lowercase: If `True`, lowercase the input text before tokenizing.

    Examples:
    ```python
    # Custom vocabulary.
    vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    vocab += ["The", "quick", "brown", "fox", "jumped", "."]
    tokenizer = keras_nlp.models.ElectraTokenizer(vocabulary=vocab)

    # Tokenize a single string or a batch.
    tokenizer("The quick brown fox jumped.")
    tokenizer(["The quick brown fox jumped.", "The fox slept."])

    # Detokenization.
    tokenizer.detokenize(tokenizer("The quick brown fox jumped."))
    ```
    """

    def __init__(self, vocabulary, lowercase=False, **kwargs):
        # Special tokens every ELECTRA vocabulary is expected to contain.
        self.cls_token = "[CLS]"
        self.sep_token = "[SEP]"
        self.pad_token = "[PAD]"
        self.mask_token = "[MASK]"
        super().__init__(vocabulary=vocabulary, lowercase=lowercase, **kwargs)

    def set_vocabulary(self, vocabulary):
        super().set_vocabulary(vocabulary)
        if vocabulary is None:
            # No vocabulary -> no ids can be resolved yet.
            self.cls_token_id = None
            self.sep_token_id = None
            self.pad_token_id = None
            self.mask_token_id = None
            return
        # Check for necessary special tokens.
        for token in [self.cls_token, self.pad_token, self.sep_token]:
            if token not in self.vocabulary:
                raise ValueError(
                    f"Cannot find token `'{token}'` in the provided "
                    f"`vocabulary`. Please provide `'{token}'` in your "
                    "`vocabulary` or use a pretrained `vocabulary` name."
                )
        self.cls_token_id = self.token_to_id(self.cls_token)
        self.sep_token_id = self.token_to_id(self.sep_token)
        self.pad_token_id = self.token_to_id(self.pad_token)
        self.mask_token_id = self.token_to_id(self.mask_token)
|
keras-nlp/keras_nlp/models/electra/electra_tokenizer.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/electra/electra_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 1378
}
| 160 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import tensorflow as tf
import tree
from keras_nlp.backend import config
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.models.task import Task
from keras_nlp.samplers.serialization import get as get_sampler
from keras_nlp.utils.tensor_utils import tensor_to_list
@keras.saving.register_keras_serializable(package="keras_nlp")
class GenerativeTask(Task):
"""Base class for Generative Task models."""
def compile(
self,
*args,
run_eagerly=False,
jit_compile=True,
sampler="top_k",
**kwargs,
):
xla_compatible = True
super().compile(
*args,
run_eagerly=run_eagerly,
# Only `jit_compile` if not eager and in a compatible environment.
jit_compile=jit_compile and xla_compatible and not run_eagerly,
**kwargs,
)
self._sampler = get_sampler(sampler)
# Clear the compiled generate function.
self.generate_function = None
    def generate_step(self):
        """Run generation on a single batch of input.

        Subclasses must override this with a model-specific decoding step;
        the base class only defines the interface.
        """
        raise NotImplementedError
    def make_generate_function(self):
        """Create or return the compiled generation function.

        The wrapping is backend specific: torch disables gradient tracking,
        tensorflow uses `tf.function`, and jax uses `jax.jit` with variable
        state threaded explicitly through the compiled call. The result is
        cached on `self.generate_function`.
        """
        if self.generate_function is not None:
            return self.generate_function
        # Default: run `generate_step` uncompiled.
        self.generate_function = self.generate_step
        if config.backend() == "torch":
            import torch
            # Torch: no compilation, just disable autograd during decoding.
            def wrapped_generate_function(
                inputs,
                end_token_id=None,
            ):
                with torch.no_grad():
                    return self.generate_step(inputs, end_token_id)
            self.generate_function = wrapped_generate_function
        elif config.backend() == "tensorflow" and not self.run_eagerly:
            # `jit_compile` is a property of keras.Model after TF 2.12.
            # Use `getattr()` for backwards compatibility.
            jit_compile = getattr(self, "jit_compile", True)
            self.generate_function = tf.function(
                self.generate_step, jit_compile=jit_compile
            )
        elif config.backend() == "jax" and not self.run_eagerly:
            import jax
            # jax.jit requires a pure function, so all variable state is
            # passed in explicitly and updated state is returned.
            @jax.jit
            def compiled_generate_function(inputs, end_token_id, state):
                (
                    sampler_variables,
                    trainable_variables,
                    non_trainable_variables,
                ) = state
                mapping = itertools.chain(
                    zip(self._sampler.variables, sampler_variables),
                    zip(self.trainable_variables, trainable_variables),
                    zip(self.non_trainable_variables, non_trainable_variables),
                )
                with keras.StatelessScope(state_mapping=mapping) as scope:
                    outputs = self.generate_step(inputs, end_token_id)
                # Get updated sampler variables from the stateless scope.
                sampler_variables = []
                for v in self._sampler.variables:
                    new_v = scope.get_current_value(v)
                    sampler_variables.append(new_v if new_v is not None else v)
                return outputs, sampler_variables
            def wrapped_generate_function(
                inputs,
                end_token_id=None,
            ):
                # Create an explicit tuple of all variable state.
                state = (
                    self._sampler.variables,
                    # Use the explicit variable.value to preserve the
                    # sharding spec of distribution.
                    [v.value for v in self.trainable_variables],
                    [v.value for v in self.non_trainable_variables],
                )
                inputs = tree.map_structure(ops.convert_to_tensor, inputs)
                outputs, sampler_variables = compiled_generate_function(
                    inputs,
                    end_token_id,
                    state,
                )
                # Only assign the sampler variables (random seeds), as other
                # model variables should never be updated in generation.
                for ref_v, v in zip(self._sampler.variables, sampler_variables):
                    ref_v.assign(v)
                return outputs
            self.generate_function = wrapped_generate_function
        return self.generate_function
def _normalize_generate_inputs(
self,
inputs,
):
"""Normalize user input to the generate function.
This function coverts all inputs to tensors, adds a batch dimension if
necessary, and returns a iterable "dataset like" object (either an
actual `tf.data.Dataset` or a list with a single batch element).
"""
input_is_scalar = False
if isinstance(inputs, tf.data.Dataset):
return inputs, input_is_scalar
def normalize(x):
x_is_scalar = False
if isinstance(x, str) or isinstance(x, list):
x = tf.convert_to_tensor(x)
if isinstance(x, tf.Tensor) and x.shape.rank == 0:
x_is_scalar = True
x = x[tf.newaxis]
return x, x_is_scalar
if isinstance(inputs, dict):
for key in inputs:
inputs[key], input_is_scalar = normalize(inputs[key])
else:
inputs, input_is_scalar = normalize(inputs)
# We avoid converting to a dataset purely for speed, for a single batch
# of input, creating a dataset would add significant overhead.
return [inputs], input_is_scalar
    def _normalize_generate_outputs(
        self,
        outputs,
        input_is_scalar,
    ):
        """Normalize user output from the generate function.

        This function converts all output to numpy (for integer output), or
        python strings (for string output). If a batch dimension was added to
        the input, it is removed from the output (so generate can be string in,
        string out).
        """
        def normalize(x):
            # Lists of python strings: flatten the per-batch lists into one
            # flat list of outputs.
            if isinstance(x[0], list):
                outputs = []
                for batch in x:
                    for e in batch:
                        outputs.append(e)
                return outputs[0] if input_is_scalar else outputs
            # String tensors: concatenate batches and convert to python
            # strings.
            if isinstance(x[0], tf.Tensor) and x[0].dtype == tf.string:
                outputs = tf.concat(x, axis=0)
                outputs = tf.squeeze(outputs, 0) if input_is_scalar else outputs
                return tensor_to_list(outputs)
            # Dense (integer) tensors: concatenate batches and convert to
            # numpy.
            outputs = ops.concatenate(x, axis=0)
            outputs = ops.squeeze(outputs, 0) if input_is_scalar else outputs
            return ops.convert_to_numpy(outputs)
        # NOTE: assumes every batch dict has the same keys as the first.
        if isinstance(outputs[0], dict):
            normalized = {}
            for key in outputs[0]:
                normalized[key] = normalize([x[key] for x in outputs])
            return normalized
        return normalize([x for x in outputs])
    def generate(
        self,
        inputs,
        max_length=None,
    ):
        """Generate text given prompt `inputs`.

        This method generates text based on given `inputs`. The sampling method
        used for generation can be set via the `compile()` method.

        If `inputs` are a `tf.data.Dataset`, outputs will be generated
        "batch-by-batch" and concatenated. Otherwise, all inputs will be handled
        as a single batch.

        If a `preprocessor` is attached to the model, `inputs` will be
        preprocessed inside the `generate()` function and should match the
        structure expected by the `preprocessor` layer (usually raw strings).
        If a `preprocessor` is not attached, inputs should match the structure
        expected by the `backbone`. See the example usage above for a
        demonstration of each.

        Args:
            inputs: python data, tensor data, or a `tf.data.Dataset`. If a
                `preprocessor` is attached to the model, `inputs` should match
                the structure expected by the `preprocessor` layer. If a
                `preprocessor` is not attached, `inputs` should match the
                structure expected by the `backbone` model.
            max_length: Optional. int. The max length of the generated sequence.
                Will default to the max configured `sequence_length` of the
                `preprocessor`. If `preprocessor` is `None`, `inputs` should
                be padded to the desired maximum length and this argument
                will be ignored.
        """
        # Setup our three main passes.
        # 1. Optionally preprocessing strings to dense integer tensors.
        # 2. Generate new tokens via a compiled function on dense tensors.
        # 3. Optionally postprocess dense integer tensors back to string.
        generate_function = self.make_generate_function()
        # With a preprocessor, the tokenizer's end token id is forwarded to
        # `generate_step` (presumably to allow early stopping — behavior is
        # defined by the subclass). Without one, it stays `None`.
        end_token_id = None
        if self.preprocessor is not None:
            end_token_id = self.preprocessor.tokenizer.end_token_id
        def preprocess(x):
            return self.preprocessor.generate_preprocess(
                x, sequence_length=max_length
            )
        def generate(x):
            return generate_function(x, end_token_id=end_token_id)
        def postprocess(x):
            return self.preprocessor.generate_postprocess(x)
        # Normalize inputs, apply our three passes, and normalize outputs.
        inputs, input_is_scalar = self._normalize_generate_inputs(inputs)
        if self.preprocessor is not None:
            if isinstance(inputs, tf.data.Dataset):
                inputs = inputs.map(preprocess, tf.data.AUTOTUNE)
                inputs = inputs.prefetch(tf.data.AUTOTUNE)
            else:
                # Fast path for non-dataset, single-batch input.
                inputs = [preprocess(x) for x in inputs]
        outputs = [generate(x) for x in inputs]
        if self.preprocessor is not None:
            outputs = [postprocess(x) for x in outputs]
        return self._normalize_generate_outputs(outputs, input_is_scalar)
|
keras-nlp/keras_nlp/models/generative_task.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/generative_task.py",
"repo_id": "keras-nlp",
"token_count": 4875
}
| 161 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
@keras_nlp_export("keras_nlp.models.LlamaTokenizer")
class LlamaTokenizer(SentencePieceTokenizer):
"""Llama tokenizer layer based on SentencePiece.
This tokenizer class will tokenize raw strings into integer sequences and
is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the
underlying tokenizer, it will check for all special tokens needed by
Llama models and provides a `from_preset()` method to automatically
download a matching vocabulary for a Llama preset.
This tokenizer does not provide truncation or padding of inputs. It can be
combined with a `keras_nlp.models.LlamaPreprocessor` layer for input
packing.
If input is a batch of strings (rank > 0), the layer will output a
`tf.RaggedTensor` where the last dimension of the output is ragged.
If input is a scalar string (rank == 0), the layer will output a dense
`tf.Tensor` with static shape `[None]`.
Args:
proto: Either a `string` path to a SentencePiece proto file, or a
`bytes` object with a serialized SentencePiece proto. See the
[SentencePiece repository](https://github.com/google/sentencepiece)
for more details on the format.
Examples:
```python
# Unbatched input.
tokenizer = keras_nlp.models.LlamaTokenizer.from_preset(
"llama_7b_en",
)
tokenizer("The quick brown fox jumped.")
# Batched input.
tokenizer(["The quick brown fox jumped.", "The fox slept."])
# Detokenization.
tokenizer.detokenize(tokenizer("The quick brown fox jumped."))
```
"""
def __init__(self, proto, **kwargs):
self.start_token = "<s>"
self.end_token = "</s>"
super().__init__(proto=proto, **kwargs)
def set_proto(self, proto):
super().set_proto(proto)
if proto is not None:
for token in [self.start_token, self.end_token]:
if token not in self.get_vocabulary():
raise ValueError(
f"Cannot find token `'{token}'` in the provided "
f"`vocabulary`. Please provide `'{token}'` in your "
"`vocabulary` or use a pretrained `vocabulary` name."
)
self.start_token_id = self.token_to_id(self.start_token)
self.end_token_id = self.token_to_id(self.end_token)
self.pad_token_id = 0
else:
self.start_token_id = None
self.end_token_id = None
self.pad_token_id = None
|
keras-nlp/keras_nlp/models/llama/llama_tokenizer.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/llama/llama_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 1256
}
| 162 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.transformer_layer_utils import (
compute_causal_mask,
)
from keras_nlp.layers.modeling.transformer_layer_utils import (
merge_padding_and_attention_mask,
)
from keras_nlp.models.mistral.mistral_attention import CachedMistralAttention
from keras_nlp.models.mistral.mistral_layer_norm import (
MistralLayerNormalization,
)
from keras_nlp.utils.keras_utils import clone_initializer
class MistralTransformerDecoder(keras.layers.Layer):
    """A Transformer decoder layer for the Mistral backbone.

    A pre-normalization decoder block: each of the two sub-blocks
    (cached self attention with a sliding-window mask, and a gated
    feedforward) normalizes its input first and adds a residual
    connection afterwards.

    Args:
        intermediate_dim: int. Hidden size of the gated feedforward.
        num_query_heads: int. Number of attention query heads.
        num_key_value_heads: int. Number of key/value heads.
        rope_max_wavelength: int. Max wavelength passed to the rotary
            embedding inside the attention layer.
        rope_scaling_factor: float. Scaling factor for rotary embeddings.
        activation: Activation applied on the feedforward gate branch.
        layer_norm_epsilon: float. Epsilon for both layer norms.
        kernel_initializer: Initializer for all dense kernels.
        sliding_window: int. Width of the banded attention mask; tokens
            attend at most this many positions into the past.
        dropout: float. Dropout rate applied to attention output.
    """
    def __init__(
        self,
        intermediate_dim,
        num_query_heads,
        num_key_value_heads,
        rope_max_wavelength=10000,
        rope_scaling_factor=1.0,
        activation="silu",
        layer_norm_epsilon=1e-5,
        kernel_initializer="glorot_uniform",
        sliding_window=512,
        dropout=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.intermediate_dim = intermediate_dim
        self.num_query_heads = num_query_heads
        self.num_key_value_heads = num_key_value_heads
        self.rope_max_wavelength = rope_max_wavelength
        self.rope_scaling_factor = rope_scaling_factor
        self.dropout = dropout
        self.sliding_window = sliding_window
        self.activation = keras.activations.get(activation)
        self.layer_norm_epsilon = layer_norm_epsilon
        self.kernel_initializer = keras.initializers.get(kernel_initializer)
        self.supports_masking = True
    def build(self, decoder_sequence_shape):
        self._decoder_sequence_shape = decoder_sequence_shape
        self.hidden_dim = decoder_sequence_shape[-1]
        # Self attention layer.
        self._self_attention_layer = CachedMistralAttention(
            num_query_heads=self.num_query_heads,
            num_key_value_heads=self.num_key_value_heads,
            rope_max_wavelength=self.rope_max_wavelength,
            rope_scaling_factor=self.rope_scaling_factor,
            sliding_window=self.sliding_window,
            # Clone so each sub-layer gets an independent initializer
            # instance.
            kernel_initializer=clone_initializer(self.kernel_initializer),
            dropout=self.dropout,
            dtype=self.dtype_policy,
            name="self_attention",
        )
        self._self_attention_layer.build(decoder_sequence_shape)
        # Pre-attention normalization.
        self._self_attention_layernorm = MistralLayerNormalization(
            epsilon=self.layer_norm_epsilon,
            dtype=self.dtype_policy,
            name="self_attention_layernorm",
        )
        self._self_attention_layernorm.build(decoder_sequence_shape)
        self._self_attention_dropout = keras.layers.Dropout(
            rate=self.dropout,
            dtype=self.dtype_policy,
            name="self_attention_dropout",
        )
        # Feedforward layers. The gate branch carries the activation; its
        # output multiplies the (linear) intermediate branch in `call`.
        self._feedforward_intermediate_dense = keras.layers.Dense(
            self.intermediate_dim,
            kernel_initializer=clone_initializer(self.kernel_initializer),
            use_bias=False,
            dtype=self.dtype_policy,
            name="feedforward_intermediate_dense",
        )
        self._feedforward_intermediate_dense.build(decoder_sequence_shape)
        self._feedforward_gate_dense = keras.layers.Dense(
            self.intermediate_dim,
            activation=self.activation,
            kernel_initializer=clone_initializer(self.kernel_initializer),
            use_bias=False,
            dtype=self.dtype_policy,
            name="feedforward_gate_dense",
        )
        self._feedforward_gate_dense.build(decoder_sequence_shape)
        self._feedforward_output_dense = keras.layers.Dense(
            self.hidden_dim,
            kernel_initializer=clone_initializer(self.kernel_initializer),
            use_bias=False,
            dtype=self.dtype_policy,
            name="feedforward_output_dense",
        )
        self._feedforward_output_dense.build(
            self._feedforward_gate_dense.compute_output_shape(
                decoder_sequence_shape
            )
        )
        # Pre-feedforward normalization.
        self._feedforward_layernorm = MistralLayerNormalization(
            epsilon=self.layer_norm_epsilon,
            dtype=self.dtype_policy,
            name="feedforward_layernorm",
        )
        self._feedforward_layernorm.build(decoder_sequence_shape)
        self.built = True
    def call(
        self,
        decoder_sequence,
        decoder_padding_mask=None,
        decoder_attention_mask=None,
        self_attention_cache=None,
        self_attention_cache_update_index=None,
        training=None,
    ):
        self_attention_mask = self._compute_self_attention_mask(
            decoder_sequence=decoder_sequence,
            decoder_padding_mask=decoder_padding_mask,
            decoder_attention_mask=decoder_attention_mask,
            self_attention_cache=self_attention_cache,
            self_attention_cache_update_index=self_attention_cache_update_index,
        )
        residual = decoder_sequence
        # Pre-normalization: normalize before attention, residual after.
        x = self._self_attention_layernorm(decoder_sequence)
        # Self attention block.
        x = self._self_attention_layer(
            hidden_states=x,
            attention_mask=self_attention_mask,
            cache=self_attention_cache,
            cache_update_index=self_attention_cache_update_index,
        )
        # With a cache, the attention layer returns an (output, cache) pair.
        if self_attention_cache is not None:
            x, self_attention_cache = x
        x = self._self_attention_dropout(x, training=training)
        x = x + residual
        residual = x
        x = self._feedforward_layernorm(x)
        gate_output = self._feedforward_gate_dense(x)
        x = self._feedforward_intermediate_dense(x)
        # Gated feedforward: elementwise product of the activated gate and
        # the linear intermediate projection.
        x = self._feedforward_output_dense(ops.multiply(x, gate_output))
        decoder_output = x + residual
        if self_attention_cache is not None:
            return decoder_output, self_attention_cache
        return decoder_output
    def _compute_self_attention_mask(
        self,
        decoder_sequence,
        decoder_padding_mask,
        decoder_attention_mask,
        self_attention_cache,
        self_attention_cache_update_index,
    ):
        decoder_mask = merge_padding_and_attention_mask(
            decoder_sequence, decoder_padding_mask, decoder_attention_mask
        )
        batch_size = ops.shape(decoder_sequence)[0]
        input_length = output_length = ops.shape(decoder_sequence)[1]
        # We need to handle a rectangular causal mask when doing cached
        # decoding. For generative inference, `decoder_sequence` will
        # generally be length 1, and `cache` will be the full generation length.
        if self_attention_cache is not None:
            input_length = ops.shape(self_attention_cache)[2]
        cache_update_index = (
            0
            if self_attention_cache_update_index is None
            else self_attention_cache_update_index
        )
        # Mistral uses a banded attention mask
        causal_mask_lower = compute_causal_mask(
            batch_size, input_length, output_length, cache_update_index
        )
        # Below is a workaround for `ops.triu` for Keras 2.
        # TODO(tirthasheshpatel): Use `ops.triu` once Keras 2 support is removed.
        # causal_mask = ops.triu(causal_mask_lower, k=-self.sliding_window)
        # The upper band zeroes out attention to positions more than
        # `sliding_window` tokens in the past.
        i = ops.arange(output_length)[:, None] + cache_update_index
        j = ops.arange(input_length)[None, :]
        causal_mask_upper = ops.cast(i < j + self.sliding_window, "int32")
        causal_mask = ops.minimum(causal_mask_lower, causal_mask_upper)
        return (
            ops.minimum(decoder_mask, causal_mask)
            if decoder_mask is not None
            else causal_mask
        )
    def compute_output_shape(self, decoder_sequence_shape):
        # The decoder preserves the input shape.
        return decoder_sequence_shape
    def get_config(self):
        # Include every constructor argument so the layer can be re-created
        # from its config.
        config = super().get_config()
        config.update(
            {
                "intermediate_dim": self.intermediate_dim,
                "num_query_heads": self.num_query_heads,
                "rope_max_wavelength": self.rope_max_wavelength,
                "rope_scaling_factor": self.rope_scaling_factor,
                "num_key_value_heads": self.num_key_value_heads,
                "sliding_window": self.sliding_window,
                "activation": keras.activations.serialize(self.activation),
                "layer_norm_epsilon": self.layer_norm_epsilon,
                "kernel_initializer": keras.initializers.serialize(
                    self.kernel_initializer
                ),
                "dropout": self.dropout,
            }
        )
        return config
|
keras-nlp/keras_nlp/models/mistral/mistral_transformer_decoder.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/mistral/mistral_transformer_decoder.py",
"repo_id": "keras-nlp",
"token_count": 4161
}
| 163 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_nlp.backend import keras
from keras_nlp.backend import ops
class T5MultiHeadAttention(keras.layers.Layer):
    # This layer is adapted from Hugging Face
    # Ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_tf_t5.py
    """Multi-head attention with T5-style relative position bias.

    Supports self attention and (via `key_value_states`) cross attention,
    cached decoding (via `past_key_value`), and an optional learned
    relative attention bias (`use_relative_attention_bias`).
    """
    def __init__(
        self,
        is_decoder,
        hidden_dim,
        key_value_dim,
        num_heads,
        dropout,
        use_relative_attention_bias=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.is_decoder = is_decoder
        self.hidden_dim = hidden_dim
        self.key_value_dim = key_value_dim
        self.num_heads = num_heads
        self.use_relative_attention_bias = use_relative_attention_bias
        self.inner_dim = self.num_heads * self.key_value_dim
        # Bucketing parameters for the relative position bias.
        self.relative_attention_buckets = 32
        self.relative_attention_max_distance = 128
        # Initializer stddevs follow the reference implementation linked
        # above.
        self.query_projector = keras.layers.Dense(
            self.inner_dim,
            use_bias=False,
            kernel_initializer=keras.initializers.RandomNormal(
                mean=0, stddev=(self.inner_dim * self.key_value_dim) ** -0.5
            ),
            dtype=self.dtype_policy,
            name="query_projector",
        )
        self.key_projector = keras.layers.Dense(
            self.inner_dim,
            use_bias=False,
            kernel_initializer=keras.initializers.RandomNormal(
                mean=0, stddev=self.inner_dim**-0.5
            ),
            dtype=self.dtype_policy,
            name="key_projector",
        )
        self.value_projector = keras.layers.Dense(
            self.inner_dim,
            use_bias=False,
            kernel_initializer=keras.initializers.RandomNormal(
                mean=0, stddev=self.inner_dim**-0.5
            ),
            dtype=self.dtype_policy,
            name="value_projector",
        )
        self.output_projector = keras.layers.Dense(
            self.hidden_dim,
            use_bias=False,
            kernel_initializer=keras.initializers.RandomNormal(
                mean=0, stddev=self.inner_dim**-0.5
            ),
            dtype=self.dtype_policy,
            name="output_projector",
        )
        self.dropout_layer = keras.layers.Dropout(
            dropout,
            dtype=self.dtype_policy,
        )
        if self.use_relative_attention_bias:
            # One learned bias value per (bucket, head).
            self.relative_attention_bias = self.add_weight(
                name="embeddings",
                shape=[self.relative_attention_buckets, self.num_heads],
                initializer=keras.initializers.RandomNormal(
                    mean=0, stddev=self.inner_dim**-0.5
                ),
            )
    @staticmethod
    def _relative_position_bucket(
        relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        """Adapted from Mesh Tensorflow.

        Translate relative position to a bucket number for relative attention.
        The relative position is defined as memory_position - query_position,
        i.e. the distance in tokens from the attending position to the
        attended-to position. If bidirectional=False, then positive relative
        positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute
        relative_positions. All relative positions >= max_distance map to
        the same bucket. All relative positions <= -max_distance map to
        the same bucket. This should allow for more graceful generalization to
        longer sequences than the model has been trained on.

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            Tensor with the same shape as relative_position,
            containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            # Split the buckets: half for positive offsets, half for
            # negative.
            num_buckets //= 2
            relative_buckets += (
                ops.cast(
                    ops.greater(relative_position, 0),
                    dtype=relative_position.dtype,
                )
                * num_buckets
            )
            relative_position = ops.abs(relative_position)
        else:
            relative_position = -ops.minimum(relative_position, 0)
        # now n is in the range [0, inf)
        # Half of the buckets map exactly; the other half grow
        # logarithmically up to `max_distance`.
        max_exact = num_buckets // 2
        is_small = ops.less(relative_position, max_exact)
        relative_position_if_large = max_exact + ops.cast(
            ops.log(
                ops.cast(relative_position, "float32")
                / ops.cast(max_exact, "float32")
            )
            / ops.cast(ops.log(max_distance / max_exact), "float32")
            * (num_buckets - max_exact),
            dtype=relative_position.dtype,
        )
        relative_position_if_large = ops.minimum(
            relative_position_if_large, num_buckets - 1
        )
        relative_buckets += ops.where(
            is_small, relative_position, relative_position_if_large
        )
        return relative_buckets
    def compute_bias(self, query_length, key_length):
        """Compute binned relative position bias"""
        context_position = ops.arange(query_length)[:, None]
        memory_position = ops.arange(key_length)[None, :]
        relative_position = (
            memory_position - context_position
        )  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = ops.take(
            self.relative_attention_bias, relative_position_bucket, axis=0
        )  # shape (query_length, key_length, num_heads)
        values = ops.expand_dims(
            ops.transpose(values, axes=(2, 0, 1)), axis=0
        )  # shape (1, num_heads, query_length, key_length)
        return values
    def call(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        training=False,
    ):
        # Input is (batch_size, query_length, dim)
        # past_key_value[0] is (batch_size, num_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = ops.shape(hidden_states)[:2]
        real_seq_length = seq_length
        if past_key_value is not None:
            if len(past_key_value) != 2:
                raise ValueError(
                    f"Argument `past_key_value` should have 2 past states: "
                    f"keys and values. Got {len(past_key_value)} past states."
                )
            # Cached decoding: the effective query length includes the
            # cached positions.
            real_seq_length += (
                ops.shape(past_key_value[0])[2]
                if query_length is None
                else query_length
            )
        key_length = (
            real_seq_length
            if key_value_states is None
            else ops.shape(key_value_states)[1]
        )
        def shape(hidden_states):
            # (batch, seq, inner) -> (batch, num_heads, seq, dim_per_head)
            return ops.transpose(
                ops.reshape(
                    hidden_states,
                    (batch_size, -1, self.num_heads, self.key_value_dim),
                ),
                axes=(0, 2, 1, 3),
            )
        def unshape(hidden_states):
            # Inverse of `shape`: back to (batch, seq, inner).
            return ops.reshape(
                ops.transpose(hidden_states, axes=(0, 2, 1, 3)),
                (batch_size, -1, self.inner_dim),
            )
        def project(
            hidden_states, proj_layer, key_value_states, past_key_value
        ):
            """projects hidden states correctly to key/query states"""
            if key_value_states is None:
                # self-attention
                # (batch_size, num_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(hidden_states))
            elif past_key_value is None:
                # cross-attention
                # (batch_size, num_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(key_value_states))
            if past_key_value is not None:
                if key_value_states is None:
                    # self-attention
                    # (batch_size, num_heads, key_length, dim_per_head)
                    hidden_states = ops.concat(
                        [past_key_value, hidden_states], axis=2
                    )
                else:
                    # cross-attention
                    hidden_states = past_key_value
            return hidden_states
        # get query
        query_states = shape(
            self.query_projector(hidden_states)
        )  # (batch_size, num_heads, query_length, dim_per_head)
        # get key/value
        key_states = project(
            hidden_states,
            self.key_projector,
            key_value_states,
            past_key_value[0] if past_key_value is not None else None,
        )
        value_states = project(
            hidden_states,
            self.value_projector,
            key_value_states,
            past_key_value[1] if past_key_value is not None else None,
        )
        # NOTE: no 1/sqrt(d) score scaling here, matching the reference
        # implementation linked at the top of the class.
        scores = ops.einsum(
            "bnqd,bnkd->bnqk", query_states, key_states
        )  # (batch_size, num_heads, query_length, key_length)
        if position_bias is None:
            if not self.use_relative_attention_bias:
                position_bias = ops.zeros(
                    (1, self.num_heads, real_seq_length, key_length),
                    self.compute_dtype,
                )
            else:
                position_bias = self.compute_bias(real_seq_length, key_length)
            # if key and values are already calculated we want only
            # the last query position bias
            if past_key_value is not None:
                if not self.use_relative_attention_bias:
                    position_bias = position_bias[:, :, -seq_length:, :]
                else:
                    # we might have a padded past structure,
                    # in which case we want to fetch the position bias slice
                    # right after the most recently filled past index
                    most_recently_filled_past_index = ops.amax(
                        ops.where(past_key_value[0][0, 0, :, 0] != 0.0)
                    )
                    position_bias = ops.slice(
                        position_bias,
                        (0, 0, most_recently_filled_past_index + 1, 0),
                        (1, self.num_heads, seq_length, real_seq_length),
                    )
        if mask is not None:
            # Add a new mask axis for the head dim.
            mask = mask[:, np.newaxis, :, :]
            # Add a very large negative position bias for masked positions.
            mask = (1.0 - ops.cast(mask, position_bias.dtype)) * -1e9
            position_bias = position_bias + mask
        scores += ops.cast(position_bias, scores.dtype)
        weights = ops.nn.softmax(
            scores, axis=-1
        )  # (batch_size, num_heads, query_length, key_length)
        weights = self.dropout_layer(
            weights, training=training
        )  # (batch_size, num_heads, query_length, key_length)
        # Optionally mask heads
        if layer_head_mask is not None:
            weights = ops.reshape(layer_head_mask, (1, -1, 1, 1)) * weights
        attention_output = ops.matmul(
            weights, value_states
        )  # (batch_size, num_heads, query_length, dim_per_head)
        attention_output = self.output_projector(unshape(attention_output))
        # Return the bias too so subsequent layers can reuse it.
        return (attention_output, position_bias)
|
keras-nlp/keras_nlp/models/t5/t5_multi_head_attention.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/t5/t5_multi_head_attention.py",
"repo_id": "keras-nlp",
"token_count": 6148
}
| 164 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_nlp.models.whisper.whisper_audio_feature_extractor import (
WhisperAudioFeatureExtractor,
)
from keras_nlp.models.whisper.whisper_preprocessor import WhisperPreprocessor
from keras_nlp.models.whisper.whisper_tokenizer import WhisperTokenizer
from keras_nlp.tests.test_case import TestCase
class WhisperPreprocessorTest(TestCase):
    """Tests for `WhisperPreprocessor` using a tiny hand-built vocabulary."""

    def setUp(self):
        # Small audio feature extractor so feature extraction stays cheap.
        self.audio_feature_extractor = WhisperAudioFeatureExtractor(
            num_mels=80,
            num_fft_bins=400,
            stride=100,
            sampling_rate=100,
            max_audio_length=5,
        )
        # Tiny BPE vocabulary and merge rules covering the test phrases.
        base_tokens = ["air", "Ġair", "plane", "Ġat", "port"]
        self.vocab = {token: index for index, token in enumerate(base_tokens)}
        self.merges = [
            "Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e",
            "Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt",
            "Ġai r", "Ġa i", "pla ne",
        ]
        self.special_tokens = {
            "<|startoftranscript|>": 9,
            "<|endoftext|>": 10,
            "<|notimestamps|>": 11,
            "<|transcribe|>": 12,
            "<|translate|>": 13,
        }
        self.language_tokens = {
            "<|en|>": 14,
            "<|fr|>": 15,
        }
        self.tokenizer = WhisperTokenizer(
            vocabulary=self.vocab,
            merges=self.merges,
            special_tokens=self.special_tokens,
            language_tokens=self.language_tokens,
        )
        self.init_kwargs = {
            "audio_feature_extractor": self.audio_feature_extractor,
            "tokenizer": self.tokenizer,
            "decoder_sequence_length": 12,
            "language": "<|en|>",
            "task": "translate",
        }
        self.input_data = {
            "encoder_audio": np.ones((2, 200)),
            "decoder_text": [" airplane at airport", " airplane at"],
        }

    def test_feature_extractor_basics(self):
        # Exercise the shared preprocessor test harness from `TestCase`.
        self.run_preprocessor_test(
            cls=WhisperPreprocessor,
            init_kwargs=self.init_kwargs,
            input_data=self.input_data,
            token_id_key="decoder_token_ids",
        )

    def test_sequence_length_override(self):
        # A per-call `decoder_sequence_length` should take precedence over
        # the value supplied at construction time.
        preprocessor = WhisperPreprocessor(**self.init_kwargs)
        unbatched_input = {
            "encoder_audio": np.ones((200,)),
            "decoder_text": " airplane at airport",
        }
        x = preprocessor(unbatched_input, decoder_sequence_length=6)
        self.assertAllEqual(x["decoder_token_ids"], [9, 14, 13, 11, 1, 10])
|
keras-nlp/keras_nlp/models/whisper/whisper_preprocessor_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1441
}
| 165 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.models.xlm_roberta.xlm_roberta_presets import backbone_presets
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.tensor_utils import tensor_to_list
@keras_nlp_export("keras_nlp.models.XLMRobertaTokenizer")
class XLMRobertaTokenizer(SentencePieceTokenizer):
"""An XLM-RoBERTa tokenizer using SentencePiece subword segmentation.
This tokenizer class will tokenize raw strings into integer sequences and
is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the
underlying tokenizer, it will check for all special tokens needed by
XLM-RoBERTa models and provides a `from_preset()` method to automatically
download a matching vocabulary for an XLM-RoBERTa preset.
Note: If you are providing your own custom SentencePiece model, the original
fairseq implementation of XLM-RoBERTa re-maps some token indices from the
underlying sentencepiece output. To preserve compatibility, we do the same
re-mapping here.
If input is a batch of strings (rank > 0), the layer will output a
`tf.RaggedTensor` where the last dimension of the output is ragged.
If input is a scalar string (rank == 0), the layer will output a dense
`tf.Tensor` with static shape `[None]`.
Args:
proto: Either a `string` path to a SentencePiece proto file or a
`bytes` object with a serialized SentencePiece proto. See the
[SentencePiece repository](https://github.com/google/sentencepiece)
for more details on the format.
Examples:
```python
tokenizer = keras_nlp.models.XLMRobertaTokenizer.from_preset(
"xlm_roberta_base_multi",
)
# Unbatched inputs.
tokenizer("the quick brown fox")
# Batched inputs.
tokenizer(["the quick brown fox", "الأرض كروية"])
# Detokenization.
tokenizer.detokenize(tokenizer("the quick brown fox"))
# Custom vocabulary
def train_sentencepiece(ds, vocab_size):
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
sentence_iterator=ds.as_numpy_iterator(),
model_writer=bytes_io,
vocab_size=vocab_size,
model_type="WORD",
unk_id=0,
bos_id=1,
eos_id=2,
)
return bytes_io.getvalue()
ds = tf.data.Dataset.from_tensor_slices(
["the quick brown fox", "the earth is round"]
)
proto = train_sentencepiece(ds, vocab_size=10)
tokenizer = keras_nlp.models.XLMRobertaTokenizer(proto=proto)
```
"""
    def __init__(self, proto, **kwargs):
        # List of special tokens.
        self._vocabulary_prefix = ["<s>", "<pad>", "</s>", "<unk>"]
        # IDs of special tokens. These follow the fairseq convention rather
        # than the raw SentencePiece ids (see the class docstring note on
        # index re-mapping).
        self.start_token_id = 0  # <s>
        self.pad_token_id = 1  # <pad>
        self.end_token_id = 2  # </s>
        self.unk_token_id = 3  # <unk>
        super().__init__(proto=proto, **kwargs)
    def set_proto(self, proto):
        super().set_proto(proto)
        if proto is not None:
            # `<mask>` is appended as the last entry of the re-mapped
            # vocabulary (see `get_vocabulary`), so its id is size - 1.
            self.mask_token_id = self.vocabulary_size() - 1
        else:
            self.mask_token_id = None
    def vocabulary_size(self):
        """Get the size of the tokenizer vocabulary."""
        # +2: the four-token fairseq prefix replaces the first three
        # SentencePiece entries (net +1), and `<mask>` is appended (+1).
        # See `get_vocabulary`.
        return super().vocabulary_size() + 2
    def get_vocabulary(self):
        """Get the tokenizer vocabulary as a list of string tokens."""
        self._check_vocabulary()
        vocabulary = tensor_to_list(
            self._sentence_piece.id_to_string(
                tf.range(super().vocabulary_size())
            )
        )
        # Replace the first three SentencePiece entries with the four-token
        # fairseq prefix, and append `<mask>` at the end.
        return self._vocabulary_prefix + vocabulary[3:] + ["<mask>"]
def id_to_token(self, id):
    """Convert an integer id to a string token."""
    self._check_vocabulary()
    if id == self.mask_token_id:
        return "<mask>"
    # Ids 0-3 map directly onto the special-token prefix.
    if id < len(self._vocabulary_prefix) and id >= 0:
        return self._vocabulary_prefix[id]
    # All remaining ids are shifted up by one relative to the proto (see
    # `tokenize`), so validate against the un-shifted proto range.
    if id - 1 >= super().vocabulary_size() or id - 1 < 0:
        raise ValueError(
            f"`id` must be in range [0, {self.vocabulary_size() - 1}]. "
            f"Received: {id}"
        )
    return tensor_to_list(self._sentence_piece.id_to_string(id - 1))
def token_to_id(self, token):
    """Convert a string token to an integer id."""
    self._check_vocabulary()
    if token in self._vocabulary_prefix:
        # Special tokens occupy ids 0-3, in prefix order.
        return self._vocabulary_prefix.index(token)
    spm_token_id = self._sentence_piece.string_to_id(token)

    # OOV token
    # The proto maps unknown strings to its own `<unk>` id; translate that
    # to our remapped `unk_token_id`.
    spm_unk_token_id = self._sentence_piece.string_to_id("<unk>")
    if spm_token_id == spm_unk_token_id:
        return self.unk_token_id

    # Shift proto ids up by one to make room for `<pad>` (see `tokenize`).
    return int(spm_token_id.numpy()) + 1
def tokenize(self, inputs):
    """Tokenize strings into ids remapped for the special-token prefix."""
    self._check_vocabulary()
    tokens = super().tokenize(inputs)

    # The proto reserves id 0 for `<unk>`; remap it to `unk_token_id - 1`
    # so the +1 shift below lands it on `unk_token_id`. `start_token_id`
    # and `end_token_id` are handled in `XLMRobertaPreprocessor`.
    tokens = tf.where(tf.equal(tokens, 0), self.unk_token_id - 1, tokens)

    # Shift every id up by one to make room for `<pad>`.
    return tokens + 1
def detokenize(self, inputs):
    """Convert integer token ids back to strings.

    `<mask>` ids are dropped before decoding since the underlying proto
    has no entry for them (the id lies past the proto's range).
    """
    self._check_vocabulary()
    tokens = tf.ragged.boolean_mask(
        inputs, tf.not_equal(inputs, self.mask_token_id)
    )

    # Shift the tokens IDs left by one.
    tokens = tf.subtract(tokens, 1)

    # Correct `unk_token_id`, `end_token_id`, `start_token_id`, respectively.
    # Note: The `pad_token_id` is taken as 0 (`unk_token_id`) since the
    # proto does not contain `pad_token_id`. This mapping of the pad token
    # is done automatically by the above subtraction.
    tokens = tf.where(tf.equal(tokens, self.unk_token_id - 1), 0, tokens)
    tokens = tf.where(tf.equal(tokens, self.end_token_id - 1), 2, tokens)
    tokens = tf.where(tf.equal(tokens, self.start_token_id - 1), 1, tokens)

    # Note: Even though we map `"<s>" and `"</s>"` to the correct IDs,
    # the `detokenize` method will return empty strings for these tokens.
    # This is a vagary of the `sentencepiece` library.
    return super().detokenize(tokens)
@classproperty
def presets(cls):
    """Dictionary of available preset configurations for this tokenizer."""
    # Deep copy so callers can mutate the result without corrupting the
    # module-level `backbone_presets` registry.
    return copy.deepcopy(backbone_presets)
|
keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py/0
|
{
"file_path": "keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 2906
}
| 166 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_nlp.backend import ops
from keras_nlp.samplers.random_sampler import RandomSampler
from keras_nlp.tests.test_case import TestCase
class RandomSamplerTest(TestCase):
    """Unit tests for the `RandomSampler` text-generation sampler."""

    def setUp(self):
        super().setUp()
        # Use a simple alphabet of lowercase characters to [0, 25].
        self.int_lookup = {i: chr(i + ord("a")) for i in range(26)}
        self.char_lookup = {v: k for k, v in self.int_lookup.items()}
        self.batch_size = 1
        self.length = 12
        self.vocab_size = len(self.int_lookup)

        def next(prompt, cache, index):
            # Dummy hidden states.
            hidden_states = ops.ones([self.batch_size, 5])
            # Return a distribution favoring the next char in state.
            logits = ops.one_hot(cache[:, index], self.vocab_size) * 1e9
            return logits, hidden_states, cache

        self.next = next
        self.sampler = RandomSampler(temperature=1.0)

    def join_as_string(self, x):
        # Decode a batch of int token ids back into readable strings.
        x = ops.convert_to_numpy(x)
        return ["".join([self.int_lookup[i] for i in s]) for s in x]

    def test_stateless_call(self):
        def next(prompt, cache, index):
            # Dummy hidden states.
            hidden_states = ops.ones([self.batch_size, 5])
            # Return a distribution favoring the first token in the vocab.
            logits = np.zeros((self.batch_size, self.vocab_size))
            logits[:, 0] = 1e9
            return ops.array(logits), hidden_states, cache

        prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
        output = self.sampler(
            next=next,
            prompt=prompt,
            index=5,
        )
        # Generation starts at index 5, so the first five "z"s are kept.
        self.assertEqual(self.join_as_string(output), ["zzzzzaaaaaaa"])

    def test_stateful_call(self):
        cache_chars = list("sequentially")
        cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
        prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
        # With a near-one-hot distribution the sampler must follow the cache.
        output = self.sampler(
            next=self.next,
            prompt=prompt,
            cache=cache,
        )
        self.assertEqual(self.join_as_string(output), ["sequentially"])

    def test_temperature(self):
        def next(prompt, cache, index):
            # Dummy hidden states.
            hidden_states = ops.ones([self.batch_size, 5])
            logits = ops.arange(self.vocab_size, 0, -1, dtype="float32")
            logits = ops.reshape(logits[None, :], (self.batch_size, -1))
            return ops.array(logits), hidden_states, cache

        prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
        # Near-zero temperature collapses sampling to the argmax (token 0).
        output = RandomSampler(temperature=1e-5)(
            next=next,
            prompt=prompt,
        )
        self.assertAllEqual(output, ops.zeros_like(output))

    def test_early_stopping(self):
        cache_chars = list("sequentially")
        cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
        prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
        # Generation should halt at the end token ("t"), leaving the rest of
        # the prompt untouched.
        output = self.sampler(
            next=self.next,
            prompt=prompt,
            cache=cache,
            end_token_id=self.char_lookup["t"],
        )
        self.assertEqual(self.join_as_string(output), ["sequentzzzzz"])

    @parameterized.named_parameters(
        ("jit_compile_false", False), ("jit_compile_true", True)
    )
    @pytest.mark.tf_only
    def test_compilation(self, jit_compile):
        # Sampling must also work when wrapped in a (possibly XLA-compiled)
        # tf.function.
        cache_chars = list("sequentially")
        cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
        prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])

        @tf.function(jit_compile=jit_compile)
        def generate(prompt, cache):
            return self.sampler(self.next, prompt=prompt, cache=cache)

        output = generate(prompt, cache)
        self.assertEqual(self.join_as_string(output), ["sequentially"])
|
keras-nlp/keras_nlp/samplers/random_sampler_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/samplers/random_sampler_test.py",
"repo_id": "keras-nlp",
"token_count": 1993
}
| 167 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tensorflow as tf
from keras_nlp.tests.test_case import TestCase
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
from keras_nlp.tokenizers.sentence_piece_tokenizer_trainer import (
compute_sentence_piece_proto,
)
class SentencePieceTokenizerTrainerTest(TestCase):
    """Tests for the `compute_sentence_piece_proto` training utility."""

    def test_dataset_input(self):
        # A tf.data.Dataset of strings is a valid training corpus.
        test_text = ["Ninjas and Samurais"]
        expected_output = [
            [5, 9, 6, 7, 11, 4, 8, 5, 4, 7, 10, 5, 12, 4, 13, 15, 14, 4, 6, 8]
        ]
        data = tf.data.Dataset.from_tensor_slices(test_text)
        proto = compute_sentence_piece_proto(data, 16)
        tokenizer = SentencePieceTokenizer(proto=proto)
        test_output = tokenizer(test_text)
        self.assertAllEqual(expected_output, test_output)

    def test_file_input(self):
        # A list of file paths is also a valid training corpus.
        test_text = "Ninja Land"
        with open(os.path.join(self.get_temp_dir(), "test.txt"), "w+") as f:
            f.write(test_text + "\n")
        expected_output = [6, 8, 9, 5, 11, 4, 6, 7, 4, 5, 10]
        proto = compute_sentence_piece_proto(
            [os.path.join(self.get_temp_dir(), "test.txt")],
            12,
        )
        tokenizer = SentencePieceTokenizer(proto=proto)
        test_output = tokenizer(test_text)
        self.assertAllEqual(expected_output, test_output)

    def test_multiple_file_input(self):
        # Multiple files should be merged into a single training corpus.
        with open(os.path.join(self.get_temp_dir(), "test1.txt"), "w+") as f:
            f.write("Drifting Along\n")
        with open(os.path.join(self.get_temp_dir(), "test2.txt"), "w+") as f:
            f.write("Woah look there\n")
        inputs = [
            os.path.join(self.get_temp_dir(), "test1.txt"),
            os.path.join(self.get_temp_dir(), "test2.txt"),
        ]
        proto = compute_sentence_piece_proto(inputs, 20)
        tokenizer = SentencePieceTokenizer(proto=proto)
        # A sentence mixing words from both files must tokenize cleanly.
        test_text = "Woah Along"
        test_output = tokenizer(test_text)
        expected_output = [4, 16, 5, 17, 9, 4, 15, 12, 5, 11, 18]
        self.assertAllEqual(expected_output, test_output)

    def test_invalid_input(self):
        # Unsupported corpus types (e.g. dict) should raise a ValueError.
        test_text_invalid = {"file1": "test.txt"}
        with self.assertRaisesRegex(
            ValueError,
            re.escape(f"Received: type(data)={type(test_text_invalid)}."),
        ):
            compute_sentence_piece_proto(test_text_invalid, 10)

    def test_lowercase(self):
        inputs = tf.data.Dataset.from_tensor_slices(["Drifting Along"])
        proto = compute_sentence_piece_proto(
            inputs, vocabulary_size=15, lowercase=True
        )
        tokenizer = SentencePieceTokenizer(proto=proto)
        output = inputs.map(tokenizer).take(1).get_single_element()
        expected_output = [4, 8, 12, 5, 9, 14, 5, 6, 13, 4, 7, 10, 11, 6, 13]
        self.assertAllEqual(expected_output, output)

    def test_proto_output_file(self):
        # The proto can be written straight to disk and loaded back by path.
        inputs = tf.data.Dataset.from_tensor_slices(["Drifting Along"])
        compute_sentence_piece_proto(
            inputs,
            vocabulary_size=15,
            proto_output_file=os.path.join(self.get_temp_dir(), "model.spm"),
        )
        tokenizer = SentencePieceTokenizer(
            proto=os.path.join(self.get_temp_dir(), "model.spm")
        )
        output = inputs.map(tokenizer).take(1).get_single_element()
        expected_output = [4, 8, 12, 5, 9, 14, 5, 6, 13, 4, 7, 10, 11, 6, 13]
        self.assertAllEqual(expected_output, output)
|
keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py/0
|
{
"file_path": "keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer_trainer_test.py",
"repo_id": "keras-nlp",
"token_count": 1784
}
| 168 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities with miscellaneous python extensions."""
class classproperty(property):
    """A `property` that is resolved against the class, not its instances."""

    def __get__(self, obj, objtype):
        # Ignore the instance (`obj`) and evaluate the getter on the class,
        # so `MyClass.prop` and `instance.prop` both work.
        return self.fget(objtype)
def format_docstring(**replacements):
    """Format a python docstring using a dictionary of replacements.

    This decorator can be placed on a function, class or method to format its
    docstring with python variables.

    The decorator will replace any double bracketed variable with a kwargs
    value passed to the decorator itself. For example
    `@format_docstring(name="foo")` will replace any occurance of `{{name}}`
    in the docstring with the string literal `foo`.
    """

    def decorate(obj):
        # `str.format()` treats `{var}` as a placeholder, but our templates
        # mark placeholders with double brackets (`{{var}}`). Swap every
        # single bracket with a double one and vice versa so that literal
        # braces survive formatting and `{{var}}` becomes a placeholder.
        swapped = "{".join(
            piece.replace("{", "{{") for piece in obj.__doc__.split("{{")
        )
        swapped = "}".join(
            piece.replace("}", "}}") for piece in swapped.split("}}")
        )
        obj.__doc__ = swapped.format(**replacements)
        return obj

    return decorate
|
keras-nlp/keras_nlp/utils/python_utils.py/0
|
{
"file_path": "keras-nlp/keras_nlp/utils/python_utils.py",
"repo_id": "keras-nlp",
"token_count": 569
}
| 169 |
#!/bin/bash -e
base_dir=$(dirname $(dirname $0))
targets="${base_dir}/*.py ${base_dir}/examples/ ${base_dir}/keras_nlp/ ${base_dir}/tools/"

# NOTE: this script runs under `bash -e`, so the previous pattern of running a
# linter and then checking `$?` never worked: a failing linter aborted the
# script before the helpful message could print. Testing the command directly
# inside `if` exempts it from `-e` and preserves the guidance messages.
if ! isort --sp "${base_dir}/pyproject.toml" -c ${targets}; then
  echo "Please run \"./shell/format.sh\" to format the code."
  exit 1
fi

if ! flake8 --config "${base_dir}/setup.cfg" ${targets}; then
  echo "Please fix the code style issue."
  exit 1
fi

if ! black --config "${base_dir}/pyproject.toml" --check ${targets}; then
  echo "Please run \"./shell/format.sh\" to format the code."
  exit 1
fi

# Every Python file must carry a copyright header.
for i in $(find ${targets} -name '*.py'); do
  if ! grep -q Copyright $i; then
    echo "Please run \"./shell/format.sh\" to format the code."
    exit 1
  fi
done
|
keras-nlp/shell/lint.sh/0
|
{
"file_path": "keras-nlp/shell/lint.sh",
"repo_id": "keras-nlp",
"token_count": 305
}
| 170 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
import requests
import tensorflow as tf
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone
from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import (
DebertaV3Preprocessor,
)
from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer
PRESET_MAP = {
"deberta_v3_extra_small_en": "microsoft/deberta-v3-xsmall",
"deberta_v3_small_en": "microsoft/deberta-v3-small",
"deberta_v3_base_en": "microsoft/deberta-v3-base",
"deberta_v3_large_en": "microsoft/deberta-v3-large",
"deberta_v3_base_multi": "microsoft/mdeberta-v3-base",
}
EXTRACT_DIR = "./{}"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def download_files(hf_model_name):
    """Download the original config and SentencePiece vocab from HuggingFace.

    Files are written into `EXTRACT_DIR` formatted with `FLAGS.preset`.

    Args:
        hf_model_name: HuggingFace hub id, e.g. `"microsoft/deberta-v3-base"`.
    """
    print("-> Download original vocabulary and config.")

    extract_dir = EXTRACT_DIR.format(FLAGS.preset)
    os.makedirs(extract_dir, exist_ok=True)

    # Config.
    config_path = os.path.join(extract_dir, "config.json")
    response = requests.get(
        f"https://huggingface.co/{hf_model_name}/raw/main/config.json"
    )
    # Fail loudly on HTTP errors instead of silently writing an error page.
    response.raise_for_status()
    # `with` ensures the file handle is closed (the old bare
    # `open(...).write(...)` leaked the handle).
    with open(config_path, "wb") as f:
        f.write(response.content)
    print(f"`{config_path}`")

    # Vocab.
    spm_path = os.path.join(extract_dir, "spm.model")
    response = requests.get(
        f"https://huggingface.co/{hf_model_name}/resolve/main/spm.model"
    )
    response.raise_for_status()
    with open(spm_path, "wb") as f:
        f.write(response.content)
    print(f"`{spm_path}`")
def define_preprocessor(hf_model_name):
    """Build the KerasNLP preprocessor and the reference HF tokenizer.

    Returns:
        A `(keras_nlp_preprocessor, hf_tokenizer)` tuple built from the
        vocab downloaded by `download_files`.
    """
    print("\n-> Define the tokenizers.")
    extract_dir = EXTRACT_DIR.format(FLAGS.preset)
    spm_path = os.path.join(extract_dir, "spm.model")

    keras_nlp_tokenizer = DebertaV3Tokenizer(proto=spm_path)

    # Avoid having padding tokens. This is because the representations of the
    # padding token may be vastly different from the representations computed in
    # the original model. See https://github.com/keras-team/keras/pull/16619#issuecomment-1156338394.
    sequence_length = 14
    if FLAGS.preset == "deberta_v3_base_multi":
        # Presumably the multilingual tokenizer splits the sample sentence
        # into more pieces, requiring a longer sequence — TODO confirm.
        sequence_length = 17
    keras_nlp_preprocessor = DebertaV3Preprocessor(
        keras_nlp_tokenizer, sequence_length=sequence_length
    )

    hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_model_name)

    print("\n-> Print MD5 checksum of the vocab files.")
    print(f"`{spm_path}` md5sum: ", get_md5_checksum(spm_path))

    return keras_nlp_preprocessor, hf_tokenizer
def convert_checkpoints(keras_nlp_model, hf_model):
    """Copy HuggingFace DeBERTa-v3 weights into the KerasNLP backbone.

    Args:
        keras_nlp_model: A `DebertaV3Backbone` whose weights are overwritten
            in place.
        hf_model: The loaded HuggingFace `transformers` model to read from.

    Returns:
        `keras_nlp_model` with converted weights, also saved to
        `{FLAGS.preset}.h5`.
    """
    print("\n-> Convert original weights to KerasNLP format.")
    extract_dir = EXTRACT_DIR.format(FLAGS.preset)
    config_path = os.path.join(extract_dir, "config.json")

    # Build config.
    # Map HF config names to the KerasNLP backbone's constructor names.
    cfg = {}
    with open(config_path, "r") as pt_cfg_handler:
        pt_cfg = json.load(pt_cfg_handler)
    cfg["vocabulary_size"] = pt_cfg["vocab_size"]
    cfg["num_layers"] = pt_cfg["num_hidden_layers"]
    cfg["num_heads"] = pt_cfg["num_attention_heads"]
    cfg["hidden_dim"] = pt_cfg["hidden_size"]
    cfg["intermediate_dim"] = pt_cfg["intermediate_size"]
    cfg["dropout"] = pt_cfg["hidden_dropout_prob"]
    cfg["max_sequence_length"] = pt_cfg["max_position_embeddings"]
    cfg["bucket_size"] = pt_cfg["position_buckets"]
    print("Config:", cfg)

    hf_wts = hf_model.state_dict()
    print("Original weights:")
    print(
        str(hf_wts.keys())
        .replace(", ", "\n")
        .replace("odict_keys([", "")
        .replace("]", "")
        .replace(")", "")
    )

    # Token embedding and its layer norm.
    keras_nlp_model.get_layer("token_embedding").embeddings.assign(
        hf_wts["embeddings.word_embeddings.weight"]
    )
    keras_nlp_model.get_layer("embeddings_layer_norm").gamma.assign(
        hf_wts["embeddings.LayerNorm.weight"]
    )
    keras_nlp_model.get_layer("embeddings_layer_norm").beta.assign(
        hf_wts["embeddings.LayerNorm.bias"]
    )
    # Relative position embeddings used by disentangled attention.
    keras_nlp_model.get_layer("rel_embedding").rel_embeddings.assign(
        hf_wts["encoder.rel_embeddings.weight"]
    )
    keras_nlp_model.get_layer("rel_embedding").layer_norm.gamma.assign(
        hf_wts["encoder.LayerNorm.weight"]
    )
    keras_nlp_model.get_layer("rel_embedding").layer_norm.beta.assign(
        hf_wts["encoder.LayerNorm.bias"]
    )

    for i in range(keras_nlp_model.num_layers):
        # Q,K,V
        # HF stores the projections as flat (hidden, hidden) matrices; the
        # Keras layers expect them transposed and split per attention head,
        # hence the `.T.reshape((hidden_dim, num_heads, -1))`.
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._query_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.query_proj.weight"]
            .numpy()
            .T.reshape((cfg["hidden_dim"], cfg["num_heads"], -1))
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._query_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.query_proj.bias"]
            .reshape((cfg["num_heads"], -1))
            .numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._key_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.key_proj.weight"]
            .numpy()
            .T.reshape((cfg["hidden_dim"], cfg["num_heads"], -1))
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._key_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.key_proj.bias"]
            .reshape((cfg["num_heads"], -1))
            .numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._value_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.value_proj.weight"]
            .numpy()
            .T.reshape((cfg["hidden_dim"], cfg["num_heads"], -1))
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._value_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.attention.self.value_proj.bias"]
            .reshape((cfg["num_heads"], -1))
            .numpy()
        )

        # Attn output.
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._output_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.attention.output.dense.weight"]
            .transpose(1, 0)
            .numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer._output_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.attention.output.dense.bias"].numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer_norm.gamma.assign(
            hf_wts[
                f"encoder.layer.{i}.attention.output.LayerNorm.weight"
            ].numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._self_attention_layer_norm.beta.assign(
            hf_wts[f"encoder.layer.{i}.attention.output.LayerNorm.bias"].numpy()
        )

        # Intermediate FF layer.
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_intermediate_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.intermediate.dense.weight"]
            .transpose(1, 0)
            .numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_intermediate_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.intermediate.dense.bias"].numpy()
        )

        # Output FF layer.
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_output_dense.kernel.assign(
            hf_wts[f"encoder.layer.{i}.output.dense.weight"].numpy().T
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_output_dense.bias.assign(
            hf_wts[f"encoder.layer.{i}.output.dense.bias"].numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_layer_norm.gamma.assign(
            hf_wts[f"encoder.layer.{i}.output.LayerNorm.weight"].numpy()
        )
        keras_nlp_model.get_layer(
            f"disentangled_attention_encoder_layer_{i}"
        )._feedforward_layer_norm.beta.assign(
            hf_wts[f"encoder.layer.{i}.output.LayerNorm.bias"].numpy()
        )

    # Save the model.
    print(f"\n-> Save KerasNLP model weights to `{FLAGS.preset}.h5`.")
    keras_nlp_model.save_weights(f"{FLAGS.preset}.h5")

    return keras_nlp_model
def check_output(
    keras_nlp_preprocessor,
    keras_nlp_model,
    hf_tokenizer,
    hf_model,
):
    """Compare KerasNLP and HF model outputs on a sample sentence."""
    print("\n-> Check the outputs.")
    sample_text = ["cricket is awesome, easily the best sport in the world!"]

    # KerasNLP
    keras_nlp_inputs = keras_nlp_preprocessor(tf.constant(sample_text))
    keras_nlp_output = keras_nlp_model.predict(keras_nlp_inputs)

    # HF
    hf_inputs = hf_tokenizer(
        sample_text, padding="longest", return_tensors="pt"
    )
    hf_output = hf_model(**hf_inputs).last_hidden_state

    print("KerasNLP output:", keras_nlp_output[0, 0, :10])
    print("HF output:", hf_output[0, 0, :10])
    # Mean element-wise difference; should be ~0 if conversion succeeded.
    print("Difference:", np.mean(keras_nlp_output - hf_output.detach().numpy()))

    # Show the MD5 checksum of the model weights.
    print("Model md5sum: ", get_md5_checksum(f"./{FLAGS.preset}.h5"))
def main(_):
    """Run the end-to-end DeBERTa-v3 checkpoint conversion."""
    hf_model_name = PRESET_MAP[FLAGS.preset]

    download_files(hf_model_name)
    keras_nlp_preprocessor, hf_tokenizer = define_preprocessor(hf_model_name)

    print("\n-> Load KerasNLP model.")
    # Weights are intentionally not loaded; `convert_checkpoints` fills them.
    keras_nlp_model = DebertaV3Backbone.from_preset(
        FLAGS.preset, load_weights=False
    )

    print("\n-> Load HF model.")
    hf_model = transformers.AutoModel.from_pretrained(hf_model_name)
    hf_model.eval()

    keras_nlp_model = convert_checkpoints(keras_nlp_model, hf_model)

    check_output(
        keras_nlp_preprocessor,
        keras_nlp_model,
        hf_tokenizer,
        hf_model,
    )
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
|
keras-nlp/tools/checkpoint_conversion/convert_deberta_v3_checkpoints.py/0
|
{
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_deberta_v3_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 5251
}
| 171 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import transformers
from absl import app
from absl import flags
import keras_nlp
os.environ["KERAS_BACKEND"] = "torch"
"""
Sample usage:
For converting a keras model to HuggingFace format using a custom or fine-tuned
checkpoint from Keras, make sure to pass the path for the Keras weights file
(ending in `.weights.h5`), the model size (`2b` or `7b`), and the tokenizer
vocabulary file (`.spm`, `.model`, or equivalent) to
`--weights_file`, `--size`, and `--vocab_path`, respectively.
Optionally, you can specify the output directory
for the converted model at `--output_dir`. (defaults to `gg_hf`)
```
python tools/gemma/export_gemma_to_hf.py \
--weights_file fine_tuned_imdb.weights.h5 \
--size 2b \
--vocab_path gemma_lm_tokenizer/vocabulary.spm \
--output_dir fine_tuned_gg_hf
```
For converting a Keras model to HuggingFace format from a preset,
simply pass the Keras preset name to `--preset` and its model size
(`2b` or `7b`) to `--size`.
```
python tools/gemma/export_gemma_to_hf.py \
--preset gemma_2b_en \
--size 2b \
--output_dir keras_hf_model/
```
"""
PRESET_MAP = {
"gemma_2b_en": "gg-hf/gemma-2b",
"gemma_instruct_2b_en": "gg-hf/gemma-2b",
"gemma_7b_en": "gg-hf/gemma-7b",
"gemma_instruct_7b_en": "gg-hf/gemma-7b",
}
SIZE_MAP = {
"2b": ("gg-hf/gemma-2b", "gemma_2b_en"),
"7b": ("gg-hf/gemma-7b", "gemma_7b_en"),
}
gemma_2b_config = transformers.GemmaConfig(
num_hidden_layers=18,
num_attention_heads=8,
num_key_value_heads=1,
hidden_size=2048,
intermediate_size=16384,
)
gemma_7b_config = transformers.GemmaConfig()
CONFIG_MAPPING = {"2b": gemma_2b_config, "7b": gemma_7b_config}
FLAGS = flags.FLAGS
flags.DEFINE_string(
"hf_token",
None,
"Your HuggingFace token. Needed for access to the HuggingFace Gemma"
"implementation since the repository is private, for now.",
)
flags.DEFINE_string(
"preset",
None,
f'Must be one of {",".join(PRESET_MAP.keys())}'
" Alternatively, a Keras weights file (`.weights.h5`) can be passed"
" to --weights_file flag.",
)
flags.DEFINE_string(
"weights_file",
None,
"A Keras weights file (`.weights.h5`)."
" Alternatively, a model preset can be passed to --preset flag.",
)
flags.DEFINE_string(
"size",
None,
"Size of model. Must be passed if `weights_file` is passed. "
"This should be either `2b` or `7b`.",
)
flags.DEFINE_string(
"output_dir",
"gg_hf",
"An output directory for the converted HuggingFace model and tokenizer.",
)
flags.DEFINE_string(
"vocab_path",
None,
"A path containing the vocabulary (must be a `.spm` file or equivalent). "
"If not passed, the vocabulary of the preset will be used.",
)
def convert_checkpoints(preset, weights_file, size, output_dir, vocab_path):
    """Convert a KerasNLP Gemma checkpoint to HuggingFace format.

    Args:
        preset: Optional KerasNLP preset name. Mutually exclusive with
            `weights_file`.
        weights_file: Optional Keras `.weights.h5` weights file.
        size: Model size, `"2b"` or `"7b"`.
        output_dir: Directory to write the converted HF model and tokenizer.
        vocab_path: Optional SentencePiece vocabulary file. If omitted, the
            preset's vocabulary is exported and used.
    """
    if preset is not None:
        hf_id = PRESET_MAP[preset]
        print(f"\n-> Loading KerasNLP Gemma model with preset `{preset}`...")
        keras_nlp_model = keras_nlp.models.GemmaCausalLM.from_preset(preset)
    else:
        hf_id, keras_preset = SIZE_MAP[size.lower()]
        print(f"\n-> Loading Keras weights from file `{weights_file}`...")
        keras_nlp_model = keras_nlp.models.GemmaCausalLM.from_preset(
            keras_preset
        )
        keras_nlp_model.load_weights(weights_file)

    print(f"\n-> Loading HuggingFace Gemma `{size.upper()}` model...")
    hf_model = transformers.GemmaForCausalLM(CONFIG_MAPPING[size.lower()])
    print("\n✅ Model loading complete.")
    print("\n-> Converting weights from KerasNLP Gemma to HuggingFace Gemma...")

    # Token embedding (with vocab size difference handling)
    keras_embedding = keras_nlp_model.backbone.token_embedding.weights[0]
    hf_vocab_size = hf_model.model.embed_tokens.weight.shape[0]
    keras_nlp_vocab_size = keras_embedding.value.shape[0]
    if hf_vocab_size < keras_nlp_vocab_size:
        # Drop the extra trailing rows so the table fits the HF embedding.
        diff = keras_nlp_vocab_size - hf_vocab_size
        update_state_dict(
            hf_model.model.embed_tokens,
            "weight",
            keras_embedding.value[:-diff, :],
        )
    else:
        update_state_dict(
            hf_model.model.embed_tokens,
            "weight",
            keras_embedding.value,
        )

    # Decoder blocks
    for i in range(keras_nlp_model.backbone.num_layers):
        decoder_block = keras_nlp_model.backbone.get_layer(f"decoder_block_{i}")

        # Pre-attention norm
        update_state_dict(
            hf_model.model.layers[i].input_layernorm,
            "weight",
            decoder_block.pre_attention_norm.weights[0].value,
        )

        # Attention
        # Keras stores Q/K/V per head; transpose + reshape converts them to
        # HF's flat projection layout.
        query_target_shape = hf_model.model.layers[
            i
        ].self_attn.q_proj.weight.shape
        query_tensor = decoder_block.attention.query_dense.weights[0].value
        query_tensor = query_tensor.transpose(1, 2).reshape(query_target_shape)
        update_state_dict(
            hf_model.model.layers[i].self_attn.q_proj, "weight", query_tensor
        )

        key_target_shape = hf_model.model.layers[
            i
        ].self_attn.k_proj.weight.shape
        key_tensor = decoder_block.attention.key_dense.weights[0].value
        key_tensor = key_tensor.transpose(1, 2).reshape(key_target_shape)
        update_state_dict(
            hf_model.model.layers[i].self_attn.k_proj, "weight", key_tensor
        )

        value_target_shape = hf_model.model.layers[
            i
        ].self_attn.v_proj.weight.shape
        value_tensor = decoder_block.attention.value_dense.weights[0].value
        value_tensor = value_tensor.transpose(1, 2).reshape(value_target_shape)
        update_state_dict(
            hf_model.model.layers[i].self_attn.v_proj, "weight", value_tensor
        )

        out_target_shape = hf_model.model.layers[
            i
        ].self_attn.o_proj.weight.shape
        keras_out_tensor = decoder_block.attention.output_dense.weights[0].value
        out_tensor = keras_out_tensor.reshape(
            (out_target_shape[1], out_target_shape[0])  # Transpose target size
        ).transpose(0, 1)
        update_state_dict(
            hf_model.model.layers[i].self_attn.o_proj, "weight", out_tensor
        )

        # Post-attention norm
        update_state_dict(
            hf_model.model.layers[i].post_attention_layernorm,
            "weight",
            decoder_block.pre_ffw_norm.weights[0].value,
        )

        # MLP (Feed-forward)
        update_state_dict(
            hf_model.model.layers[i].mlp.gate_proj,
            "weight",
            decoder_block.gating_ffw.weights[0].value.transpose(0, 1),
        )
        update_state_dict(
            hf_model.model.layers[i].mlp.up_proj,
            "weight",
            decoder_block.gating_ffw_2.weights[0].value.transpose(0, 1),
        )
        update_state_dict(
            hf_model.model.layers[i].mlp.down_proj,
            "weight",
            decoder_block.ffw_linear.weights[0].value.transpose(0, 1),
        )

    # Final norm
    update_state_dict(
        hf_model.model.norm,
        "weight",
        keras_nlp_model.backbone.layers[-1].weights[0].value,
    )
    print("\n✅ Weights converted successfully.")

    print(f"\n-> Saving HuggingFace model to `{output_dir}`...")
    # Save model to HF Transformers format
    os.makedirs(output_dir, exist_ok=True)
    hf_model.save_pretrained(output_dir)
    print(f"\n✅ Saving complete. Model saved at `{output_dir}`.")

    # Tokenizer
    if not vocab_path:
        # BUGFIX: `SIZE_MAP` values are `(hf_id, keras_preset)` tuples; the
        # previous code passed the whole tuple to `from_preset` when no
        # `--preset` was given, which could never succeed. Use the preset
        # name element instead.
        tokenizer_preset = preset or SIZE_MAP[size.lower()][1]
        print(
            "\n-> Loading KerasNLP Gemma tokenizer with "
            f"preset `{tokenizer_preset}`..."
        )
        keras_nlp_tokenizer = keras_nlp.models.GemmaTokenizer.from_preset(
            tokenizer_preset
        )
        # Save tokenizer state
        keras_nlp_tokenizer.save_assets(output_dir)
        vocab_path = os.path.join(output_dir, "vocabulary.spm")
        print("\n✅ Tokenizer loading complete.")
    hf_tokenizer = transformers.GemmaTokenizer(vocab_path)

    print(f"\n-> Saving HuggingFace Gemma tokenizer to `{output_dir}`...")
    # Save tokenizer to HF Transformers format
    hf_tokenizer.save_pretrained(output_dir)
    print(f"\n✅ Saving complete. Tokenizer saved at `{output_dir}`.")
def update_state_dict(layer, weight_name: str, tensor: torch.Tensor) -> None:
    """Updates the state dict for a weight given a tensor.

    Args:
        layer: A `torch.nn.Module` whose state dict contains `weight_name`.
        weight_name: Key of the weight to overwrite (e.g. `"weight"`).
        tensor: Replacement values; must match the existing weight's shape.

    Raises:
        ValueError: If `tensor`'s shape does not match the target weight.
    """
    target = layer.state_dict()[weight_name]
    # Raise a real exception (not `assert`) so the check survives `python -O`.
    if tensor.shape != target.shape:
        raise ValueError(
            f"Shape mismatch for `{weight_name}`: got {tensor.shape}, "
            f"expected {target.shape}."
        )
    # Tensors returned by `state_dict()` share storage with the module's
    # parameters, so an in-place copy updates the live weights.
    target.copy_(tensor)
def flag_error_handler():
    """Validate the CLI flag combination before conversion starts."""
    preset = FLAGS.preset
    weights_file = FLAGS.weights_file

    # Exactly one of --preset / --weights_file must be supplied.
    if not preset and not weights_file:
        raise ValueError(
            "Please pass either a valid Keras preset to `--preset`"
            " or supply a Keras weights file (`.weights.h5`) and model size"
            " (`2b` or `7b`) to `--weights_file` and `--size`, respectively."
        )
    if weights_file:
        if preset:
            raise ValueError(
                "Both `--preset` and `--weights_file` flags cannot be supplied "
                "at the same time. Either supply a valid Keras preset to "
                "`--preset`or supply a Keras `.weights.h5` file and "
                "model size (`2b` or `7b`) to `--weights_file` and `--size`, "
                "respectively."
            )
        if not str(weights_file).endswith(".weights.h5"):
            raise ValueError(
                "Please pass a valid Keras weights file ending in `.weights.h5`."
            )
        if not FLAGS.size:
            raise ValueError(
                "The `size` flag must be passed if a weights file is passed. "
                "Please pass the appropriate size (`2b` or `7b`) for your "
                "model to the `--size` flag."
            )
    # The size must always be one of the two supported variants.
    if FLAGS.size.lower() not in ("2b", "7b"):
        raise ValueError(
            "Invalid `size`. Please pass the appropriate size (`2b` or `7b`) "
            "for your model to the `--size` flag."
        )
def main(_):
    """Script entry point: validates flags, then runs the checkpoint
    conversion with the flag values.
    """
    flag_error_handler()
    # `convert_checkpoints` is defined earlier in this file; it performs the
    # Keras -> HuggingFace weight conversion.
    convert_checkpoints(
        FLAGS.preset,
        FLAGS.weights_file,
        FLAGS.size,
        FLAGS.output_dir,
        FLAGS.vocab_path,
    )
if __name__ == "__main__":
    # `--size` selects the 2B or 7B architecture and is always required.
    flags.mark_flag_as_required("size")
    app.run(main)
|
keras-nlp/tools/gemma/export_gemma_to_hf.py/0
|
{
"file_path": "keras-nlp/tools/gemma/export_gemma_to_hf.py",
"repo_id": "keras-nlp",
"token_count": 5002
}
| 172 |
sudo: required
language: python
matrix:
  include:
    # check code style and Python 3.6
    - python: 3.6
      env: TEST_MODE=PEP8
    # run tests with keras from source and Python 3.6
    - python: 3.6
      # Both variables must live in ONE `env:` entry: a duplicate `env:`
      # key on the same matrix item is a YAML duplicate-key, so the second
      # value silently overrides the first and KERAS_HEAD was lost.
      env: KERAS_HEAD=true TEST_MODE=TESTS
    # run tests with keras from PyPI and Python 3.6
    - python: 3.6
      env: TEST_MODE=TESTS
    # run import test and Python 3.6
    - python: 3.6
      env: TEST_MODE=IMPORTS
before_install:
  - sudo apt-get update
  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
  - bash miniconda.sh -b -p $HOME/miniconda
  - export PATH="$HOME/miniconda/bin:$PATH"
  - hash -r
  - conda config --set always_yes yes --set changeps1 no
  - conda update -q conda
  # Useful for debugging any issues with conda
  - conda info -a
  - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
  - source activate test-environment
install:
  - if [[ $KERAS_HEAD == "true" ]]; then
      pip install --no-deps git+https://github.com/keras-team/keras.git --upgrade;
    fi
  - if [[ "$TEST_MODE" == "PEP8" ]]; then
      pip install -e .[pep8];
    elif [[ "$TEST_MODE" == "TESTS" ]]; then
      pip install -e .[tests];
    elif [[ "$TEST_MODE" == "IMPORTS" ]]; then
      pip install .;
    fi
script:
  - if [[ "$TEST_MODE" == "PEP8" ]]; then
      flake8 -v --count;
    elif [[ "$TEST_MODE" == "TESTS" ]]; then
      py.test tests --cov-config .coveragerc --cov=keras_preprocessing tests;
    elif [[ "$TEST_MODE" == "IMPORTS" ]]; then
      python -c "import keras_preprocessing; from keras_preprocessing import image; from keras_preprocessing import sequence; from keras_preprocessing import text";
    fi
|
keras-preprocessing/.travis.yml/0
|
{
"file_path": "keras-preprocessing/.travis.yml",
"repo_id": "keras-preprocessing",
"token_count": 755
}
| 173 |
# -*- coding: utf-8 -*-
"""Utilities for text input preprocessing.
"""
import json
import warnings
from collections import OrderedDict, defaultdict
from hashlib import md5
import numpy as np
maketrans = str.maketrans
def text_to_word_sequence(text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True, split=" "):
    """Splits a text into a list of word tokens.

    Every character listed in `filters` is replaced by `split` before the
    text is split, so punctuation never shows up inside tokens. Empty
    fragments produced by consecutive separators are discarded.

    # Arguments
        text: Input text (string).
        filters: list (or concatenation) of characters to strip, such as
            punctuation. Defaults to basic punctuation plus tabs/newlines.
        lower: boolean. Whether to lowercase the input first.
        split: str. Separator used for word splitting.

    # Returns
        A list of word tokens (possibly empty).
    """
    if lower:
        text = text.lower()
    # Map every filtered character onto the separator in a single pass.
    separator_map = str.maketrans({ch: split for ch in filters})
    pieces = text.translate(separator_map).split(split)
    return [token for token in pieces if token]
def one_hot(text, n,
            filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
            lower=True,
            split=' ',
            analyzer=None):
    """One-hot encodes a text into a list of word indexes in `[1, n]`.

    Thin convenience wrapper around `hashing_trick` that fixes the hashing
    function to Python's builtin `hash`; because of hashing, distinct words
    may collide onto the same index.

    # Arguments
        text: Input text (string).
        n: int. Size of vocabulary.
        filters: characters to filter out before tokenizing.
        lower: boolean. Whether to lowercase the text first.
        split: str. Separator for word splitting.
        analyzer: function. Custom analyzer to split the text.

    # Returns
        List of integers in `[1, n]` (unicity non-guaranteed).
    """
    return hashing_trick(
        text,
        n,
        hash_function=hash,
        filters=filters,
        lower=lower,
        split=split,
        analyzer=analyzer,
    )
def hashing_trick(text, n,
                  hash_function=None,
                  filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                  lower=True,
                  split=' ',
                  analyzer=None):
    """Maps a text to a sequence of indexes in a fixed-size hashing space.

    Index `0` is reserved and never produced; all outputs lie in `[1, n]`.
    Two or more words may collide onto the same index (birthday-problem
    odds depend on `n` and the number of distinct words).

    # Arguments
        text: Input text (string).
        n: Dimension of the hashing space.
        hash_function: defaults to the builtin `hash`; pass `'md5'` for a
            hash that is stable across interpreter runs, or any callable
            mapping a string to an int.
        filters: characters to filter out before tokenizing.
        lower: boolean. Whether to lowercase the text first.
        split: str. Separator for word splitting.
        analyzer: function. Custom analyzer to split the text.

    # Returns
        A list of integer word indices (unicity non-guaranteed).
    """
    if hash_function == 'md5':
        # Stable across runs, unlike the builtin `hash`.
        def hash_function(w):
            return int(md5(w.encode()).hexdigest(), 16)
    elif hash_function is None:
        hash_function = hash
    if analyzer is not None:
        tokens = analyzer(text)
    else:
        tokens = text_to_word_sequence(text,
                                       filters=filters,
                                       lower=lower,
                                       split=split)
    # `% (n - 1) + 1` keeps index 0 reserved.
    return [hash_function(w) % (n - 1) + 1 for w in tokens]
class Tokenizer(object):
    """Text tokenization utility class.
    This class allows to vectorize a text corpus, by turning each
    text into either a sequence of integers (each integer being the index
    of a token in a dictionary) or into a vector where the coefficient
    for each token could be binary, based on word count, based on tf-idf...
    # Arguments
        num_words: the maximum number of words to keep, based
            on word frequency. Only the most common `num_words-1` words will
            be kept.
        filters: a string where each element is a character that will be
            filtered from the texts. The default is all punctuation, plus
            tabs and line breaks, minus the `'` character.
        lower: boolean. Whether to convert the texts to lowercase.
        split: str. Separator for word splitting.
        char_level: if True, every character will be treated as a token.
        oov_token: if given, it will be added to word_index and used to
            replace out-of-vocabulary words during text_to_sequence calls
        analyzer: function. Custom analyzer to split the text.
            The default analyzer is text_to_word_sequence
    By default, all punctuation is removed, turning the texts into
    space-separated sequences of words
    (words maybe include the `'` character). These sequences are then
    split into lists of tokens. They will then be indexed or vectorized.
    `0` is a reserved index that won't be assigned to any word.
    """

    def __init__(self, num_words=None,
                 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                 lower=True,
                 split=' ',
                 char_level=False,
                 oov_token=None,
                 analyzer=None,
                 **kwargs):
        # Legacy support
        if 'nb_words' in kwargs:
            warnings.warn('The `nb_words` argument in `Tokenizer` '
                          'has been renamed `num_words`.')
            num_words = kwargs.pop('nb_words')
        document_count = kwargs.pop('document_count', 0)
        if kwargs:
            raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
        # word -> corpus-wide count, in first-seen order.
        self.word_counts = OrderedDict()
        # word -> number of documents it appears in.
        self.word_docs = defaultdict(int)
        self.filters = filters
        self.split = split
        self.lower = lower
        self.num_words = num_words
        self.document_count = document_count
        self.char_level = char_level
        self.oov_token = oov_token
        # index -> number of documents the indexed word appears in.
        self.index_docs = defaultdict(int)
        # word <-> integer index mappings, populated by `fit_on_texts`.
        self.word_index = {}
        self.index_word = {}
        self.analyzer = analyzer

    def fit_on_texts(self, texts):
        """Updates internal vocabulary based on a list of texts.
        In the case where texts contains lists,
        we assume each entry of the lists to be a token.
        Required before using `texts_to_sequences` or `texts_to_matrix`.
        # Arguments
            texts: can be a list of strings,
                a generator of strings (for memory-efficiency),
                or a list of list of strings.
        """
        for text in texts:
            self.document_count += 1
            if self.char_level or isinstance(text, list):
                # Pre-tokenized input (or char-level): each element is a token.
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(text,
                                                filters=self.filters,
                                                lower=self.lower,
                                                split=self.split)
                else:
                    seq = self.analyzer(text)
            for w in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
            for w in set(seq):
                # In how many documents each word occurs
                self.word_docs[w] += 1
        # Most frequent words get the lowest indices.
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        # forcing the oov_token to index 1 if it exists
        if self.oov_token is None:
            sorted_voc = []
        else:
            sorted_voc = [self.oov_token]
        sorted_voc.extend(wc[0] for wc in wcounts)
        # note that index 0 is reserved, never assigned to an existing word
        self.word_index = dict(
            zip(sorted_voc, list(range(1, len(sorted_voc) + 1))))
        self.index_word = {c: w for w, c in self.word_index.items()}
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c

    def fit_on_sequences(self, sequences):
        """Updates internal vocabulary based on a list of sequences.
        Required before using `sequences_to_matrix`
        (if `fit_on_texts` was never called).
        # Arguments
            sequences: A list of sequence.
                A "sequence" is a list of integer word indices.
        """
        self.document_count += len(sequences)
        for seq in sequences:
            seq = set(seq)
            for i in seq:
                self.index_docs[i] += 1

    def texts_to_sequences(self, texts):
        """Transforms each text in texts to a sequence of integers.
        Only top `num_words-1` most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.
        # Arguments
            texts: A list of texts (strings).
        # Returns
            A list of sequences.
        """
        return list(self.texts_to_sequences_generator(texts))

    def texts_to_sequences_generator(self, texts):
        """Transforms each text in `texts` to a sequence of integers.
        Each item in texts can also be a list,
        in which case we assume each item of that list to be a token.
        Only top `num_words-1` most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.
        # Arguments
            texts: A list of texts (strings).
        # Yields
            Yields individual sequences.
        """
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for text in texts:
            if self.char_level or isinstance(text, list):
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(text,
                                                filters=self.filters,
                                                lower=self.lower,
                                                split=self.split)
                else:
                    seq = self.analyzer(text)
            vect = []
            for w in seq:
                i = self.word_index.get(w)
                if i is not None:
                    # Words above the `num_words` cutoff are treated as OOV.
                    if num_words and i >= num_words:
                        if oov_token_index is not None:
                            vect.append(oov_token_index)
                    else:
                        vect.append(i)
                elif self.oov_token is not None:
                    vect.append(oov_token_index)
            yield vect

    def sequences_to_texts(self, sequences):
        """Transforms each sequence into a list of text.
        Only top `num_words-1` most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.
        # Arguments
            sequences: A list of sequences (list of integers).
        # Returns
            A list of texts (strings)
        """
        return list(self.sequences_to_texts_generator(sequences))

    def sequences_to_texts_generator(self, sequences):
        """Transforms each sequence in `sequences` to a list of texts(strings).
        Each sequence has to a list of integers.
        In other words, sequences should be a list of sequences
        Only top `num_words-1` most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.
        # Arguments
            sequences: A list of sequences.
        # Yields
            Yields individual texts.
        """
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for seq in sequences:
            vect = []
            for num in seq:
                word = self.index_word.get(num)
                if word is not None:
                    # Indices above the cutoff map back to the OOV token.
                    if num_words and num >= num_words:
                        if oov_token_index is not None:
                            vect.append(self.index_word[oov_token_index])
                    else:
                        vect.append(word)
                elif self.oov_token is not None:
                    vect.append(self.index_word[oov_token_index])
            vect = ' '.join(vect)
            yield vect

    def texts_to_matrix(self, texts, mode='binary'):
        """Convert a list of texts to a Numpy matrix.
        # Arguments
            texts: list of strings.
            mode: one of "binary", "count", "tfidf", "freq".
        # Returns
            A Numpy matrix.
        """
        sequences = self.texts_to_sequences(texts)
        return self.sequences_to_matrix(sequences, mode=mode)

    def sequences_to_matrix(self, sequences, mode='binary'):
        """Converts a list of sequences into a Numpy matrix.
        # Arguments
            sequences: list of sequences
                (a sequence is a list of integer word indices).
            mode: one of "binary", "count", "tfidf", "freq"
        # Returns
            A Numpy matrix.
        # Raises
            ValueError: In case of invalid `mode` argument,
                or if the Tokenizer requires to be fit to sample data.
        """
        if not self.num_words:
            if self.word_index:
                num_words = len(self.word_index) + 1
            else:
                raise ValueError('Specify a dimension (`num_words` argument), '
                                 'or fit on some text data first.')
        else:
            num_words = self.num_words
        if mode == 'tfidf' and not self.document_count:
            raise ValueError('Fit the Tokenizer on some data '
                             'before using tfidf mode.')
        x = np.zeros((len(sequences), num_words))
        for i, seq in enumerate(sequences):
            if not seq:
                continue
            counts = defaultdict(int)
            for j in seq:
                if j >= num_words:
                    continue
                counts[j] += 1
            for j, c in list(counts.items()):
                if mode == 'count':
                    x[i][j] = c
                elif mode == 'freq':
                    x[i][j] = c / len(seq)
                elif mode == 'binary':
                    x[i][j] = 1
                elif mode == 'tfidf':
                    # Use weighting scheme 2 in
                    # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
                    tf = 1 + np.log(c)
                    idf = np.log(1 + self.document_count /
                                 (1 + self.index_docs.get(j, 0)))
                    x[i][j] = tf * idf
                else:
                    raise ValueError('Unknown vectorization mode:', mode)
        return x

    def get_config(self):
        '''Returns the tokenizer configuration as Python dictionary.
        The word count dictionaries used by the tokenizer get serialized
        into plain JSON, so that the configuration can be read by other
        projects.
        # Returns
            A Python dictionary with the tokenizer configuration.
        '''
        json_word_counts = json.dumps(self.word_counts)
        json_word_docs = json.dumps(self.word_docs)
        json_index_docs = json.dumps(self.index_docs)
        json_word_index = json.dumps(self.word_index)
        json_index_word = json.dumps(self.index_word)
        return {
            'num_words': self.num_words,
            'filters': self.filters,
            'lower': self.lower,
            'split': self.split,
            'char_level': self.char_level,
            'oov_token': self.oov_token,
            'document_count': self.document_count,
            'word_counts': json_word_counts,
            'word_docs': json_word_docs,
            'index_docs': json_index_docs,
            'index_word': json_index_word,
            'word_index': json_word_index
        }

    def to_json(self, **kwargs):
        """Returns a JSON string containing the tokenizer configuration.
        To load a tokenizer from a JSON string, use
        `keras.preprocessing.text.tokenizer_from_json(json_string)`.
        # Arguments
            **kwargs: Additional keyword arguments
                to be passed to `json.dumps()`.
        # Returns
            A JSON string containing the tokenizer configuration.
        """
        config = self.get_config()
        tokenizer_config = {
            'class_name': self.__class__.__name__,
            'config': config
        }
        return json.dumps(tokenizer_config, **kwargs)
def tokenizer_from_json(json_string):
    """Restores a `Tokenizer` from the JSON produced by `Tokenizer.to_json`.

    # Arguments
        json_string: JSON string encoding a tokenizer configuration.

    # Returns
        A Keras Tokenizer instance
    """
    tokenizer_config = json.loads(json_string)
    config = tokenizer_config.get('config')
    # The nested dictionaries were serialized to JSON strings; decode them
    # and pop them out so the remaining keys match `Tokenizer.__init__`.
    word_counts = json.loads(config.pop('word_counts'))
    word_docs = json.loads(config.pop('word_docs'))
    # json.dumps() turns integer keys into strings; restore them as ints.
    index_docs = {
        int(k): v for k, v in json.loads(config.pop('index_docs')).items()
    }
    index_word = {
        int(k): v for k, v in json.loads(config.pop('index_word')).items()
    }
    word_index = json.loads(config.pop('word_index'))
    tokenizer = Tokenizer(**config)
    tokenizer.word_counts = word_counts
    tokenizer.word_docs = word_docs
    tokenizer.index_docs = index_docs
    tokenizer.word_index = word_index
    tokenizer.index_word = index_word
    return tokenizer
|
keras-preprocessing/keras_preprocessing/text.py/0
|
{
"file_path": "keras-preprocessing/keras_preprocessing/text.py",
"repo_id": "keras-preprocessing",
"token_count": 9086
}
| 174 |
{
"dockerFile": "Dockerfile",
"postCreateCommand": "sh /setup.sh",
"customizations": {
"vscode": {
"settings": {
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.linting.pylintEnabled": false,
"python.testing.pytestEnabled": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"editor.rulers": [
80
]
},
"extensions": [
"ms-python.python",
"ms-python.isort",
"ms-python.flake8",
"ms-python.black-formatter"
]
}
},
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
}
}
|
keras-tuner/.devcontainer/devcontainer.json/0
|
{
"file_path": "keras-tuner/.devcontainer/devcontainer.json",
"repo_id": "keras-tuner",
"token_count": 612
}
| 175 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/guides/keras_tuner/distributed_tuning/'" />
|
keras-tuner/docs/site/tutorials/distributed-tuning/index.html/0
|
{
"file_path": "keras-tuner/docs/site/tutorials/distributed-tuning/index.html",
"repo_id": "keras-tuner",
"token_count": 41
}
| 176 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
from keras_tuner.backend.config import multi_backend
if multi_backend():
    # Multi-backend mode: re-export everything from Keras 3 directly.
    from keras import *  # noqa: F403, F401
else:
    # Legacy mode: re-export `tf.keras` instead.
    import tensorflow as tf
    from tensorflow.keras import *  # noqa: F403, F401

    # Shims to handle symbol renames for older `tf.keras` versions.
    if not hasattr(tf.keras, "saving"):
        # No `saving` submodule at all: build an empty namespace and fill in
        # the needed symbols from `utils` below.
        saving = types.SimpleNamespace()
    else:
        from tensorflow.keras import saving
    from tensorflow.keras import utils

    # Older tf.keras exposed these under `utils` rather than `saving`.
    if not hasattr(saving, "deserialize_keras_object"):
        saving.deserialize_keras_object = utils.deserialize_keras_object
    if not hasattr(saving, "serialize_keras_object"):
        saving.serialize_keras_object = utils.serialize_keras_object
    if not hasattr(saving, "register_keras_serializable"):
        saving.register_keras_serializable = utils.register_keras_serializable
|
keras-tuner/keras_tuner/backend/keras.py/0
|
{
"file_path": "keras-tuner/keras_tuner/backend/keras.py",
"repo_id": "keras-tuner",
"token_count": 480
}
| 177 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
import collections
import statistics
import numpy as np
from keras_tuner import backend
from keras_tuner import errors
from keras_tuner.backend import config
from keras_tuner.backend import keras
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import objective as obj_module
class TunerCallback(keras.callbacks.Callback):
    """Keras callback that forwards training events to the tuner.

    Args:
        tuner: The `Tuner` instance driving the search.
        trial: The `Trial` whose model is currently being trained.
    """

    def __init__(self, tuner, trial):
        super().__init__()
        self.tuner = tuner
        self.trial = trial

    def on_epoch_begin(self, epoch, logs=None):
        self.tuner.on_epoch_begin(self.trial, self.model, epoch, logs=logs)

    def on_batch_begin(self, batch, logs=None):
        self.tuner.on_batch_begin(self.trial, self.model, batch, logs)

    def on_batch_end(self, batch, logs=None):
        self.tuner.on_batch_end(self.trial, self.model, batch, logs)

    def on_epoch_end(self, epoch, logs=None):
        self.tuner.on_epoch_end(self.trial, self.model, epoch, logs=logs)
class SaveBestEpoch(keras.callbacks.Callback):
    """A Keras callback to save the model weights at the best epoch.

    Args:
        objective: An `Objective` instance.
        filepath: String. The file path to save the model weights.
    """

    def __init__(self, objective, filepath):
        super().__init__()
        self.objective = objective
        self.filepath = filepath
        # Initialize so that the first observed value always counts as best.
        if self.objective.direction == "max":
            self.best_value = float("-inf")
        else:
            self.best_value = float("inf")

    def on_epoch_end(self, epoch, logs=None):
        if not self.objective.has_value(logs):
            # Save on every epoch if metric value is not in the logs. Either no
            # objective is specified, or objective is computed and returned
            # after `fit()`.
            self._save_model()
            return
        current_value = self.objective.get_value(logs)
        if self.objective.better_than(current_value, self.best_value):
            self.best_value = current_value
            self._save_model()

    def _save_model(self):
        # Non-TF backends have no distribution strategy to coordinate with.
        if config.backend() != "tensorflow":
            self.model.save_weights(self.filepath)
            return
        # Create temporary saved model files on non-chief workers.
        write_filepath = backend.io.write_filepath(
            self.filepath, self.model.distribute_strategy
        )
        self.model.save_weights(write_filepath)
        # Remove temporary saved model files on non-chief workers.
        backend.io.remove_temp_dir_with_filepath(
            write_filepath, self.model.distribute_strategy
        )
def average_metrics_dicts(metrics_dicts):
    """Averages the metrics dictionaries to one metrics dictionary.

    Args:
        metrics_dicts: A list of dicts mapping metric names to numeric
            values. Metrics missing from some dicts are averaged over the
            dicts that contain them.

    Returns:
        A dict mapping each metric name to the mean of its values.
    """
    collected = collections.defaultdict(list)
    for metrics_dict in metrics_dicts:
        for name, value in metrics_dict.items():
            collected[name].append(value)
    return {name: np.mean(values) for name, values in collected.items()}
def _get_best_value_and_best_epoch_from_history(history, objective):
    """Finds the best epoch of a Keras `History` according to `objective`.

    Args:
        history: A `keras.callbacks.History` returned by `model.fit()`.
        objective: An `Objective` used to compare epochs.

    Returns:
        A tuple `(best_metrics, best_epoch)` where `best_metrics` is the
        metrics dict of the best epoch (including the objective value under
        `objective.name`) and `best_epoch` is its integer index.
    """
    # A dictionary to record the metric values through epochs.
    # Usage: epoch_metric[epoch_number][metric_name] == metric_value
    epoch_metrics = collections.defaultdict(dict)
    for metric_name, epoch_values in history.history.items():
        for epoch, value in enumerate(epoch_values):
            epoch_metrics[epoch][metric_name] = value
    best_epoch = 0
    for epoch, metrics in epoch_metrics.items():
        objective_value = objective.get_value(metrics)
        # Support multi-objective.
        # Record the combined objective value so later comparisons (and the
        # returned dict) can look it up by `objective.name`.
        if objective.name not in metrics:
            metrics[objective.name] = objective_value
        best_value = epoch_metrics[best_epoch][objective.name]
        if objective.better_than(objective_value, best_value):
            best_epoch = epoch
    return epoch_metrics[best_epoch], best_epoch
def convert_to_metrics_dict(results, objective):
    """Convert any supported results type to a metrics dictionary.

    Args:
        results: A float, dict, `keras.callbacks.History`, or a list of any
            of these (lists are averaged element-wise).
        objective: An `Objective` used to name single values and to pick the
            best epoch from a `History`.

    Returns:
        A dict mapping metric names to values. NOTE(review): falls through
        returning `None` for unsupported types; callers are expected to have
        run `validate_trial_results` first.
    """
    # List of multiple exectuion results to be averaged.
    # Check this case first to deal each case individually to check for errors.
    if isinstance(results, list):
        return average_metrics_dicts(
            [convert_to_metrics_dict(elem, objective) for elem in results]
        )
    # Single value.
    if isinstance(results, (int, float, np.floating)):
        return {objective.name: float(results)}
    # A dictionary.
    if isinstance(results, dict):
        return results
    # A History.
    if isinstance(results, keras.callbacks.History):
        best_value, _ = _get_best_value_and_best_epoch_from_history(
            results, objective
        )
        return best_value
def validate_trial_results(results, objective, func_name):
    """Checks that `results` is a supported return value of `func_name`.

    Args:
        results: The value to validate. Supported: a float, a dict that
            contains the objective, a `keras.callbacks.History`, or a list
            of any of these.
        objective: An `Objective` instance (or `DefaultObjective` when the
            user left the objective unspecified).
        func_name: String name of the producing function, used in error
            messages.

    Raises:
        errors.FatalTypeError: If `results` is `None` or an unsupported type.
        errors.FatalValueError: If a dict result is missing the objective.
    """
    if isinstance(results, list):
        for elem in results:
            validate_trial_results(elem, objective, func_name)
        return
    # Single value.
    if isinstance(results, (int, float, np.floating)):
        return
    # None
    if results is None:
        raise errors.FatalTypeError(
            f"The return value of {func_name} is None. "
            "Did you forget to return the metrics? "
        )
    # objective left unspecified,
    # and objective value is not a single float.
    if isinstance(objective, obj_module.DefaultObjective) and not (
        isinstance(results, dict) and objective.name in results
    ):
        # Typo fix: "Recevied" -> "Received".
        raise errors.FatalTypeError(
            f"Expected the return value of {func_name} to be "
            "a single float when `objective` is left unspecified. "
            f"Received return value: {results} of type {type(results)}."
        )
    # A dictionary.
    if isinstance(results, dict):
        if objective.name not in results:
            raise errors.FatalValueError(
                f"Expected the returned dictionary from {func_name} to have "
                f"the specified objective, {objective.name}, "
                "as one of the keys. "
                f"Received: {results}."
            )
        return
    # A History.
    if isinstance(results, keras.callbacks.History):
        return
    # Other unsupported types.
    raise errors.FatalTypeError(
        f"Expected the return value of {func_name} to be "
        "one of float, dict, keras.callbacks.History, "
        "or a list of one of these types. "
        f"Received return value: {results} of type {type(results)}."
    )
def get_best_step(results, objective):
    """Infers the best epoch index from trial results (0 when unknown)."""
    if isinstance(results, list):
        # Average the best epochs across multiple executions.
        per_execution = [get_best_step(elem, objective) for elem in results]
        return int(statistics.mean(per_execution))
    if isinstance(results, keras.callbacks.History):
        # The best epoch is the one with the best objective value.
        _, best_epoch = _get_best_value_and_best_epoch_from_history(
            results, objective
        )
        return best_epoch
    # Plain floats/dicts carry no epoch information.
    return 0
def convert_hyperparams_to_hparams(hyperparams, hparams_api):
    """Converts KerasTuner HyperParameters to TensorBoard HParams.

    Args:
        hyperparams: A KerasTuner `HyperParameters` instance.
        hparams_api: The `tensorboard.plugins.hparams.api` module, passed in
            to avoid a hard TensorBoard dependency.

    Returns:
        A dict mapping `hparams_api.HParam` keys to their current values.

    Raises:
        ValueError: If a hyperparameter type is not recognized.
    """
    hparams = {}
    for hp in hyperparams.space:
        hparams_value = {}
        try:
            hparams_value = hyperparams.get(hp.name)
        except ValueError:  # pragma: no cover
            # Inactive/conditional hyperparameters have no value; skip them.
            continue  # pragma: no cover
        hparams_domain = {}
        if isinstance(hp, hp_module.Choice):
            hparams_domain = hparams_api.Discrete(hp.values)
        elif isinstance(hp, hp_module.Int):
            if hp.step is not None and hp.step != 1:
                # Note: `hp.max_value` is inclusive, unlike the end index
                # of Python `range()`, which is exclusive
                values = list(range(hp.min_value, hp.max_value + 1, hp.step))
                hparams_domain = hparams_api.Discrete(values)
            else:
                hparams_domain = hparams_api.IntInterval(
                    hp.min_value, hp.max_value
                )
        elif isinstance(hp, hp_module.Float):
            if hp.step is not None:
                # Note: `hp.max_value` is inclusive, unlike the end index
                # of Numpy's arange(), which is exclusive
                values = np.arange(
                    hp.min_value, hp.max_value + 1e-7, step=hp.step
                ).tolist()
                hparams_domain = hparams_api.Discrete(values)
            else:
                hparams_domain = hparams_api.RealInterval(
                    hp.min_value, hp.max_value
                )
        elif isinstance(hp, hp_module.Boolean):
            hparams_domain = hparams_api.Discrete([True, False])
        elif isinstance(hp, hp_module.Fixed):
            hparams_domain = hparams_api.Discrete([hp.value])
        else:
            raise ValueError(  # pragma: no cover
                f"`HyperParameter` type not recognized: {hp}"
            )
        hparams_key = hparams_api.HParam(hp.name, hparams_domain)
        hparams[hparams_key] = hparams_value
    return hparams
|
keras-tuner/keras_tuner/engine/tuner_utils.py/0
|
{
"file_path": "keras-tuner/keras_tuner/engine/tuner_utils.py",
"repo_id": "keras-tuner",
"token_count": 4045
}
| 178 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_tuner.backend import keras
from keras_tuner.engine import tuner as tuner_module
from keras_tuner.tuners import randomsearch
def test_update_space(tmp_path):
    # Tests that HyperParameters added after the first call to `build_model`
    # are sent to the Oracle via oracle.update_space.
    def build_model(hp):
        model = keras.Sequential()
        # `units_{i}` hyperparameters only appear once `layers` > 0.
        for i in range(hp.Int("layers", 0, 2)):
            model.add(
                keras.layers.Dense(
                    units=hp.Int(f"units_{str(i)}", 2, 4, 2), activation="relu"
                )
            )
        model.add(keras.layers.Dense(1, activation="sigmoid"))
        model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
        return model

    class MyRandomSearch(randomsearch.RandomSearchOracle):
        def populate_space(self, trial_id):
            result = super().populate_space(trial_id)
            # Force `layers` to 2 so both `units_*` hyperparameters are
            # created during the trial.
            if "values" in result:
                result["values"]["layers"] = 2
            return result

    tuner = tuner_module.Tuner(
        oracle=MyRandomSearch(objective="accuracy", max_trials=1),
        hypermodel=build_model,
        directory=tmp_path,
    )
    # Before searching, only the hyperparameters from the initial build exist.
    assert {hp.name for hp in tuner.oracle.get_space().space} == {"layers"}
    x, y = np.ones((10, 10)), np.ones((10, 1))
    tuner.search(x, y, epochs=1)
    # The hyperparameters discovered mid-search were reported to the oracle.
    assert {hp.name for hp in tuner.oracle.get_space().space} == {
        "layers",
        "units_0",
        "units_1",
    }
def test_exhausted_search_space(tmp_path):
    class MyTuner(randomsearch.RandomSearch):
        def run_trial(self, trial, *args, **kwargs):
            hp = trial.hyperparameters
            hp.Boolean("boolean")
            hp.Boolean("boolean2")
            return [np.random.rand()]

    tuner = MyTuner(
        max_trials=15,
        directory=tmp_path,
    )
    tuner.search()
    # Two booleans give only 2 * 2 = 4 distinct combinations, so the search
    # stops after 4 trials even though max_trials is 15.
    assert len(tuner.oracle.trials) == 4
|
keras-tuner/keras_tuner/tuners/randomsearch_test.py/0
|
{
"file_path": "keras-tuner/keras_tuner/tuners/randomsearch_test.py",
"repo_id": "keras-tuner",
"token_count": 1039
}
| 179 |
# Lint gate: import order, style, formatting, and copyright headers.
# Exits non-zero on the first failing check.

# Check import ordering.
if ! isort -c keras_tuner; then
    echo "Please run \"sh shell/format.sh\" to format the code."
    exit 1
fi

# Check code style.
if ! flake8 keras_tuner; then
    echo "Please fix the code style issue."
    exit 1
fi

# Check formatting.
if ! black --check keras_tuner; then
    echo "Please run \"sh shell/format.sh\" to format the code."
    exit 1
fi

# Every Python file must carry the copyright header.
for i in $(find keras_tuner -name '*.py'); do
    # Quote "$i" so paths with unusual characters don't break grep.
    if ! grep -q Copyright "$i"; then
        echo "Please run \"sh shell/format.sh\" to format the code."
        exit 1
    fi
done
|
keras-tuner/shell/lint.sh/0
|
{
"file_path": "keras-tuner/shell/lint.sh",
"repo_id": "keras-tuner",
"token_count": 202
}
| 180 |
"""Benchmark normalization layers.
To run benchmarks, see the following command for an example, please change the
flag to your custom value:
```
python3 -m benchmarks.layer_benchmark.normalization_benchmark \
--benchmark_name=benchmark_batch_normalization \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def benchmark_batch_normalization(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmarks predict and train steps for `BatchNormalization`."""
    runner = LayerBenchmark(
        "BatchNormalization",
        {},
        input_shape=[256, 256, 4],
        jit_compile=jit_compile,
    )
    runner.benchmark_predict(num_samples=num_samples, batch_size=batch_size)
    runner.benchmark_train(num_samples=num_samples, batch_size=batch_size)
def benchmark_group_normalization(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmarks predict and train steps for `GroupNormalization`."""
    runner = LayerBenchmark(
        "GroupNormalization",
        {"groups": 2},
        input_shape=[256, 256, 4],
        jit_compile=jit_compile,
    )
    runner.benchmark_predict(num_samples=num_samples, batch_size=batch_size)
    runner.benchmark_train(num_samples=num_samples, batch_size=batch_size)
def benchmark_layer_normalization(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmarks predict and train steps for `LayerNormalization`."""
    runner = LayerBenchmark(
        "LayerNormalization",
        {},
        input_shape=[256, 128, 4],
        jit_compile=jit_compile,
    )
    runner.benchmark_predict(num_samples=num_samples, batch_size=batch_size)
    runner.benchmark_train(num_samples=num_samples, batch_size=batch_size)
def benchmark_unit_normalization(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark predict and train throughput of `UnitNormalization`."""
    benchmark = LayerBenchmark(
        "UnitNormalization",
        {},
        input_shape=[256, 128, 4],
        jit_compile=jit_compile,
    )
    # Run both the inference and the training benchmark with one config.
    for run in (benchmark.benchmark_predict, benchmark.benchmark_train):
        run(num_samples=num_samples, batch_size=batch_size)
# Registry mapping the `--benchmark_name` flag value to its implementation.
BENCHMARK_NAMES = {
    "benchmark_batch_normalization": benchmark_batch_normalization,
    "benchmark_group_normalization": benchmark_group_normalization,
    "benchmark_layer_normalization": benchmark_layer_normalization,
    "benchmark_unit_normalization": benchmark_unit_normalization,
}
def main(_):
    """Entry point: run one named benchmark, or all of them.

    Reads `benchmark_name`, `num_samples`, `batch_size` and `jit_compile`
    from absl flags (presumably defined in the base benchmark module —
    only `FLAGS = flags.FLAGS` is visible here). When `benchmark_name`
    is not provided, every benchmark in `BENCHMARK_NAMES` is run.

    Raises:
        ValueError: If `benchmark_name` is not a key of `BENCHMARK_NAMES`.
    """
    benchmark_name = FLAGS.benchmark_name
    num_samples = FLAGS.num_samples
    batch_size = FLAGS.batch_size
    jit_compile = FLAGS.jit_compile
    if benchmark_name is None:
        # No name given: run the full suite. (The key is not needed,
        # so iterate over values directly.)
        for benchmark_fn in BENCHMARK_NAMES.values():
            benchmark_fn(num_samples, batch_size, jit_compile)
        return
    if benchmark_name not in BENCHMARK_NAMES:
        # Use list() so the error shows a clean list of names rather
        # than a `dict_keys([...])` repr.
        raise ValueError(
            f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
            f"be one of {list(BENCHMARK_NAMES)}"
        )
    benchmark_fn = BENCHMARK_NAMES[benchmark_name]
    benchmark_fn(num_samples, batch_size, jit_compile)


if __name__ == "__main__":
    app.run(main)
|
keras/benchmarks/layer_benchmark/normalization_benchmark.py/0
|
{
"file_path": "keras/benchmarks/layer_benchmark/normalization_benchmark.py",
"repo_id": "keras",
"token_count": 1540
}
| 181 |
# flake8: noqa
import os
# Set backend env to JAX
os.environ["KERAS_BACKEND"] = "jax"
import jax
import numpy as np
from keras import Model
from keras import backend
from keras import initializers
from keras import layers
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
    """Minimal dense layer built directly on raw backend Variables."""

    def __init__(self, units, name=None):
        super().__init__(name=name)
        self.units = units

    def build(self, input_shape):
        # Create the weights lazily once the input feature dim is known.
        input_dim = input_shape[-1]
        kernel_value = initializers.GlorotUniform()((input_dim, self.units))
        self.w = backend.Variable(kernel_value, name="kernel")
        bias_value = initializers.Zeros()((self.units,))
        self.b = backend.Variable(bias_value, name="bias")

    def call(self, inputs):
        # Affine transform: inputs @ W + b.
        return ops.matmul(inputs, self.w) + self.b
class MyModel(Model):
    """Three-layer MLP; hidden layers use JAX ReLU, output is linear."""

    def __init__(self, hidden_dim, output_dim):
        super().__init__()
        self.dense1 = MyDense(hidden_dim)
        self.dense2 = MyDense(hidden_dim)
        self.dense3 = MyDense(output_dim)

    def call(self, x):
        hidden = jax.nn.relu(self.dense1(x))
        hidden = jax.nn.relu(self.dense2(hidden))
        return self.dense3(hidden)
def Dataset():
    """Yield 20 random (inputs, targets) batches of shape (32,128)/(32,4)."""
    for _ in range(20):
        features = np.random.random((32, 128))
        targets = np.random.random((32, 4))
        yield features, targets
def loss_fn(y_true, y_pred):
    """Sum-of-squared-errors loss."""
    residual = y_true - y_pred
    return ops.sum(residual**2)
# Instantiate model and optimizer; the dataset is a finite generator.
model = MyModel(hidden_dim=256, output_dim=4)
optimizer = optimizers.SGD(learning_rate=0.001)
dataset = Dataset()

# Build model: one dummy forward pass triggers lazy weight creation.
x = np.random.random((1, 128))
model(x)
# Build optimizer: create its slot variables for the model weights.
optimizer.build(model.trainable_variables)


######### Custom JAX workflow ###############


def compute_loss_and_updates(
    trainable_variables, non_trainable_variables, x, y
):
    """Functional forward pass + loss; returns updated non-trainables as aux."""
    y_pred, non_trainable_variables = model.stateless_call(
        trainable_variables, non_trainable_variables, x
    )
    loss = loss_fn(y, y_pred)
    return loss, non_trainable_variables


# has_aux=True: the function returns (loss, aux); gradients are taken
# w.r.t. the first positional argument (the trainable variables).
grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)


@jax.jit
def train_step(state, data):
    """One SGD step; purely functional — state flows in and out explicitly."""
    trainable_variables, non_trainable_variables, optimizer_variables = state
    x, y = data
    (loss, non_trainable_variables), grads = grad_fn(
        trainable_variables, non_trainable_variables, x, y
    )
    trainable_variables, optimizer_variables = optimizer.stateless_apply(
        optimizer_variables, grads, trainable_variables
    )
    # Return updated state
    return loss, (
        trainable_variables,
        non_trainable_variables,
        optimizer_variables,
    )


# Gather the initial state as plain (traceable) value lists.
trainable_variables = model.trainable_variables
non_trainable_variables = model.non_trainable_variables
optimizer_variables = optimizer.variables
state = trainable_variables, non_trainable_variables, optimizer_variables
# Training loop
for data in dataset:
    loss, state = train_step(state, data)
    print("Loss:", loss)
# Post-processing model state update: write the final functional state
# back into the live Keras variables.
trainable_variables, non_trainable_variables, optimizer_variables = state
for variable, value in zip(model.trainable_variables, trainable_variables):
    variable.assign(value)
for variable, value in zip(
    model.non_trainable_variables, non_trainable_variables
):
    variable.assign(value)
|
keras/examples/demo_custom_jax_workflow.py/0
|
{
"file_path": "keras/examples/demo_custom_jax_workflow.py",
"repo_id": "keras",
"token_count": 1300
}
| 182 |
import types
from keras.activations.activations import elu
from keras.activations.activations import exponential
from keras.activations.activations import gelu
from keras.activations.activations import hard_sigmoid
from keras.activations.activations import hard_silu
from keras.activations.activations import leaky_relu
from keras.activations.activations import linear
from keras.activations.activations import log_softmax
from keras.activations.activations import mish
from keras.activations.activations import relu
from keras.activations.activations import relu6
from keras.activations.activations import selu
from keras.activations.activations import sigmoid
from keras.activations.activations import silu
from keras.activations.activations import softmax
from keras.activations.activations import softplus
from keras.activations.activations import softsign
from keras.activations.activations import tanh
from keras.api_export import keras_export
from keras.saving import object_registration
from keras.saving import serialization_lib
# All built-in activation functions exposed by this module.
ALL_OBJECTS = {
    relu,
    leaky_relu,
    relu6,
    softmax,
    elu,
    selu,
    softplus,
    softsign,
    silu,
    gelu,
    tanh,
    sigmoid,
    exponential,
    hard_sigmoid,
    hard_silu,
    linear,
    mish,
    log_softmax,
}

# Name -> function lookup table used by `deserialize` and `get`.
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}

# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
    """Return a serializable representation of an activation.

    For built-in activations this is just the function name (a string);
    for custom callables it is the full serialization config, with the
    registered name substituted when the function is not defined in this
    module.

    Raises:
        ValueError: If the serialized config has no "config" entry,
            i.e. the activation could not be resolved to a valid name.
    """
    fn_config = serialization_lib.serialize_keras_object(activation)
    if "config" not in fn_config:
        raise ValueError(
            f"Unknown activation function '{activation}' cannot be "
            "serialized due to invalid function name. Make sure to use "
            "an activation name that matches the references defined in "
            "activations.py or use "
            "`@keras.saving.register_keras_serializable()`"
            "to register any custom activations. "
            f"config={fn_config}"
        )
    if not isinstance(activation, types.FunctionType):
        # Case for additional custom activations represented by objects
        return fn_config
    if (
        isinstance(fn_config["config"], str)
        and fn_config["config"] not in globals()
    ):
        # Case for custom activation functions from external activations
        # modules: use the registered name instead of the bare __name__.
        fn_config["config"] = object_registration.get_registered_name(
            activation
        )
        return fn_config
    # Case for keras.activations builtins (simply return name)
    return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras activation function via its config."""
    lookup_kwargs = {
        "module_objects": ALL_OBJECTS_DICT,
        "custom_objects": custom_objects,
    }
    return serialization_lib.deserialize_keras_object(config, **lookup_kwargs)
@keras_export("keras.activations.get")
def get(identifier):
    """Retrieve a Keras activation function via an identifier.

    Accepts `None` (returns the identity `linear`), a config dict,
    a built-in activation name, or a callable (returned as-is).
    """
    if identifier is None:
        return linear
    if isinstance(identifier, dict):
        candidate = deserialize(identifier)
    elif isinstance(identifier, str):
        candidate = ALL_OBJECTS_DICT.get(identifier)
    else:
        candidate = identifier
    if callable(candidate):
        return candidate
    raise ValueError(
        f"Could not interpret activation function identifier: {identifier}"
    )
|
keras/keras/activations/__init__.py/0
|
{
"file_path": "keras/keras/activations/__init__.py",
"repo_id": "keras",
"token_count": 1275
}
| 183 |
import warnings
from keras import backend
from keras import layers
from keras.api_export import keras_export
from keras.applications import imagenet_utils
from keras.models import Functional
from keras.ops import operation_utils
from keras.utils import file_utils
BASE_WEIGHT_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/"
)
WEIGHTS_HASHES = {
"large_224_0.75_float": (
"765b44a33ad4005b3ac83185abf1d0eb",
"40af19a13ebea4e2ee0c676887f69a2e",
),
"large_224_1.0_float": (
"59e551e166be033d707958cf9e29a6a7",
"07fb09a5933dd0c8eaafa16978110389",
),
"large_minimalistic_224_1.0_float": (
"675e7b876c45c57e9e63e6d90a36599c",
"ec5221f64a2f6d1ef965a614bdae7973",
),
"small_224_0.75_float": (
"cb65d4e5be93758266aa0a7f2c6708b7",
"ebdb5cc8e0b497cd13a7c275d475c819",
),
"small_224_1.0_float": (
"8768d4c2e7dee89b9d02b2d03d65d862",
"d3e8ec802a04aa4fc771ee12a9a9b836",
),
"small_minimalistic_224_1.0_float": (
"99cd97fb2fcdad2bf028eb838de69e37",
"cde8136e733e811080d9fcd8a252f7e4",
),
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Searching for MobileNetV3](
https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
The following table describes the performance of MobileNets v3:
------------------------------------------------------------------------
MACs stands for Multiply Adds
|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
|---|---|---|---|---|
| mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
| mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
| mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
| mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
| mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
| mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNetV3, by default input preprocessing is included as a part of the
model (as a `Rescaling` layer), and thus
`keras.applications.mobilenet_v3.preprocess_input` is actually a
pass-through function. In this use case, MobileNetV3 models expect their
inputs to be float tensors of pixels with values in the `[0-255]` range.
At the same time, preprocessing as a part of the model (i.e. `Rescaling`
layer) can be disabled by setting `include_preprocessing` argument to `False`.
With preprocessing disabled MobileNetV3 models expect their inputs to be float
tensors of pixels with values in the `[-1, 1]` range.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
`(224, 224, 3)`.
It should have exactly 3 inputs channels.
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha < 1.0`, proportionally decreases the number
of filters in each layer.
- If `alpha > 1.0`, proportionally increases the number
of filters in each layer.
- If `alpha == 1`, default number of filters from the paper
are used at each layer.
minimalistic: In addition to large and small models this module also
contains so-called minimalistic models, these models have the same
per-layer dimensions characteristic as MobilenetV3 however, they don't
utilize any of the advanced blocks (squeeze-and-excite units,
hard-swish, and 5x5 convolutions).
While these models are less efficient on CPU, they
are much more performant on GPU/DSP.
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Integer, optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
dropout_rate: fraction of the input units to drop on the last layer.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
include_preprocessing: Boolean, whether to include the preprocessing
layer (`Rescaling`) at the bottom of the network. Defaults to `True`.
Call arguments:
inputs: A floating point `numpy.array` or backend-native tensor,
4D with 3 color channels, with values in the range `[0, 255]`
if `include_preprocessing` is `True` and in the range `[-1, 1]`
otherwise.
Returns:
A model instance.
"""
def MobileNetV3(
    stack_fn,
    last_point_ch,
    input_shape=None,
    alpha=1.0,
    model_type="large",
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    """Shared builder for the MobileNetV3 Small/Large variants.

    `stack_fn` builds the variant-specific stack of inverted residual
    blocks; `last_point_ch` is the channel count of the final pointwise
    conv (scaled by `alpha` when `alpha > 1`). See `BASE_DOCSTRING` for
    the remaining arguments. Returns a `Functional` model, loading
    ImageNet weights when requested.
    """
    # Validate the `weights` argument: named preset or an existing file.
    if not (weights in {"imagenet", None} or file_utils.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. "
            f"Received weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights="imagenet"` with `include_top` '
            "as true, `classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    operation_utils.get_source_inputs(input_tensor)
                )
            except ValueError:
                raise ValueError(
                    "input_tensor: ",
                    input_tensor,
                    "is not type input_tensor. "
                    f"Received type(input_tensor)={type(input_tensor)}",
                )
        if is_input_t_tensor:
            if backend.image_data_format() == "channels_first":
                # Compare the spatial dim that follows the channel axis.
                if input_tensor.shape[1] != input_shape[1]:
                    raise ValueError(
                        "When backend.image_data_format()=channels_first, "
                        "input_shape[1] must equal "
                        "input_tensor.shape[1]. Received "
                        f"input_shape={input_shape}, "
                        "input_tensor.shape="
                        f"{input_tensor.shape}"
                    )
            else:
                if input_tensor.shape[2] != input_shape[1]:
                    raise ValueError(
                        "input_shape[1] must equal "
                        "input_tensor.shape[2]. Received "
                        f"input_shape={input_shape}, "
                        "input_tensor.shape="
                        f"{input_tensor.shape}"
                    )
        else:
            raise ValueError(
                "input_tensor specified: ",
                input_tensor,
                "is not a keras tensor",
            )

    # If input_shape is None, infer shape from input_tensor
    if input_shape is None and input_tensor is not None:
        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError(
                "input_tensor: ",
                input_tensor,
                "is type: ",
                type(input_tensor),
                "which is not a valid type",
            )

        if backend.is_keras_tensor(input_tensor):
            if backend.image_data_format() == "channels_first":
                rows = input_tensor.shape[2]
                cols = input_tensor.shape[3]
                input_shape = (3, cols, rows)
            else:
                rows = input_tensor.shape[1]
                cols = input_tensor.shape[2]
                input_shape = (cols, rows, 3)
    # If input_shape is None and input_tensor is None using standard shape
    if input_shape is None and input_tensor is None:
        if backend.image_data_format() == "channels_last":
            input_shape = (None, None, 3)
        else:
            input_shape = (3, None, None)

    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    if rows and cols and (rows < 32 or cols < 32):
        raise ValueError(
            "Input size must be at least 32x32; Received `input_shape="
            f"{input_shape}`"
        )
    if weights == "imagenet":
        # Pretrained weights only exist for specific alpha values.
        if (
            not minimalistic
            and alpha not in [0.75, 1.0]
            or minimalistic
            and alpha != 1.0
        ):
            raise ValueError(
                "If imagenet weights are being loaded, "
                "alpha can be one of `0.75`, `1.0` for non minimalistic "
                "or `1.0` for minimalistic only."
            )

        if rows != cols or rows != 224:
            warnings.warn(
                "`input_shape` is undefined or non-square, "
                "or `rows` is not 224. "
                "Weights for input shape (224, 224) will be "
                "loaded as the default.",
                stacklevel=2,
            )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    # Minimalistic variants use only plain ReLU, 3x3 convs and no SE blocks.
    if minimalistic:
        kernel = 3
        activation = relu
        se_ratio = None
    else:
        kernel = 5
        activation = hard_swish
        se_ratio = 0.25

    x = img_input
    if include_preprocessing:
        # Map [0, 255] pixel values to [-1, 1].
        x = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(x)
    # Stem: strided 3x3 conv + BN + activation.
    x = layers.Conv2D(
        16,
        kernel_size=3,
        strides=(2, 2),
        padding="same",
        use_bias=False,
        name="conv",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="conv_bn"
    )(x)
    x = activation(x)

    # Variant-specific inverted residual stack.
    x = stack_fn(x, kernel, activation, se_ratio)

    last_conv_ch = _depth(x.shape[channel_axis] * 6)

    # if the width multiplier is greater than 1 we
    # increase the number of output channels
    if alpha > 1.0:
        last_point_ch = _depth(last_point_ch * alpha)
    x = layers.Conv2D(
        last_conv_ch,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name="conv_1",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="conv_1_bn"
    )(x)
    x = activation(x)
    if include_top:
        # Classification head implemented with 1x1 convs on the pooled map.
        x = layers.GlobalAveragePooling2D(keepdims=True)(x)
        x = layers.Conv2D(
            last_point_ch,
            kernel_size=1,
            padding="same",
            use_bias=True,
            name="conv_2",
        )(x)
        x = activation(x)

        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate)(x)
        x = layers.Conv2D(
            classes, kernel_size=1, padding="same", name="logits"
        )(x)
        x = layers.Flatten()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Activation(
            activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = operation_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Functional(inputs, x, name="MobilenetV3" + model_type)

    # Load weights.
    if weights == "imagenet":
        model_name = "{}{}_224_{}_float".format(
            model_type, "_minimalistic" if minimalistic else "", str(alpha)
        )
        if include_top:
            file_name = "weights_mobilenet_v3_" + model_name + ".h5"
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = "weights_mobilenet_v3_" + model_name + "_no_top_v2.h5"
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = file_utils.get_file(
            file_name,
            BASE_WEIGHT_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
@keras_export("keras.applications.MobileNetV3Small")
def MobileNetV3Small(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    # Docstring is assigned from BASE_DOCSTRING at the bottom of the file.
    def stack_fn(x, kernel, activation, se_ratio):
        # Scale each block's filter count by the width multiplier.
        def depth(d):
            return _depth(d * alpha)

        # (expansion ratio, filters, kernel, stride, se_ratio, activation, id)
        # per the MobileNetV3-Small table in the paper.
        x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
        x = _inverted_res_block(x, 72.0 / 16, depth(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 88.0 / 24, depth(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(
            x, 4, depth(40), kernel, 2, se_ratio, activation, 3
        )
        x = _inverted_res_block(
            x, 6, depth(40), kernel, 1, se_ratio, activation, 4
        )
        x = _inverted_res_block(
            x, 6, depth(40), kernel, 1, se_ratio, activation, 5
        )
        x = _inverted_res_block(
            x, 3, depth(48), kernel, 1, se_ratio, activation, 6
        )
        x = _inverted_res_block(
            x, 3, depth(48), kernel, 1, se_ratio, activation, 7
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 2, se_ratio, activation, 8
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 1, se_ratio, activation, 9
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 1, se_ratio, activation, 10
        )
        return x

    return MobileNetV3(
        stack_fn,
        1024,
        input_shape,
        alpha,
        "small",
        minimalistic,
        include_top,
        weights,
        input_tensor,
        classes,
        pooling,
        dropout_rate,
        classifier_activation,
        include_preprocessing,
    )
@keras_export("keras.applications.MobileNetV3Large")
def MobileNetV3Large(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    # Docstring is assigned from BASE_DOCSTRING at the bottom of the file.
    def stack_fn(x, kernel, activation, se_ratio):
        # Scale each block's filter count by the width multiplier.
        def depth(d):
            return _depth(d * alpha)

        # (expansion ratio, filters, kernel, stride, se_ratio, activation, id)
        # per the MobileNetV3-Large table in the paper.
        x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
        x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
        x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
        x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
        x = _inverted_res_block(
            x, 6, depth(112), 3, 1, se_ratio, activation, 10
        )
        x = _inverted_res_block(
            x, 6, depth(112), 3, 1, se_ratio, activation, 11
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 2, se_ratio, activation, 12
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 1, se_ratio, activation, 13
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 1, se_ratio, activation, 14
        )
        return x

    return MobileNetV3(
        stack_fn,
        1280,
        input_shape,
        alpha,
        "large",
        minimalistic,
        include_top,
        weights,
        input_tensor,
        classes,
        pooling,
        dropout_rate,
        classifier_activation,
        include_preprocessing,
    )
# Fill in each variant's public docstring from the shared template.
MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Small")
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Large")
def relu(x):
    """Apply a plain ReLU as a functional helper (new layer per call)."""
    activation_layer = layers.ReLU()
    return activation_layer(x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: relu6(x + 3) * (1/6)."""
    clipped = layers.ReLU(6.0)(x + 3.0)
    return clipped * (1.0 / 6.0)
def hard_swish(x):
    """Apply the built-in "hard_swish" activation via an Activation layer."""
    activation_layer = layers.Activation("hard_swish")
    return activation_layer(x)
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _se_block(inputs, filters, se_ratio, prefix):
    """Squeeze-and-excite block: global pool -> bottleneck -> gate -> scale."""
    squeezed = layers.GlobalAveragePooling2D(
        keepdims=True, name=prefix + "squeeze_excite_avg_pool"
    )(inputs)
    # Bottleneck conv reduces channels by `se_ratio`.
    squeezed = layers.Conv2D(
        _depth(filters * se_ratio),
        kernel_size=1,
        padding="same",
        name=prefix + "squeeze_excite_conv",
    )(squeezed)
    squeezed = layers.ReLU(name=prefix + "squeeze_excite_relu")(squeezed)
    # Restore channels, then squash into [0, 1] gates.
    squeezed = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        name=prefix + "squeeze_excite_conv_1",
    )(squeezed)
    gate = hard_sigmoid(squeezed)
    return layers.Multiply(name=prefix + "squeeze_excite_mul")([inputs, gate])
def _inverted_res_block(
    x, expansion, filters, kernel_size, stride, se_ratio, activation, block_id
):
    """MobileNetV2/V3-style inverted residual block.

    Structure: (optional 1x1 expand) -> depthwise conv -> (optional SE)
    -> 1x1 project, with a residual connection when the spatial and
    channel dimensions are preserved.
    """
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    shortcut = x
    prefix = "expanded_conv_"
    infilters = x.shape[channel_axis]
    # Block 0 (block_id falsy) skips the expansion conv entirely.
    if block_id:
        # Expand
        prefix = f"expanded_conv_{block_id}_"
        x = layers.Conv2D(
            _depth(infilters * expansion),
            kernel_size=1,
            padding="same",
            use_bias=False,
            name=prefix + "expand",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            epsilon=1e-3,
            momentum=0.999,
            name=prefix + "expand_bn",
        )(x)
        x = activation(x)

    if stride == 2:
        # Explicit zero padding so the strided depthwise conv (run with
        # "valid" padding below) matches the reference implementation.
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, kernel_size),
            name=prefix + "depthwise_pad",
        )(x)
    x = layers.DepthwiseConv2D(
        kernel_size,
        strides=stride,
        padding="same" if stride == 1 else "valid",
        use_bias=False,
        name=prefix + "depthwise",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "depthwise_bn",
    )(x)
    x = activation(x)

    if se_ratio:
        x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)

    # Project back down to `filters` channels (linear bottleneck: no
    # activation after the projection BN).
    x = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name=prefix + "project",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "project_bn",
    )(x)

    # Residual connection only when shape is unchanged.
    if stride == 1 and infilters == filters:
        x = layers.Add(name=prefix + "add")([shortcut, x])
    return x
@keras_export("keras.applications.mobilenet_v3.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder method for backward compatibility.

    The preprocessing logic has been included in the mobilenet_v3 model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and only kept as a
    placeholder to align the API surface between old and new version of model.

    Args:
        x: A floating point `numpy.array` or a tensor.
        data_format: Optional data format of the image tensor/array.
            `None` means the global setting
            `keras.config.image_data_format()` is used
            (unless you changed it, it uses `"channels_last"`).
            Defaults to `None`.

    Returns:
        Unchanged `numpy.array` or tensor.
    """
    # Intentional no-op: preprocessing lives inside the model itself.
    return x
@keras_export("keras.applications.mobilenet_v3.decode_predictions")
def decode_predictions(preds, top=5):
    # Delegate to the shared ImageNet decoding helper.
    return imagenet_utils.decode_predictions(preds, top=top)


# Reuse the shared helper's docstring verbatim.
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
|
keras/keras/applications/mobilenet_v3.py/0
|
{
"file_path": "keras/keras/applications/mobilenet_v3.py",
"repo_id": "keras",
"token_count": 11053
}
| 184 |
import tree
from keras.api_export import keras_export
from keras.utils.naming import auto_name
@keras_export("keras.KerasTensor")
class KerasTensor:
    """Symbolic tensor -- encapsulates a shape and a dtype.

    You can use `KerasTensor` instances to build computation
    graphs of Keras operations, such as `keras.Function`
    objects or Functional `keras.models.Model` objects.

    Example:

    >>> x = keras.KerasTensor(shape=(3, 4), dtype="float32")
    >>> x.shape
    (3, 4)
    >>> x.dtype
    float32

    Calling a Keras operation (including a layer or a model)
    on a `KerasTensor` instance will return another `KerasTensor`
    instance with the appropriate shape and dtype. This is
    called a "symbolic call" (since there is no actual data
    involved). The computation of the correct output shape and
    dtype is called "static shape inference".
    """

    def __init__(
        self,
        shape,
        dtype="float32",
        sparse=False,
        record_history=True,
        name=None,
    ):
        # Imported locally to avoid a circular import at module load time.
        from keras import backend

        self.shape = backend.standardize_shape(shape)
        self.dtype = backend.standardize_dtype(dtype)
        self.sparse = sparse
        # Auto-generate a unique name when none is provided.
        self.name = name or auto_name(self.__class__.__name__)
        self.record_history = record_history

    @property
    def ndim(self):
        # Rank of the tensor (number of dimensions).
        return len(self.shape)

    def reshape(self, newshape):
        from keras import ops

        return ops.Reshape(newshape)(self)

    def squeeze(self, axis=None):
        from keras import ops

        return ops.Squeeze(axis)(self)

    def __array__(self):
        # Block implicit NumPy conversion with an explanatory error.
        # NOTE(review): "an a dtype" is a typo in the runtime message;
        # kept byte-identical here.
        raise ValueError(
            "A KerasTensor is symbolic: it's a placeholder for a shape "
            "an a dtype. It doesn't have any actual numerical value. "
            "You cannot convert it to a NumPy array."
        )

    def __jax_array__(self):
        # Block direct use as a JAX array with a how-to-fix message.
        raise ValueError(
            "A KerasTensor cannot be used as input to a JAX function. "
            "A KerasTensor is a symbolic placeholder for a shape and dtype, "
            "used when constructing Keras Functional models "
            "or Keras Functions. You can only use it as input to a Keras layer "
            "or a Keras operation (from the namespaces `keras.layers` "
            "and `keras.operations`). "
            "You are likely doing something like:\n\n"
            "```\n"
            "x = Input(...)\n"
            "...\n"
            "jax_fn(x)  # Invalid.\n"
            "```\n\n"
            "What you should do instead is wrap `jax_fn` in a layer:\n\n"
            "```\n"
            "class MyLayer(Layer):\n"
            "    def call(self, x):\n"
            "        return jax_fn(x)\n\n"
            "x = MyLayer()(x)\n"
            "```\n"
        )

    def __tf_tensor__(self, dtype=None, name=None):
        # Block direct use as a TensorFlow tensor with a how-to-fix message.
        raise ValueError(
            "A KerasTensor cannot be used as input to a TensorFlow function. "
            "A KerasTensor is a symbolic placeholder for a shape and dtype, "
            "used when constructing Keras Functional models "
            "or Keras Functions. You can only use it as input to a Keras layer "
            "or a Keras operation (from the namespaces `keras.layers` "
            "and `keras.operations`). "
            "You are likely doing something like:\n\n"
            "```\n"
            "x = Input(...)\n"
            "...\n"
            "tf_fn(x)  # Invalid.\n"
            "```\n\n"
            "What you should do instead is wrap `tf_fn` in a layer:\n\n"
            "```\n"
            "class MyLayer(Layer):\n"
            "    def call(self, x):\n"
            "        return tf_fn(x)\n\n"
            "x = MyLayer()(x)\n"
            "```\n"
        )

    def __repr__(self):
        return (
            f"<KerasTensor shape={self.shape}, dtype={self.dtype}, "
            f"sparse={self.sparse}, name={self.name}>"
        )

    def __iter__(self):
        raise NotImplementedError(
            "Iterating over a symbolic KerasTensor is not supported."
        )

    def __bool__(self):
        raise TypeError("A symbolic KerasTensor cannot be used as a boolean.")

    # Arithmetic/comparison dunders: each delegates to the matching Keras
    # op's `symbolic_call`, so operator syntax builds graph nodes. `ops`
    # is imported locally in every method to avoid circular imports.
    def __add__(self, other):
        from keras import ops

        return ops.Add().symbolic_call(self, other)

    def __radd__(self, other):
        from keras import ops

        return ops.Add().symbolic_call(other, self)

    def __sub__(self, other):
        from keras import ops

        return ops.Subtract().symbolic_call(self, other)

    def __rsub__(self, other):
        from keras import ops

        return ops.Subtract().symbolic_call(other, self)

    def __mul__(self, other):
        from keras import ops

        return ops.Multiply().symbolic_call(self, other)

    def __rmul__(self, other):
        from keras import ops

        return ops.Multiply().symbolic_call(other, self)

    def __matmul__(self, other):
        from keras import ops

        return ops.Matmul().symbolic_call(self, other)

    def __rmatmul__(self, other):
        from keras import ops

        return ops.Matmul().symbolic_call(other, self)

    # NOTE(review): __div__/__rdiv__ are Python 2 protocol names and are
    # never invoked by Python 3 itself; kept for API parity.
    def __div__(self, other):
        from keras import ops

        return ops.Divide().symbolic_call(self, other)

    def __rdiv__(self, other):
        from keras import ops

        return ops.Divide().symbolic_call(other, self)

    def __truediv__(self, other):
        from keras import ops

        return ops.TrueDivide().symbolic_call(self, other)

    def __rtruediv__(self, other):
        from keras import ops

        return ops.TrueDivide().symbolic_call(other, self)

    def __neg__(self):
        from keras import ops

        return ops.Negative().symbolic_call(self)

    def __abs__(self):
        from keras import ops

        return ops.Absolute().symbolic_call(self)

    def __pow__(self, other):
        from keras import ops

        return ops.Power().symbolic_call(self, other)

    def __rpow__(self, other):
        from keras import ops

        return ops.Power().symbolic_call(other, self)

    def __floordiv__(self, other):
        from keras import ops

        return ops.FloorDivide().symbolic_call(self, other)

    def __rfloordiv__(self, other):
        from keras import ops

        return ops.FloorDivide().symbolic_call(other, self)

    def __mod__(self, other):
        from keras import ops

        return ops.Mod().symbolic_call(self, other)

    def __rmod__(self, other):
        from keras import ops

        return ops.Mod().symbolic_call(other, self)

    def __lt__(self, other):
        from keras import ops

        return ops.Less().symbolic_call(self, other)

    def __le__(self, other):
        from keras import ops

        return ops.LessEqual().symbolic_call(self, other)

    def __gt__(self, other):
        from keras import ops

        return ops.Greater().symbolic_call(self, other)

    def __ge__(self, other):
        from keras import ops

        return ops.GreaterEqual().symbolic_call(self, other)

    # NOTE(review): __ne__ is overridden without a matching __eq__;
    # equality still uses the default identity semantics — confirm this
    # asymmetry is intentional.
    def __ne__(self, other):
        from keras import ops

        return ops.NotEqual().symbolic_call(self, other)

    def __and__(self, other):
        from keras import ops

        return ops.LogicalAnd().symbolic_call(self, other)

    def __rand__(self, other):
        from keras import ops

        return ops.LogicalAnd().symbolic_call(other, self)

    def __or__(self, other):
        from keras import ops

        return ops.LogicalOr().symbolic_call(self, other)

    def __ror__(self, other):
        from keras import ops

        return ops.LogicalOr().symbolic_call(other, self)

    def __invert__(self):
        from keras import ops

        return ops.LogicalNot().symbolic_call(self)

    def __xor__(self, other):
        from keras import ops

        return ops.LogicalXor().symbolic_call(self, other)

    def __rxor__(self, other):
        from keras import ops

        return ops.LogicalXor().symbolic_call(other, self)

    def __getitem__(self, key):
        from keras import ops

        return ops.GetItem().symbolic_call(self, key)
def any_symbolic_tensors(args=None, kwargs=None):
    """Return True if any leaf of `(args, kwargs)` is a `KerasTensor`.

    Used by ops to decide between symbolic dispatch and eager execution.
    """
    flattened = tree.flatten((args or (), kwargs or {}))
    return any(isinstance(leaf, KerasTensor) for leaf in flattened)
@keras_export(["keras.utils.is_keras_tensor", "keras.backend.is_keras_tensor"])
def is_keras_tensor(x):
    """Returns whether `x` is a Keras tensor.

    A "Keras tensor" is a *symbolic tensor*, such as a tensor
    that was created via `Input()`. A "symbolic tensor"
    can be understood as a placeholder -- it does not
    contain any actual numerical data, only a shape and dtype.
    It can be used for building Functional models, but it
    cannot be used in actual computations.

    Args:
        x: Object to check.

    Returns:
        bool. `True` iff `x` is an instance of `KerasTensor`.
    """
    return isinstance(x, KerasTensor)
|
keras/keras/backend/common/keras_tensor.py/0
|
{
"file_path": "keras/keras/backend/common/keras_tensor.py",
"repo_id": "keras",
"token_count": 3913
}
| 185 |
import jax
import jax.numpy as jnp
import jax.scipy as jsp
from keras.backend import config
from keras.backend import standardize_dtype
from keras.backend.common import dtypes
from keras.backend.jax.core import cast
from keras.backend.jax.core import convert_to_tensor
def cholesky(a):
out = jnp.linalg.cholesky(a)
if jnp.any(jnp.isnan(out)):
raise ValueError(
"Cholesky decomposition failed. "
"The input might not be a valid positive definite matrix."
)
return out
def det(a):
    # Determinant of the (possibly batched) square matrix `a`.
    return jnp.linalg.det(a)


def eig(x):
    # Eigenvalues and right eigenvectors of a general square matrix.
    return jnp.linalg.eig(x)


def inv(a):
    # Multiplicative inverse of the (possibly batched) square matrix `a`.
    return jnp.linalg.inv(a)


def lu_factor(x):
    # `jsp.linalg.lu_factor` handles 2D inputs only; wrap it in one `vmap`
    # per leading batch dimension so any rank >= 2 input is factored
    # batch-wise.
    lu_factor_fn = jsp.linalg.lu_factor
    if x.ndim > 2:
        for i in range(x.ndim - 2):
            lu_factor_fn = jax.vmap(lu_factor_fn)
    return lu_factor_fn(x)
def norm(x, ord=None, axis=None, keepdims=False):
    # Vector/matrix norm with the backend-wide dtype rule: int64 falls back
    # to the configured floatx; every other dtype is promoted to its float
    # result type before the computation.
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return jnp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
def qr(x, mode="reduced"):
    """QR decomposition of `x`.

    Args:
        x: Input matrix (or batch of matrices).
        mode: Either `"reduced"` or `"complete"`.

    Raises:
        ValueError: for any other `mode` value.
    """
    if mode in ("reduced", "complete"):
        return jnp.linalg.qr(x, mode=mode)
    raise ValueError(
        "`mode` argument value not supported. "
        "Expected one of {'reduced', 'complete'}. "
        f"Received: mode={mode}"
    )
def solve(a, b):
    # Solve the linear system `a @ x = b` for x.
    return jnp.linalg.solve(a, b)


def solve_triangular(a, b, lower=False):
    # Solve `a @ x = b` where `a` is (upper- or lower-) triangular.
    return jsp.linalg.solve_triangular(a, b, lower=lower)


def svd(x, full_matrices=True, compute_uv=True):
    # Singular value decomposition, passed straight through to jnp.
    return jnp.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
|
keras/keras/backend/jax/linalg.py/0
|
{
"file_path": "keras/keras/backend/jax/linalg.py",
"repo_id": "keras",
"token_count": 809
}
| 186 |
import numpy as np
import tree
from keras.backend import config
from keras.backend import standardize_dtype
from keras.backend.common import dtypes
from keras.backend.numpy.core import convert_to_tensor
def add(x1, x2):
    """Elementwise addition with Keras dtype promotion.

    Python scalars are deliberately left unconverted until the result dtype
    is resolved, so their Python `type` participates in the promotion via
    `dtypes.result_type`.
    """
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return np.add(x1, x2)


def einsum(subscripts, *operands, **kwargs):
    """Einstein summation over `operands`, cast to the promoted result dtype."""
    operands = tree.map_structure(convert_to_tensor, operands)
    dtypes_to_resolve = []
    for x in operands:
        dtypes_to_resolve.append(getattr(x, "dtype", type(x)))
    result_dtype = dtypes.result_type(*dtypes_to_resolve)
    compute_dtype = result_dtype
    # TODO: np.einsum doesn't support bfloat16
    if compute_dtype == "bfloat16":
        compute_dtype = "float32"
    operands = tree.map_structure(lambda x: x.astype(compute_dtype), operands)
    return np.einsum(subscripts, *operands, **kwargs).astype(result_dtype)
def subtract(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.subtract(x1, x2)
def matmul(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
# When both x1 and x2 are of int8, we cast the outputs to int32 to align
# with jax
x1_dtype = standardize_dtype(x1.dtype)
x2_dtype = standardize_dtype(x2.dtype)
if x1_dtype == "int8" and x2_dtype == "int8":
dtype = "int32"
else:
dtype = dtypes.result_type(x1.dtype, x2.dtype)
return np.matmul(x1, x2).astype(dtype)
def multiply(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
result_dtype = dtypes.result_type(x.dtype, "float32")
else:
result_dtype = ori_dtype
return np.mean(x, axis=axis, keepdims=keepdims).astype(result_dtype)
def max(x, axis=None, keepdims=False, initial=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.max(x, axis=axis, keepdims=keepdims, initial=initial)
def ones(shape, dtype=None):
dtype = dtype or config.floatx()
return np.ones(shape, dtype=dtype)
def zeros(shape, dtype=None):
dtype = dtype or config.floatx()
return np.zeros(shape, dtype=dtype)
def absolute(x):
return np.absolute(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.all(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.amin(x, axis=axis, keepdims=keepdims)
def append(x1, x2, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.append(x1, x2, axis=axis)
def arange(start, stop=None, step=None, dtype=None):
if dtype is None:
dtypes_to_resolve = [
getattr(start, "dtype", type(start)),
getattr(step, "dtype", type(step)),
]
if stop is not None:
dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
dtype = dtypes.result_type(*dtypes_to_resolve)
return np.arange(start, stop, step=step, dtype=dtype)
def arccos(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arccos(x)
def arccosh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arccosh(x)
def arcsin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arcsin(x)
def arcsinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arcsinh(x)
def arctan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arctan(x)
def arctan2(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.arctan2(x1, x2)
def arctanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.arctanh(x)
def argmax(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argmax(x, axis=axis).astype("int32")
def argmin(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argmin(x, axis=axis).astype("int32")
def argsort(x, axis=-1):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argsort(x, axis=axis).astype("int32")
def array(x, dtype=None):
return convert_to_tensor(x, dtype=dtype)
def average(x, axis=None, weights=None):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
dtypes_to_resolve = [x.dtype, float]
if weights is not None:
weights = convert_to_tensor(weights)
dtypes_to_resolve.append(weights.dtype)
dtype = dtypes.result_type(*dtypes_to_resolve)
x = x.astype(dtype)
if weights is not None:
weights = weights.astype(dtype)
return np.average(x, weights=weights, axis=axis)
def bincount(x, weights=None, minlength=0):
x = convert_to_tensor(x)
dtypes_to_resolve = [x.dtype]
if weights is not None:
weights = convert_to_tensor(weights)
dtypes_to_resolve.append(weights.dtype)
dtype = dtypes.result_type(*dtypes_to_resolve)
else:
dtype = "int32"
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return np.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return np.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return np.stack(bincounts).astype(dtype)
return np.bincount(x, weights, minlength).astype(dtype)
def broadcast_to(x, shape):
return np.broadcast_to(x, shape)
def ceil(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.ceil(x)
def clip(x, x_min, x_max):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if dtype == "bool":
dtype = "int32"
return np.clip(x, x_min, x_max).astype(dtype)
def concatenate(xs, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
xs = tree.map_structure(
lambda x: convert_to_tensor(x).astype(dtype), xs
)
return np.concatenate(xs, axis=axis)
def conjugate(x):
return np.conjugate(x)
def conj(x):
return conjugate(x)
def copy(x):
return np.copy(x)
def cos(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.cos(x)
def cosh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.cosh(x)
def count_nonzero(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
# np.count_nonzero will return python int when axis=None, so we need
# to convert_to_tensor
return convert_to_tensor(np.count_nonzero(x, axis=axis)).astype("int32")
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
def cumprod(x, axis=None, dtype=None):
    """Cumulative product along `axis` (over the flattened array if None).

    Bool inputs are computed as int32 (mirrors the special-casing used by
    `mod` and `clip` in this module).
    """
    axis = tuple(axis) if isinstance(axis, list) else axis
    # Convert first: plain Python sequences have no `.dtype`, and every
    # sibling reduction here (`mean`, `prod`, `std`) converts before use.
    x = convert_to_tensor(x)
    dtype = dtypes.result_type(dtype or x.dtype)
    if dtype == "bool":
        dtype = "int32"
    return np.cumprod(x, axis=axis, dtype=dtype)


def cumsum(x, axis=None, dtype=None):
    """Cumulative sum along `axis` (over the flattened array if None).

    Bool inputs are computed as int32, like `cumprod` above.
    """
    axis = tuple(axis) if isinstance(axis, list) else axis
    # Convert first so plain Python sequences are accepted (see `cumprod`).
    x = convert_to_tensor(x)
    dtype = dtypes.result_type(dtype or x.dtype)
    if dtype == "bool":
        dtype = "int32"
    return np.cumsum(x, axis=axis, dtype=dtype)
def diag(x, k=0):
return np.diag(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
axis1 = tuple(axis1) if isinstance(axis1, list) else axis1
axis2 = tuple(axis2) if isinstance(axis2, list) else axis2
return np.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def diff(a, n=1, axis=-1):
return np.diff(a, n=n, axis=axis)
def digitize(x, bins):
    """Return, as int32, the index of the bin each value of `x` falls into."""
    indices = np.digitize(x, bins)
    return indices.astype(np.int32)
def dot(x, y):
x = convert_to_tensor(x)
y = convert_to_tensor(y)
dtype = dtypes.result_type(x.dtype, y.dtype)
x = x.astype(dtype)
y = y.astype(dtype)
return np.dot(x, y)
def empty(shape, dtype=None):
dtype = dtype or config.floatx()
return np.empty(shape, dtype=dtype)
def equal(x1, x2):
return np.equal(x1, x2)
def exp(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = x.astype(config.floatx())
return np.exp(x)
def expand_dims(x, axis):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.expand_dims(x, axis)
def expm1(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = x.astype(config.floatx())
return np.expm1(x)
def flip(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.flip(x, axis=axis)
def floor(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
x = x.astype(dtype)
return np.floor(x)
def full(shape, fill_value, dtype=None):
dtype = dtype or config.floatx()
return np.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
return np.full_like(x, fill_value, dtype=dtype)
def greater(x1, x2):
return np.greater(x1, x2)
def greater_equal(x1, x2):
return np.greater_equal(x1, x2)
def hstack(xs):
dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
xs = tree.map_structure(
lambda x: convert_to_tensor(x).astype(dtype), xs
)
return np.hstack(xs)
def identity(n, dtype=None):
dtype = dtype or config.floatx()
return np.identity(n, dtype=dtype)
def imag(x):
return np.imag(x)
def isclose(x1, x2):
return np.isclose(x1, x2)
def isfinite(x):
return np.isfinite(x)
def isinf(x):
return np.isinf(x)
def isnan(x):
return np.isnan(x)
def less(x1, x2):
return np.less(x1, x2)
def less_equal(x1, x2):
return np.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
dtypes_to_resolve = [
getattr(start, "dtype", type(start)),
getattr(stop, "dtype", type(stop)),
float,
]
dtype = dtypes.result_type(*dtypes_to_resolve)
return np.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
def log(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return np.log(x, dtype=dtype)
def log10(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return np.log10(x, dtype=dtype)
def log1p(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return np.log1p(x, dtype=dtype)
def log2(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return np.log2(x, dtype=dtype)
def logaddexp(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.logaddexp(x1, x2)
def logical_and(x1, x2):
return np.logical_and(x1, x2)
def logical_not(x):
return np.logical_not(x)
def logical_or(x1, x2):
return np.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
if dtype is None:
dtypes_to_resolve = [
getattr(start, "dtype", type(start)),
getattr(stop, "dtype", type(stop)),
float,
]
dtype = dtypes.result_type(*dtypes_to_resolve)
return np.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
def maximum(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.maximum(x1, x2)
def median(x, axis=None, keepdims=False):
    """Median along `axis`, cast to the float result dtype of `x`."""
    # Normalize list axes and convert up front: plain Python sequences have
    # no `.dtype`, and the sibling reductions (`mean`, `std`, `quantile`)
    # all follow this same pattern.
    axis = tuple(axis) if isinstance(axis, list) else axis
    x = convert_to_tensor(x)
    dtype = dtypes.result_type(x.dtype, float)
    return np.median(x, axis=axis, keepdims=keepdims).astype(dtype)
def meshgrid(*x, indexing="xy"):
return np.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.min(x, axis=axis, keepdims=keepdims, initial=initial)
def minimum(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.minimum(x1, x2)
def mod(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
if dtype == "bool":
dtype = "int32"
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.mod(x1, x2)
def moveaxis(x, source, destination):
return np.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
return np.nan_to_num(x)
def ndim(x):
return np.ndim(x)
def nonzero(x):
    """Indices of the non-zero elements of `x`: one int32 array per axis."""
    per_axis_indices = np.nonzero(x)
    return tuple([idx.astype("int32") for idx in per_axis_indices])
def not_equal(x1, x2):
return np.not_equal(x1, x2)
def zeros_like(x, dtype=None):
return np.zeros_like(x, dtype=dtype)
def ones_like(x, dtype=None):
return np.ones_like(x, dtype=dtype)
def outer(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.outer(x1, x2)
def pad(x, pad_width, mode="constant", constant_values=None):
    """Pad `x` via `np.pad`.

    `constant_values` may only be supplied together with `mode="constant"`;
    any other combination raises `ValueError`.
    """
    if constant_values is None:
        return np.pad(x, pad_width, mode=mode)
    if mode != "constant":
        raise ValueError(
            "Argument `constant_values` can only be "
            "provided when `mode == 'constant'`. "
            f"Received: mode={mode}"
        )
    return np.pad(x, pad_width, mode=mode, constant_values=constant_values)
def prod(x, axis=None, keepdims=False, dtype=None):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
if dtype is None:
dtype = dtypes.result_type(x.dtype)
if dtype in ("bool", "int8", "int16"):
dtype = "int32"
elif dtype in ("uint8", "uint16"):
dtype = "uint32"
return np.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def quantile(x, q, axis=None, method="linear", keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# np.quantile doesn't support bool
if ori_dtype == "bool":
x = x.astype(config.floatx())
if ori_dtype == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
return np.quantile(
x, q, axis=axis, method=method, keepdims=keepdims
).astype(dtype)
def ravel(x):
return np.ravel(x)
def real(x):
return np.real(x)
def reciprocal(x):
return np.reciprocal(x)
def repeat(x, repeats, axis=None):
return np.repeat(x, repeats, axis=axis)
def reshape(x, newshape):
return np.reshape(x, newshape)
def roll(x, shift, axis=None):
return np.roll(x, shift, axis=axis)
def sign(x):
return np.sign(x)
def sin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.sin(x)
def sinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.sinh(x)
def size(x):
return np.size(x)
def sort(x, axis=-1):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
dtype_set = set([getattr(a, "dtype", type(a)) for a in x])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
x = tree.map_structure(lambda a: convert_to_tensor(a).astype(dtype), x)
return np.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = x.astype(config.floatx())
return np.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
return np.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.take_along_axis(x, indices, axis=axis)
def tan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.tan(x)
def tanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.tanh(x)
def tensordot(x1, x2, axes=2):
axes = tuple(axes) if isinstance(axes, list) else axes
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.tensordot(x1, x2, axes=axes)
def round(x, decimals=0):
return np.round(x, decimals=decimals)
def tile(x, repeats):
return np.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
axis1 = tuple(axis1) if isinstance(axis1, list) else axis1
axis2 = tuple(axis2) if isinstance(axis2, list) else axis2
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if dtype not in ("int64", "uint32", "uint64"):
dtype = dtypes.result_type(dtype, "int32")
return np.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
def tri(N, M=None, k=0, dtype=None):
dtype = dtype or config.floatx()
return np.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
return np.tril(x, k=k)
def triu(x, k=0):
return np.triu(x, k=k)
def vdot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.vdot(x1, x2)
def vstack(xs):
dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
xs = tree.map_structure(
lambda x: convert_to_tensor(x).astype(dtype), xs
)
return np.vstack(xs)
def where(condition, x1, x2):
    """Select elementwise from `x1`/`x2` by `condition`.

    If either branch is None, falls back to the single-argument form of
    `np.where`, which returns the indices where `condition` is true.
    """
    if x1 is None or x2 is None:
        return np.where(condition)
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    # Promote both branches to a common dtype; Python scalars contribute
    # their `type` to the promotion.
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return np.where(condition, x1, x2)
def divide(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
float,
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.divide(x1, x2)
def divide_no_nan(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
float,
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.where(x2 == 0, 0, np.divide(x1, x2))
def true_divide(x1, x2):
return divide(x1, x2)
def power(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.power(x1, x2)
def negative(x):
return np.negative(x)
def square(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "bool":
x = x.astype("int32")
return np.square(x)
def sqrt(x):
x = convert_to_tensor(x)
# upcast to float64 for int64 which matches JAX's behavior
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return np.sqrt(x, dtype=dtype)
def squeeze(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.squeeze(x, axis=axis)
def transpose(x, axes=None):
axes = tuple(axes) if isinstance(axes, list) else axes
return np.transpose(x, axes=axes)
def var(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
x = convert_to_tensor(x)
compute_dtype = dtypes.result_type(x.dtype, "float32")
result_dtype = dtypes.result_type(x.dtype, float)
return np.var(x, axis=axis, keepdims=keepdims, dtype=compute_dtype).astype(
result_dtype
)
def sum(x, axis=None, keepdims=False):
    """Sum along `axis`, upcasting small int/bool dtypes as JAX does."""
    axis = tuple(axis) if isinstance(axis, list) else axis
    # Convert first: plain Python sequences have no `.dtype`, and the
    # sibling reductions (`mean`, `prod`, `std`) convert before use.
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # follow jax's rule
    if dtype in ("bool", "int8", "int16"):
        dtype = "int32"
    elif dtype in ("uint8", "uint16"):
        dtype = "uint32"
    return np.sum(x, axis=axis, keepdims=keepdims).astype(dtype)
def eye(N, M=None, k=0, dtype=None):
dtype = dtype or config.floatx()
return np.eye(N, M=M, k=k, dtype=dtype)
def floor_divide(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2))
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.floor_divide(x1, x2)
def logical_xor(x1, x2):
return np.logical_xor(x1, x2)
|
keras/keras/backend/numpy/numpy.py/0
|
{
"file_path": "keras/keras/backend/numpy/numpy.py",
"repo_id": "keras",
"token_count": 13196
}
| 187 |
# flake8: noqa
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras import backend
from keras import testing
from keras.optimizers.sgd import SGD
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class OptimizerDistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
self.strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
def test_config(self):
with self.strategy.scope():
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
with self.strategy.scope():
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
)
grads = tf.constant([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
self.strategy.run(
lambda: optimizer.apply_gradients(zip([grads], [vars]))
)
self.assertAllClose(
vars, [0.5, -1.0, -0.5, 3.0], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
with self.strategy.scope():
grads, var1, var2, var3 = (
tf.zeros(()),
backend.Variable(2.0),
backend.Variable(3.0, name="exclude"),
backend.Variable(4.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
self.strategy.run(
lambda: optimizer_1.apply_gradients(zip([grads], [var1]))
)
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
def opt2_run():
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
self.strategy.run(opt2_run)
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
def opt3_run():
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.strategy.run(opt3_run)
self.assertAlmostEqual(var1.numpy(), 1.9760959)
self.assertAlmostEqual(var2.numpy(), 3.0)
self.assertAlmostEqual(var3.numpy(), 4.0)
def test_correctness_with_golden(self):
with self.strategy.scope():
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999, 0.9999,
0.9999, 0.9999], [0.9989, 0.9979, 0.9969, 0.9959, 0.9949, 0.9939,
0.9929, 0.9919, 0.9909, 0.9899], [0.9979, 0.9959, 0.9939, 0.9919,
0.9899, 0.9879, 0.9859, 0.9839, 0.9819, 0.9799], [0.9969, 0.9939,
0.9909, 0.9879, 0.9849, 0.9819, 0.9789, 0.9759, 0.9729, 0.9699],
[0.9959, 0.9919, 0.9879, 0.9839, 0.9799, 0.9759, 0.9719, 0.9679,
0.9639, 0.9599]]
)
# fmt: on
self.strategy.run(
lambda: optimizer.apply_gradients(zip([first_grads], [x]))
)
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
self.strategy.run(
lambda: optimizer.apply_gradients(zip([grads], [x]))
)
def test_clip_norm(self):
with self.strategy.scope():
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
with self.strategy.scope():
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_stateless_not_supported(self):
optimizer = SGD(learning_rate=0.5)
grads = [np.array([1.0, 6.0, 7.0, 2.0])]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
with self.assertRaisesRegex(ValueError, "not supported"):
optimizer.stateless_apply(optimizer.variables, grads, vars)
def test_ema(self):
with self.strategy.scope():
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = SGD(
learning_rate=1.0,
use_ema=True,
ema_momentum=0.9,
ema_overwrite_frequency=3,
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[2.0, 3.0], [4.0, 5.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[2.0, 3.0], [4.0, 5.0]], # initialized after first step
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.9, 2.9], [3.9, 4.9]],
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
# Variables were overwritten with EMA
self.assertAllClose(v, [[1.71, 2.71], [3.71, 4.71]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.71, 2.71], [3.71, 4.71]],
)
    def test_gradient_accumulation(self):
        """Gradients are buffered for `gradient_accumulation_steps` (3)
        steps; the variable update is applied only on the third step."""
        with self.strategy.scope():
            v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
            grads = backend.convert_to_tensor([[1.0, 1.0], [2.0, 2.0]])
            optimizer = SGD(learning_rate=1.0, gradient_accumulation_steps=3)
            self.assertEqual(optimizer.gradient_accumulation_steps, 3)
        # Steps 1-2: v must stay unchanged while the accumulator grows.
        self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
        self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[1.0, 1.0], [2.0, 2.0]]
        )
        self.assertAllClose(optimizer.iterations, 1)
        self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
        self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[2.0, 2.0], [4.0, 4.0]]
        )
        self.assertAllClose(optimizer.iterations, 2)
        # Step 3: the applied update equals the mean of the accumulated
        # grads ([1,1],[2,2] with lr=1), then the accumulator is zeroed.
        self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
        self.assertAllClose(v, [[0.0, 1.0], [1.0, 2.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
        )
        self.assertAllClose(optimizer.iterations, 3)
|
keras/keras/backend/tensorflow/optimizer_distribute_test.py/0
|
{
"file_path": "keras/keras/backend/tensorflow/optimizer_distribute_test.py",
"repo_id": "keras",
"token_count": 4188
}
| 188 |
import torch
import torch.nn.functional as tnn
import tree
from keras.backend import standardize_data_format
from keras.backend import standardize_dtype
from keras.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_torch,
)
from keras.backend.config import epsilon
from keras.backend.torch.core import cast
from keras.backend.torch.core import convert_to_tensor
from keras.backend.torch.core import get_device
from keras.backend.torch.numpy import expand_dims
from keras.backend.torch.numpy import maximum
from keras.backend.torch.numpy import where
from keras.utils.argument_validation import standardize_tuple
def relu(x):
    """Rectified linear unit, elementwise `max(x, 0)`."""
    return tnn.relu(convert_to_tensor(x))
def relu6(x):
    """ReLU capped at 6: `min(max(x, 0), 6)`."""
    return tnn.relu6(convert_to_tensor(x))
def sigmoid(x):
    """Logistic sigmoid, elementwise `1 / (1 + exp(-x))`."""
    return tnn.sigmoid(convert_to_tensor(x))
def tanh(x):
    """Hyperbolic tangent, elementwise."""
    return tnn.tanh(convert_to_tensor(x))
def softplus(x):
    """Softplus, elementwise `log(1 + exp(x))`."""
    return tnn.softplus(convert_to_tensor(x))
def softsign(x):
    """Softsign, elementwise `x / (1 + |x|)`."""
    return tnn.softsign(convert_to_tensor(x))
def silu(x, beta=1.0):
    """SiLU/Swish: `x * sigmoid(beta * x)` (`beta=1` is standard SiLU)."""
    x = convert_to_tensor(x)
    return torch.mul(x, sigmoid(beta * x))
def log_sigmoid(x):
    """Log of the sigmoid, computed in a numerically stable way by torch."""
    return tnn.logsigmoid(convert_to_tensor(x))
def leaky_relu(x, negative_slope=0.2):
    """Leaky ReLU with configurable slope for negative inputs."""
    return tnn.leaky_relu(convert_to_tensor(x), negative_slope=negative_slope)
def hard_sigmoid(x):
    """Piecewise-linear approximation of the sigmoid."""
    return tnn.hardsigmoid(convert_to_tensor(x))
def hard_silu(x):
    """Hard SiLU (a.k.a. hard-swish), via torch's `hardswish`."""
    return tnn.hardswish(convert_to_tensor(x))
def elu(x, alpha=1.0):
    """Exponential linear unit with scale `alpha` on the negative side."""
    return tnn.elu(convert_to_tensor(x), alpha)
def selu(x):
    """Scaled ELU with torch's fixed self-normalizing constants."""
    return tnn.selu(convert_to_tensor(x))
def gelu(x, approximate=True):
    """Gaussian error linear unit; tanh approximation when `approximate`."""
    # torch.nn.functional.gelu takes a string flag: "tanh" for the
    # approximation, "none" (its default) for the exact form.
    x = convert_to_tensor(x)
    mode = "tanh" if approximate else "none"
    return tnn.gelu(x, approximate=mode)
def softmax(x, axis=-1):
    """Softmax over `axis`; `axis=None` normalizes over all elements."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # TODO: tnn.softmax doesn't support float16 using cpu
    if get_device() == "cpu" and standardize_dtype(x.dtype) == "float16":
        x = cast(x, "float32")
    if axis is None:
        # Unlike numpy, PyTorch will handle axis=None as axis=-1.
        # We need this workaround for the reduction on every dim.
        output = torch.reshape(x, [-1])
        output = tnn.softmax(output, dim=-1)
        output = torch.reshape(output, x.shape)
    else:
        output = tnn.softmax(x, dim=axis)
    # Cast back so the float16 CPU workaround does not change the dtype.
    return cast(output, dtype)
def log_softmax(x, axis=-1):
    """Log-softmax over `axis`; `axis=None` normalizes over all elements."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # TODO: tnn.log_softmax doesn't support float16 using cpu
    if get_device() == "cpu" and standardize_dtype(x.dtype) == "float16":
        x = cast(x, "float32")
    if axis is None:
        # Unlike numpy, PyTorch will handle axis=None as axis=-1.
        # We need this workaround for the reduction on every dim.
        output = torch.reshape(x, [-1])
        output = tnn.log_softmax(output, dim=-1)
        output = torch.reshape(output, x.shape)
    else:
        output = tnn.log_softmax(x, dim=axis)
    # Cast back so the float16 CPU workaround does not change the dtype.
    return cast(output, dtype)
def _compute_padding_length(
input_length, kernel_length, stride, dilation_rate=1
):
"""Compute padding length along one dimension."""
total_padding_length = (
dilation_rate * (kernel_length - 1) - (input_length - 1) % stride
)
left_padding = total_padding_length // 2
right_padding = (total_padding_length + 1) // 2
return (left_padding, right_padding)
def _apply_same_padding(
    inputs, kernel_size, strides, operation_type, dilation_rate=1
):
    """Apply same padding to the input tensor.
    This function will evaluate if the padding value is compatible with torch
    functions. To avoid calling `pad()` as much as possible, which may cause
    performance or memory issues, when compatible, it does not apply the padding
    to the tensor, but returns the input tensor and the padding value to pass to
    the torch functions. If not compatible, it returns the padded tensor and 0
    as the padding value.
    Returns:
        tensor: A padded tensor or the inputs.
        padding: The padding value, ready to pass to the torch functions.
    """
    # Inputs are assumed channels-first here: dims 2+ are spatial.
    spatial_shape = inputs.shape[2:]
    num_spatial_dims = len(spatial_shape)
    padding = ()
    for i in range(num_spatial_dims):
        if operation_type == "pooling":
            padding_size = _compute_padding_length(
                spatial_shape[i], kernel_size[i], strides[i]
            )
            # Pooling pads with edge values ("replicate") rather than zeros.
            mode = "replicate"
        else:
            # Conv padding must account for kernel dilation.
            dilation_rate = standardize_tuple(
                dilation_rate, num_spatial_dims, "dilation_rate"
            )
            padding_size = _compute_padding_length(
                spatial_shape[i], kernel_size[i], strides[i], dilation_rate[i]
            )
            mode = "constant"
        # Prepend: torch's `pad` lists dimensions from last to first.
        padding = (padding_size,) + padding
    if all([left == right for left, right in padding]):
        # Symmetric padding can be passed straight to the torch op as a
        # per-dim integer list, skipping the explicit `pad()` call.
        return inputs, [left for left, _ in padding]
    flattened_padding = tuple(
        value for left_and_right in padding for value in left_and_right
    )
    return tnn.pad(inputs, pad=flattened_padding, mode=mode), 0
def _transpose_spatial_inputs(inputs):
num_spatial_dims = inputs.ndim - 2
# Torch pooling does not support `channels_last` format, so
# we need to transpose to `channels_first` format.
if num_spatial_dims == 1:
inputs = torch.permute(inputs, (0, 2, 1))
elif num_spatial_dims == 2:
inputs = torch.permute(inputs, (0, 3, 1, 2))
elif num_spatial_dims == 3:
inputs = torch.permute(inputs, (0, 4, 1, 2, 3))
else:
raise ValueError(
"Inputs must have ndim=3, 4 or 5, "
"corresponding to 1D, 2D and 3D inputs. "
f"Received input shape: {inputs.shape}."
)
return inputs
def _transpose_spatial_outputs(outputs):
# Undo the tranpose in `_transpose_spatial_inputs`.
num_spatial_dims = len(outputs.shape) - 2
if num_spatial_dims == 1:
outputs = torch.permute(outputs, (0, 2, 1))
elif num_spatial_dims == 2:
outputs = torch.permute(outputs, (0, 2, 3, 1))
elif num_spatial_dims == 3:
outputs = torch.permute(outputs, (0, 2, 3, 4, 1))
return outputs
def _transpose_conv_kernel(kernel):
# Torch requires conv kernel of format
# `(out_channels, in_channels, spatial_dims)`, we need to transpose.
num_spatial_dims = len(kernel.shape) - 2
if num_spatial_dims == 1:
kernel = torch.permute(kernel, (2, 1, 0))
elif num_spatial_dims == 2:
kernel = torch.permute(kernel, (3, 2, 0, 1))
elif num_spatial_dims == 3:
kernel = torch.permute(kernel, (4, 3, 0, 1, 2))
return kernel
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """N-D max pooling (1D/2D/3D) over the spatial dimensions.

    `strides` defaults to `pool_size`. Torch has no native "same" mode,
    so "same" padding is emulated by explicitly padding the inputs.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
    if strides is None:
        strides = pool_size
    else:
        strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = standardize_data_format(data_format)
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    if padding == "same":
        # Torch does not natively support `"same"` padding, we need to manually
        # apply the right amount of padding to `inputs`.
        inputs, padding = _apply_same_padding(
            inputs, pool_size, strides, operation_type="pooling"
        )
    else:
        padding = 0
    device = get_device()
    # Torch max pooling ops do not support symbolic tensors.
    # Create a real tensor to execute the ops.
    if device == "meta":
        inputs = torch.empty(
            size=inputs.shape, dtype=inputs.dtype, device="cpu"
        )
    if num_spatial_dims == 1:
        outputs = tnn.max_pool1d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    elif num_spatial_dims == 2:
        outputs = tnn.max_pool2d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    elif num_spatial_dims == 3:
        outputs = tnn.max_pool3d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    else:
        raise ValueError(
            "Inputs to pooling op must have ndim=3, 4 or 5, "
            "corresponding to 1D, 2D and 3D inputs. "
            f"Received input shape: {inputs.shape}."
        )
    # Move results back to the original device (restores "meta" placement).
    outputs = outputs.to(device)
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """N-D average pooling (1D/2D/3D) over the spatial dimensions.

    `strides` defaults to `pool_size`. "same" padding is split into an
    even part passed to the torch op and an explicit `pad()` for any
    leftover asymmetric unit.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
    if strides is None:
        strides = pool_size
    else:
        strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = standardize_data_format(data_format)
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    padding_value = 0
    if padding == "same":
        spatial_shape = inputs.shape[2:]
        num_spatial_dims = len(spatial_shape)
        padding_value = []
        uneven_padding = []
        for i in range(num_spatial_dims):
            padding_size = _compute_padding_length(
                spatial_shape[i], pool_size[i], strides[i]
            )
            # Torch only supports even padding on each dim, to replicate the
            # behavior of "same" padding of `tf.keras` as much as possible,
            # we need to pad evenly using the shorter padding.
            padding_value.append(padding_size[0])
            if padding_size[0] != padding_size[1]:
                # Handle unequal padding.
                # `torch.nn.pad` sets padding value in the reverse order.
                # NOTE(review): assumes right exceeds left by exactly one,
                # which `_compute_padding_length` guarantees.
                uneven_padding = [0, 1] + uneven_padding
        # Only call tnn.pad when needed.
        if len(uneven_padding) > 0:
            inputs = tnn.pad(inputs, uneven_padding)
    if num_spatial_dims == 1:
        outputs = tnn.avg_pool1d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding_value,
            count_include_pad=False,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.avg_pool2d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding_value,
            count_include_pad=False,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.avg_pool3d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding_value,
            count_include_pad=False,
        )
    else:
        raise ValueError(
            "Inputs to pooling op must have ndim=3, 4 or 5, "
            "corresponding to 1D, 2D and 3D inputs. "
            f"Received input shape: {inputs.shape}."
        )
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """N-D convolution (1D/2D/3D) with implicit grouping support.

    `kernel` uses the Keras layout `(*spatial, in_channels, out_channels)`
    and is transposed to torch layout internally. When the kernel's input
    channel count divides the input's channels, a grouped convolution with
    `groups = channels // kernel_in_channels` is performed.
    """
    inputs = convert_to_tensor(inputs)
    kernel = convert_to_tensor(kernel)
    num_spatial_dims = inputs.ndim - 2
    strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = standardize_data_format(data_format)
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    # Transpose kernel from keras format to torch format.
    kernel = _transpose_conv_kernel(kernel)
    if padding == "same" and any(d != 1 for d in tree.flatten(strides)):
        # Torch does not support this case in conv2d().
        # Manually pad the tensor.
        inputs, padding = _apply_same_padding(
            inputs,
            kernel.shape[2:],
            strides,
            operation_type="conv",
            dilation_rate=dilation_rate,
        )
    channels = inputs.shape[1]
    kernel_in_channels = kernel.shape[1]
    if channels % kernel_in_channels > 0:
        raise ValueError(
            "The number of input channels must be evenly divisible by "
            f"kernel.shape[1]. Received: inputs.shape={inputs.shape}, "
            f"kernel.shape={kernel.shape}"
        )
    # groups == 1 is a standard convolution; > 1 is grouped/depthwise.
    groups = channels // kernel_in_channels
    if num_spatial_dims == 1:
        outputs = tnn.conv1d(
            inputs,
            kernel,
            stride=strides,
            dilation=dilation_rate,
            groups=groups,
            padding=padding,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.conv2d(
            inputs,
            kernel,
            stride=strides,
            dilation=dilation_rate,
            groups=groups,
            padding=padding,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.conv3d(
            inputs,
            kernel,
            stride=strides,
            dilation=dilation_rate,
            groups=groups,
            padding=padding,
        )
    else:
        raise ValueError(
            "Inputs to conv operation should have ndim=3, 4, or 5,"
            "corresponding to 1D, 2D and 3D inputs. Received input "
            f"shape: {inputs.shape}."
        )
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise convolution, implemented as a grouped `conv` call."""
    kernel = convert_to_tensor(kernel)
    # Fold the Keras depthwise kernel's last two axes
    # (in_channels, multiplier) into (1, in_channels * multiplier) so that
    # `conv` infers groups == in_channels from kernel.shape[1] == 1.
    kernel = torch.reshape(
        kernel, kernel.shape[:-2] + (1, kernel.shape[-2] * kernel.shape[-1])
    )
    return conv(inputs, kernel, strides, padding, data_format, dilation_rate)
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable convolution: depthwise filtering, then a pointwise mix.

    The stride/padding/dilation arguments apply to the depthwise stage;
    the pointwise (1x1) stage always runs with unit stride and "valid"
    padding.
    """
    spatial_out = depthwise_conv(
        inputs,
        depthwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )
    return conv(
        spatial_out,
        pointwise_kernel,
        strides=1,
        padding="valid",
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """N-D transposed (fractionally-strided) convolution (1D/2D/3D).

    Keras-style `padding`/`output_padding` are translated into the
    `padding`/`output_padding` pair torch's `conv_transpose*d` expects.
    """
    inputs = convert_to_tensor(inputs)
    kernel = convert_to_tensor(kernel)
    num_spatial_dims = inputs.ndim - 2
    strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = standardize_data_format(data_format)
    (
        torch_padding,
        torch_output_padding,
    ) = compute_conv_transpose_padding_args_for_torch(
        input_shape=inputs.shape,
        kernel_shape=kernel.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    # Transpose kernel from keras format to torch format.
    kernel = _transpose_conv_kernel(kernel)
    kernel_spatial_shape = kernel.shape[2:]
    # Broadcast a scalar dilation rate to every spatial dimension.
    if isinstance(dilation_rate, int):
        dilation_rate = [dilation_rate] * len(kernel_spatial_shape)
    if num_spatial_dims == 1:
        outputs = tnn.conv_transpose1d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.conv_transpose2d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.conv_transpose3d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    else:
        raise ValueError(
            "Inputs to conv transpose operation should have ndim=3, 4, or 5,"
            "corresponding to 1D, 2D and 3D inputs. Received input "
            f"shape: {inputs.shape}."
        )
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def one_hot(x, num_classes, axis=-1, dtype="float32"):
    """One-hot encode integer indices; negative indices produce all-zeros."""
    # Axis is the output axis. By default, PyTorch, outputs to last axis.
    # If axis is not last, change output to axis and shift remaining elements.
    x = convert_to_tensor(x, dtype=torch.long)
    # Torch one_hot does not natively handle negative values, so we add some
    # manual handling for negatives in the input to one_hot by using max(x, 0).
    # The output will have some invalid results, so we set them back to 0 using
    # `where` afterwards.
    output = tnn.one_hot(maximum(x, 0), num_classes)
    output = where(expand_dims(x, axis=-1) >= 0, output, 0)
    output = convert_to_tensor(output, dtype=dtype)
    dims = output.dim()
    if axis != -1 and axis != dims:
        new_axes_order = list(range(dims))
        new_axes_order[axis] = -1  # Shifts output to axis position
        # Shift remaining axes with offset by 1 since output moved to `axis`.
        for ax in range(axis + 1, dims):
            new_axes_order[ax] -= 1
        output = output.permute(new_axes_order)
    return output
def multi_hot(x, num_classes, axis=-1, dtype="float32"):
    """Encode a batch of index lists as k-hot vectors of length `num_classes`."""
    x = convert_to_tensor(x)
    # Batched input: reduce over the per-sample index dimension (1);
    # 1-D input: collapse to a single multi-hot vector (dim 0).
    reduction_axis = 1 if len(x.shape) > 1 else 0
    # max over the one-hot rows turns repeated indices into a single 1.
    outputs = torch.amax(
        one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
        dim=reduction_axis,
    )
    return outputs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Cross-entropy between one-hot/probabilistic targets and predictions.

    When `from_logits` is False, `output` is renormalized along `axis`
    and clipped away from 0/1 for numerical stability before the log.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = tnn.log_softmax(output, dim=axis)
    else:
        output = output / torch.sum(output, dim=axis, keepdim=True)
        output = torch.clip(output, epsilon(), 1.0 - epsilon())
        log_prob = torch.log(output)
    return -torch.sum(target * log_prob, dim=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Cross-entropy with integer class-index targets.

    Targets are one-hot encoded internally, so the result matches
    `categorical_crossentropy` on the equivalent dense targets.
    """
    target = convert_to_tensor(target, dtype=torch.long)
    output = convert_to_tensor(output)
    # Accept targets with a trailing singleton class dim, e.g. (batch, 1).
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = torch.squeeze(target, dim=-1)
    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    if target.shape != output.shape[:-1]:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape "
            "up until the last dimension: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = tnn.log_softmax(output, dim=axis)
    else:
        output = output / torch.sum(output, dim=axis, keepdim=True)
        output = torch.clip(output, epsilon(), 1.0 - epsilon())
        log_prob = torch.log(output)
    target = one_hot(target, output.shape[axis], axis=axis)
    return -torch.sum(target * log_prob, dim=axis)
def binary_crossentropy(target, output, from_logits=False):
    """Elementwise binary cross-entropy (no reduction over the batch)."""
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    # By default, PyTorch, does reduction of `sum` over all rows,
    # change reduction to `none` to keep dim
    if from_logits:
        return tnn.binary_cross_entropy_with_logits(
            output, target, reduction="none"
        )
    else:
        # Clip probabilities away from 0/1 to keep log() finite.
        output = torch.clip(output, epsilon(), 1.0 - epsilon())
        return tnn.binary_cross_entropy(output, target, reduction="none")
def moments(x, axes, keepdims=False, synchronized=False):
    """Return the mean and variance of `x` over `axes`.

    float16 inputs are promoted to float32 for the reduction and cast
    back (with clipping) afterwards. `synchronized=True` (cross-replica
    statistics) is not available on this backend.
    """
    if synchronized:
        raise NotImplementedError(
            "Argument synchronized=True is not supported with PyTorch."
        )
    x = convert_to_tensor(x)
    # The dynamic range of float16 is too limited for statistics. As a
    # workaround, we simply perform the operations on float32 and convert back
    # to float16
    need_cast = False
    ori_dtype = standardize_dtype(x.dtype)
    if ori_dtype == "float16":
        need_cast = True
        x = cast(x, "float32")
    mean = torch.mean(x, dim=axes, keepdim=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$, It is faster
    # but less numerically stable.
    # Note: stop_gradient does not change the gradient to the mean, because that
    # gradient is zero.
    variance = torch.mean(
        torch.square(x), dim=axes, keepdim=True
    ) - torch.square(mean)
    if not keepdims:
        mean = torch.squeeze(mean, axes)
        variance = torch.squeeze(variance, axes)
    if need_cast:
        # avoid overflow and underflow when casting from float16 to float32
        mean = torch.clip(
            mean,
            torch.finfo(torch.float16).min,
            torch.finfo(torch.float16).max,
        )
        variance = torch.clip(
            variance,
            torch.finfo(torch.float16).min,
            torch.finfo(torch.float16).max,
        )
        mean = cast(mean, ori_dtype)
        variance = cast(variance, ori_dtype)
    return mean, variance
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Normalize `x` with given per-channel statistics along `axis`.

    Computes `(x - mean) * scale / sqrt(variance + epsilon) + offset`,
    with `scale`/`offset` defaulting to ones/zeros when omitted.
    """
    x = convert_to_tensor(x)
    mean = convert_to_tensor(mean)
    variance = convert_to_tensor(variance)
    # Reshape the 1-D stats to broadcast along `axis` of x.
    shape = [1] * len(x.shape)
    shape[axis] = mean.shape[0]
    mean = torch.reshape(mean, shape)
    variance = torch.reshape(variance, shape)
    if offset is not None:
        offset = convert_to_tensor(offset)
        offset = torch.reshape(offset, shape)
    else:
        offset = torch.zeros_like(mean)
    if scale is not None:
        scale = convert_to_tensor(scale)
        scale = torch.reshape(scale, shape)
    else:
        scale = torch.ones_like(variance)
    # In-place ops (`mul_`, `add_`, `rsqrt_`) run on fresh intermediates
    # from `subtract`/`add`, so the caller's tensors are not mutated.
    return (
        x.subtract(mean)
        .mul_(variance.add(epsilon).rsqrt_().mul(scale))
        .add_(offset)
    )
def ctc_loss(
    target,
    output,
    target_length,
    output_length,
    mask_index=0,
):
    """Per-sample CTC loss; `mask_index` is the blank label.

    torch's `ctc_loss` takes time-major log-probabilities, hence the
    batch/time transpose and the `log_softmax` before the call.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    target_length = convert_to_tensor(target_length)
    output_length = convert_to_tensor(output_length)
    # (batch, time, classes) -> (time, batch, classes).
    output = torch.transpose(output, 1, 0)
    logits = tnn.log_softmax(output, dim=-1)
    return tnn.ctc_loss(
        logits,
        target,
        output_length,
        target_length,
        blank=mask_index,
        reduction="none",
    )
|
keras/keras/backend/torch/nn.py/0
|
{
"file_path": "keras/keras/backend/torch/nn.py",
"repo_id": "keras",
"token_count": 10616
}
| 189 |
import warnings
import numpy as np
import torch
import tree
from packaging.version import parse
from keras import backend
from keras import callbacks as callbacks_module
from keras import optimizers as optimizers_module
from keras.trainers import trainer as base_trainer
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.epoch_iterator import EpochIterator
from keras.utils import traceback_utils
class TorchTrainer(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.train_function = None
self.test_function = None
self.predict_function = None
def _should_torch_compile(self):
# require torch>=2.1.0 to enable dynamo since it
# includes many improvements/fixes to torch.compile()
# TODO eventually we want to get rid of this when
# torch is upgraded to >=2.1 (from 2.0.1) in g3
if self.jit_compile and parse(torch.__version__) < parse("2.1.0"):
warnings.warn(
"Please upgrade to torch>=2.1.0 for `jit_compile=True` "
"to take effect. Using `jit_compile=False`"
)
self.jit_compile = False
return self.jit_compile
def train_step(self, data):
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
# Compute predictions
if self._call_has_training_arg:
y_pred = self(x, training=True)
else:
y_pred = self(x)
# Call torch.nn.Module.zero_grad() to clear the leftover gradients
# for the weights from the previous train step.
self.zero_grad()
loss = self.compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight
)
self._loss_tracker.update_state(loss)
if self.optimizer is not None:
loss = self.optimizer.scale_loss(loss)
# Compute gradients
if self.trainable_weights:
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
trainable_weights = self.trainable_weights[:]
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
self.optimizer.apply(gradients, trainable_weights)
else:
warnings.warn("The model does not have any trainable weights.")
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def test_step(self, data):
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
loss = self.compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight
)
self._loss_tracker.update_state(loss)
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
return y_pred
def make_train_function(self, force=False):
if self.train_function is not None and not force:
return self.train_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a single training step on a batch of data."""
data = data[0]
return self.train_step(data)
if self._should_torch_compile():
self.train_function = torch.compile(one_step_on_data)
else:
self.train_function = one_step_on_data
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a single test step on a batch of data."""
data = data[0]
with torch.no_grad():
return self.test_step(data)
if self._should_torch_compile():
self.test_function = torch.compile(one_step_on_data)
else:
self.test_function = one_step_on_data
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a predict test step on a batch of data."""
data = data[0]
with torch.no_grad():
return self.predict_step(data)
if self._should_torch_compile():
self.predict_function = torch.compile(one_step_on_data)
else:
self.predict_function = one_step_on_data
@traceback_utils.filter_traceback
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
if not self.compiled:
raise ValueError(
"You must call `compile()` before calling `fit()`."
)
# TODO: respect compiled trainable state
self._eval_epoch_iterator = None
if validation_split and validation_data is None:
# Create the validation data using the training data. Only supported
# for TF/numpy/jax arrays.
# TODO: Support torch tensors for validation data.
(
x,
y,
sample_weight,
), validation_data = data_adapter_utils.train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
if validation_data is not None:
(
val_x,
val_y,
val_sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
# Create an iterator that yields batches for one epoch.
epoch_iterator = TorchEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=epochs,
steps=epoch_iterator.num_batches,
model=self,
)
self.stop_training = False
self.make_train_function()
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
# Switch the torch Module to training mode. Inform torch layers to
# do training behavior in case the user did not use `self.training`
# when implementing a custom layer with torch layers.
self.train()
for step, data in epoch_iterator.enumerate_epoch():
# Callbacks
callbacks.on_train_batch_begin(step)
logs = self.train_function(data)
# Callbacks
callbacks.on_train_batch_end(step, self._pythonify_logs(logs))
if self.stop_training:
break
# Override with model metrics instead of last step logs
epoch_logs = self.get_metrics_result()
# Switch the torch Module back to testing mode.
self.eval()
# Run validation.
if validation_data is not None and self._should_eval(
epoch, validation_freq
):
# Create TorchEpochIterator for evaluation and cache it.
if getattr(self, "_eval_epoch_iterator", None) is None:
self._eval_epoch_iterator = TorchEpochIterator(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps_per_execution=self.steps_per_execution,
steps_per_epoch=validation_steps,
shuffle=False,
)
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
return_dict=True,
_use_cached_eval_dataset=True,
)
val_logs = {
"val_" + name: val for name, val in val_logs.items()
}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
training_logs = epoch_logs
if self.stop_training:
break
if (
isinstance(self.optimizer, optimizers_module.Optimizer)
and epochs > 0
):
self.optimizer.finalize_variable_values(self.trainable_weights)
# If _eval_epoch_iterator exists, delete it after all epochs are done.
if getattr(self, "_eval_epoch_iterator", None) is not None:
del self._eval_epoch_iterator
callbacks.on_train_end(logs=training_logs)
return self.history
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = TorchEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
# Switch the torch Module back to testing mode.
self.eval()
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = None
self.reset_metrics()
for step, data in epoch_iterator.enumerate_epoch():
callbacks.on_test_batch_begin(step)
logs = self.test_function(data)
callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
if self.stop_evaluating:
break
logs = self.get_metrics_result()
callbacks.on_test_end(logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = TorchEpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
# Switch the torch Module back to testing mode.
self.eval()
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
outputs = None
for step, data in epoch_iterator.enumerate_epoch():
callbacks.on_predict_batch_begin(step)
batch_outputs = self.predict_function(data)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(step, {"outputs": batch_outputs})
if self.stop_predicting:
break
callbacks.on_predict_end()
outputs = tree.map_structure(backend.convert_to_numpy, outputs)
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
def train_on_batch(
    self,
    x,
    y=None,
    sample_weight=None,
    class_weight=None,
    return_dict=False,
):
    """Run a single gradient-update step on one batch of data.

    Args:
        x: Input data.
        y: Target data.
        sample_weight: Optional per-sample weights.
        class_weight: Optional mapping from class index to weight;
            mutually exclusive with `sample_weight`.
        return_dict: If `True`, return the metric logs as a dict.

    Returns:
        Metric values as a dict (if `return_dict=True`) or a flat list.
    """
    self._assert_compile_called("train_on_batch")
    # `class_weight` and `sample_weight` are mutually exclusive.
    if class_weight is not None and sample_weight is not None:
        raise ValueError(
            "Arguments `sample_weight` and `class_weight` "
            "cannot be specified at the same time. "
            f"Received: sample_weight={sample_weight}, "
            f"class_weight={class_weight}"
        )
    if class_weight is not None:
        sample_weight = data_adapter_utils.class_weight_to_sample_weights(
            y, class_weight
        )
    data = (x, y, sample_weight)

    # Build the model symbolically if it hasn't been built yet.
    self._symbolic_build(data_batch=data)
    self.make_train_function()

    logs = self.train_function([data])
    logs = tree.map_structure(np.array, logs)
    if return_dict:
        return logs
    return self._flatten_metrics_in_order(logs)
def test_on_batch(
    self,
    x,
    y=None,
    sample_weight=None,
    return_dict=False,
):
    """Evaluate the model on a single batch of data.

    Args:
        x: Input data.
        y: Target data.
        sample_weight: Optional per-sample weights.
        return_dict: If `True`, return the metric logs as a dict.

    Returns:
        Metric values as a dict (if `return_dict=True`) or a flat list.
    """
    self._assert_compile_called("test_on_batch")
    data = (x, y, sample_weight)

    # Build the model symbolically if it hasn't been built yet.
    self._symbolic_build(data_batch=data)
    self.make_test_function()

    logs = self.test_function([data])
    logs = tree.map_structure(np.array, logs)
    return logs if return_dict else self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
    """Return predictions for a single batch of samples as NumPy arrays."""
    self.make_predict_function()
    raw_outputs = self.predict_function([(x,)])
    # Convert backend tensors to NumPy before returning to the caller.
    return tree.map_structure(backend.convert_to_numpy, raw_outputs)
class TorchEpochIterator(EpochIterator):
    """`EpochIterator` specialization that yields batches from a torch
    `DataLoader` built by the data adapter."""

    def _get_iterator(self):
        # Delegate batch iteration to the data adapter's torch DataLoader.
        return self.data_adapter.get_torch_dataloader()
|
keras/keras/backend/torch/trainer.py/0
|
{
"file_path": "keras/keras/backend/torch/trainer.py",
"repo_id": "keras",
"token_count": 8612
}
| 190 |
import os
import re
import warnings
import numpy as np
from keras import backend
from keras.api_export import keras_export
from keras.callbacks.callback import Callback
from keras.utils import file_utils
from keras.utils import io_utils
@keras_export("keras.callbacks.ModelCheckpoint")
class ModelCheckpoint(Callback):
    """Callback to save the Keras model or model weights at some frequency.

    `ModelCheckpoint` callback is used in conjunction with training using
    `model.fit()` to save a model or weights (in a checkpoint file) at some
    interval, so the model or weights can be loaded later to continue the
    training from the state saved.

    A few options this callback provides include:

    - Whether to only keep the model that has achieved the "best performance" so
      far, or whether to save the model at the end of every epoch regardless of
      performance.
    - Definition of "best"; which quantity to monitor and whether it should be
      maximized or minimized.
    - The frequency it should save at. Currently, the callback supports saving
      at the end of every epoch, or after a fixed number of training batches.
    - Whether only weights are saved, or the whole model is saved.

    Example:

    ```python
    model.compile(loss=..., optimizer=...,
                  metrics=['accuracy'])

    EPOCHS = 10
    checkpoint_filepath = '/tmp/ckpt/checkpoint.model.keras'
    model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Model is saved at the end of every epoch, if it's the best seen so far.
    model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])

    # The model (that are considered the best) can be loaded as -
    keras.models.load_model(checkpoint_filepath)

    # Alternatively, one could checkpoint just the model weights as -
    checkpoint_filepath = '/tmp/ckpt/checkpoint.weights.h5'
    model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        save_weights_only=True,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Model weights are saved at the end of every epoch, if it's the best seen
    # so far.
    model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])

    # The model weights (that are considered the best) can be loaded as -
    model.load_weights(checkpoint_filepath)
    ```

    Args:
        filepath: string or `PathLike`, path to save the model file.
            `filepath` can contain named formatting options,
            which will be filled the value of `epoch` and keys in `logs`
            (passed in `on_epoch_end`).
            The `filepath` name needs to end with `".weights.h5"` when
            `save_weights_only=True` or should end with `".keras"` when
            checkpoint saving the whole model (default).
            For example:
            if `filepath` is `"{epoch:02d}-{val_loss:.2f}.keras"`, then the
            model checkpoints will be saved with the epoch number and the
            validation loss in the filename. The directory of the filepath
            should not be reused by any other callbacks to avoid conflicts.
        monitor: The metric name to monitor. Typically the metrics are set by
            the `Model.compile` method. Note:
            * Prefix the name with `"val_"` to monitor validation metrics.
            * Use `"loss"` or `"val_loss"` to monitor the model's total loss.
            * If you specify metrics as strings, like `"accuracy"`, pass the
                same string (with or without the `"val_"` prefix).
            * If you pass `metrics.Metric` objects, `monitor` should be set to
                `metric.name`
            * If you're not sure about the metric names you can check the
                contents of the `history.history` dictionary returned by
                `history = model.fit()`
            * Multi-output models set additional prefixes on the metric names.
        verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
            displays messages when the callback takes an action.
        save_best_only: if `save_best_only=True`, it only saves when the model
            is considered the "best" and the latest best model according to the
            quantity monitored will not be overwritten. If `filepath` doesn't
            contain formatting options like `{epoch}` then `filepath` will be
            overwritten by each new better model.
        mode: one of {`"auto"`, `"min"`, `"max"`}. If `save_best_only=True`, the
            decision to overwrite the current save file is made based on either
            the maximization or the minimization of the monitored quantity.
            For `val_acc`, this should be `"max"`, for `val_loss` this should be
            `"min"`, etc. In `"auto"` mode, the mode is set to `"max"` if the
            quantities monitored are `"acc"` or start with `"fmeasure"` and are
            set to `"min"` for the rest of the quantities.
        save_weights_only: if `True`, then only the model's weights will be
            saved (`model.save_weights(filepath)`), else the full model is
            saved (`model.save(filepath)`).
        save_freq: `"epoch"` or integer. When using `"epoch"`, the callback
            saves the model after each epoch. When using integer, the callback
            saves the model at end of this many batches. If the `Model` is
            compiled with `steps_per_execution=N`, then the saving criteria will
            be checked every Nth batch. Note that if the saving isn't aligned to
            epochs, the monitored metric may potentially be less reliable (it
            could reflect as little as 1 batch, since the metrics get reset
            every epoch). Defaults to `"epoch"`.
        initial_value_threshold: Floating point initial "best" value of the
            metric to be monitored. Only applies if `save_best_only=True`. Only
            overwrites the model weights already saved if the performance of
            current model is better than this value.
    """

    def __init__(
        self,
        filepath,
        monitor="val_loss",
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode="auto",
        save_freq="epoch",
        initial_value_threshold=None,
    ):
        super().__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = file_utils.path_to_string(filepath)
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.save_freq = save_freq
        self._batches_seen_since_last_saving = 0
        self._last_batch_seen = 0
        # Defensive default so batch-level saving does not crash if
        # `on_epoch_begin` was never invoked.
        self._current_epoch = 0
        self.best = initial_value_threshold

        if mode not in ["auto", "min", "max"]:
            warnings.warn(
                f"ModelCheckpoint mode '{mode}' is unknown, "
                "fallback to auto mode.",
                stacklevel=2,
            )
            mode = "auto"

        # Resolve the comparison op and the initial "best" value.
        # NOTE: `np.inf` (not the `np.Inf` alias, removed in NumPy 2.0).
        if mode == "min":
            self.monitor_op = np.less
            if self.best is None:
                self.best = np.inf
        elif mode == "max":
            self.monitor_op = np.greater
            if self.best is None:
                self.best = -np.inf
        else:
            # "auto": maximize accuracy-like metrics, minimize the rest.
            if "acc" in self.monitor or self.monitor.startswith("fmeasure"):
                self.monitor_op = np.greater
                if self.best is None:
                    self.best = -np.inf
            else:
                self.monitor_op = np.less
                if self.best is None:
                    self.best = np.inf

        if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
            raise ValueError(
                f"Unrecognized save_freq: {self.save_freq}. "
                "Expected save_freq are 'epoch' or integer values"
            )

        # Enforce the file-extension contract of the chosen save mode.
        if save_weights_only:
            if not self.filepath.endswith(".weights.h5"):
                raise ValueError(
                    "When using `save_weights_only=True` in `ModelCheckpoint`"
                    ", the filepath provided must end in `.weights.h5` "
                    "(Keras weights format). Received: "
                    f"filepath={self.filepath}"
                )
        else:
            if not self.filepath.endswith(".keras"):
                raise ValueError(
                    "The filepath provided must end in `.keras` "
                    "(Keras model format). Received: "
                    f"filepath={self.filepath}"
                )

    def on_train_batch_end(self, batch, logs=None):
        # Only acts when `save_freq` is an integer (batch-level saving).
        if self._should_save_on_batch(batch):
            self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)

    def on_epoch_begin(self, epoch, logs=None):
        # Track the epoch so batch-level saves can report it.
        self._current_epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        if self.save_freq == "epoch":
            self._save_model(epoch=epoch, batch=None, logs=logs)

    def _should_save_on_batch(self, batch):
        """Handles batch-level saving logic, supports steps_per_execution."""
        if self.save_freq == "epoch":
            return False
        if batch <= self._last_batch_seen:  # New epoch.
            add_batches = batch + 1  # batches are zero-indexed.
        else:
            add_batches = batch - self._last_batch_seen
        self._batches_seen_since_last_saving += add_batches
        self._last_batch_seen = batch

        if self._batches_seen_since_last_saving >= self.save_freq:
            self._batches_seen_since_last_saving = 0
            return True
        return False

    def _save_model(self, epoch, batch, logs):
        """Saves the model.

        Args:
            epoch: the epoch this iteration is in.
            batch: the batch this iteration is in. `None` if the `save_freq`
                is set to `"epoch"`.
            logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
        """
        logs = logs or {}
        filepath = self._get_file_path(epoch, batch, logs)
        # Create host directory if it doesn't exist.
        dirname = os.path.dirname(filepath)
        if dirname and not file_utils.exists(dirname):
            file_utils.makedirs(dirname)

        try:
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn(
                        f"Can save best model only with {self.monitor} "
                        "available, skipping.",
                        stacklevel=2,
                    )
                elif (
                    isinstance(current, np.ndarray)
                    or backend.is_tensor(current)
                ) and len(current.shape) > 0:
                    warnings.warn(
                        "Can save best model only when `monitor` is "
                        f"a scalar value. Received: {current}. "
                        "Falling back to `save_best_only=False`."
                    )
                    # Fall back to unconditional saving, honoring
                    # `save_weights_only` (calling `model.save` on a
                    # `.weights.h5` path would otherwise fail).
                    self._do_save(filepath)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            io_utils.print_msg(
                                f"\nEpoch {epoch + 1}: {self.monitor} "
                                "improved "
                                f"from {self.best:.5f} to {current:.5f}, "
                                f"saving model to {filepath}"
                            )
                        self.best = current
                        self._do_save(filepath)
                    else:
                        if self.verbose > 0:
                            io_utils.print_msg(
                                f"\nEpoch {epoch + 1}: "
                                f"{self.monitor} did not improve "
                                f"from {self.best:.5f}"
                            )
            else:
                if self.verbose > 0:
                    io_utils.print_msg(
                        f"\nEpoch {epoch + 1}: saving model to {filepath}"
                    )
                self._do_save(filepath)
        except IsADirectoryError as e:  # h5py 3.x
            raise IOError(
                "Please specify a non-directory filepath for "
                "ModelCheckpoint. Filepath used is an existing "
                f"directory: {filepath}"
            ) from e
        except IOError as e:  # h5py 2.x
            # `e.errno` appears to be `None` so checking the content of
            # `e.args[0]`.
            if "is a directory" in str(e.args[0]).lower():
                raise IOError(
                    "Please specify a non-directory filepath for "
                    "ModelCheckpoint. Filepath used is an existing "
                    f"directory: {filepath}"
                ) from e
            # Re-throw the error for any other causes.
            raise e

    def _do_save(self, filepath):
        """Write the checkpoint to `filepath` per `save_weights_only`."""
        if self.save_weights_only:
            self.model.save_weights(filepath, overwrite=True)
        else:
            self.model.save(filepath, overwrite=True)

    def _get_file_path(self, epoch, batch, logs):
        """Returns the file path for checkpoint."""
        try:
            # `filepath` may contain placeholders such as
            # `{epoch:02d}`,`{batch:02d}` and `{mape:.2f}`. A mismatch between
            # logged metrics and the path's placeholders can cause formatting to
            # fail.
            if batch is None or "batch" in logs:
                file_path = self.filepath.format(epoch=epoch + 1, **logs)
            else:
                file_path = self.filepath.format(
                    epoch=epoch + 1, batch=batch + 1, **logs
                )
        except KeyError as e:
            raise KeyError(
                f'Failed to format this callback filepath: "{self.filepath}". '
                f"Reason: {e}"
            ) from e
        return file_path

    def _checkpoint_exists(self, filepath):
        """Returns whether the checkpoint `filepath` refers to exists."""
        return file_utils.exists(filepath)

    def _get_most_recently_modified_file_matching_pattern(self, pattern):
        """Returns the most recently modified filepath matching pattern.

        In the rare case where there are more than one pattern-matching file
        having the same modified time that is most recent among all, return the
        filepath that is largest (by `>` operator, lexicographically using the
        numeric equivalents). This provides a tie-breaker when multiple files
        are most recent. Note that a larger `filepath` can sometimes indicate a
        later time of modification (for instance, when epoch/batch is used as
        formatting option), but not necessarily (when accuracy or loss is used).
        The tie-breaker is put in the logic as best effort to return the most
        recent, and to avoid undeterministic result.

        Modified time of a file is obtained with `os.path.getmtime()`.

        This utility function is best demonstrated via an example:

        ```python
        file_pattern = 'batch{batch:02d}epoch{epoch:02d}.keras'
        test_dir = self.get_temp_dir()
        path_pattern = os.path.join(test_dir, file_pattern)
        file_paths = [
            os.path.join(test_dir, file_name) for file_name in
            ['batch03epoch02.keras',
             'batch02epoch02.keras', 'batch01epoch01.keras']
        ]
        for file_path in file_paths:
            # Write something to each of the files
            ...
        self.assertEqual(
            _get_most_recently_modified_file_matching_pattern(path_pattern),
            file_paths[-1])
        ```

        Args:
            pattern: The file pattern that may optionally contain python
                placeholder such as `{epoch:02d}`.

        Returns:
            The most recently modified file's full filepath matching `pattern`.
            If `pattern` does not contain any placeholder, this returns the
            filepath that exactly matches `pattern`. Returns `None` if no match
            is found.
        """
        dir_name = os.path.dirname(pattern)
        base_name = os.path.basename(pattern)
        # Turn the `{...}` placeholders into a wildcard regex for matching.
        base_name_regex = "^" + re.sub(r"{.*}", r".*", base_name) + "$"

        latest_mod_time = 0
        file_path_with_latest_mod_time = None
        n_file_with_latest_mod_time = 0
        file_path_with_largest_file_name = None

        if file_utils.exists(dir_name):
            for file_name in os.listdir(dir_name):
                # Only consider if `file_name` matches the pattern.
                if re.match(base_name_regex, file_name):
                    file_path = os.path.join(dir_name, file_name)
                    mod_time = os.path.getmtime(file_path)
                    if (
                        file_path_with_largest_file_name is None
                        or file_path > file_path_with_largest_file_name
                    ):
                        file_path_with_largest_file_name = file_path
                    if mod_time > latest_mod_time:
                        latest_mod_time = mod_time
                        file_path_with_latest_mod_time = file_path
                        # In the case a file with later modified time is found,
                        # reset the counter for the number of files with latest
                        # modified time.
                        n_file_with_latest_mod_time = 1
                    elif mod_time == latest_mod_time:
                        # In the case a file has modified time tied with the
                        # most recent, increment the counter for the number of
                        # files with latest modified time by 1.
                        n_file_with_latest_mod_time += 1

        if n_file_with_latest_mod_time == 1:
            # Return the sole file that has most recent modified time.
            return file_path_with_latest_mod_time
        else:
            # If there are more than one file having latest modified time,
            # return the file path with the largest file name.
            return file_path_with_largest_file_name
|
keras/keras/callbacks/model_checkpoint.py/0
|
{
"file_path": "keras/keras/callbacks/model_checkpoint.py",
"repo_id": "keras",
"token_count": 8633
}
| 191 |
"""Small NumPy datasets for debugging/testing."""
from keras.datasets import boston_housing
from keras.datasets import california_housing
from keras.datasets import cifar10
from keras.datasets import cifar100
from keras.datasets import fashion_mnist
from keras.datasets import imdb
from keras.datasets import mnist
from keras.datasets import reuters
|
keras/keras/datasets/__init__.py/0
|
{
"file_path": "keras/keras/datasets/__init__.py",
"repo_id": "keras",
"token_count": 113
}
| 192 |
from keras.export.export_lib import ExportArchive
|
keras/keras/export/__init__.py/0
|
{
"file_path": "keras/keras/export/__init__.py",
"repo_id": "keras",
"token_count": 14
}
| 193 |
import numpy as np
import pytest
from keras import testing
from keras.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
    """Unit tests for the `LeakyReLU` activation layer."""

    @pytest.mark.requires_trainable_backend
    def test_leaky_relu(self):
        # Standard layer contract checks (serialization, masking, shapes).
        self.run_layer_test(
            leaky_relu.LeakyReLU,
            init_kwargs={"negative_slope": 1},
            input_shape=(2, 3, 4),
            supports_masking=True,
        )

    def test_leaky_relu_correctness(self):
        # Negative inputs are scaled by the slope; non-negative pass through.
        layer = leaky_relu.LeakyReLU(negative_slope=0.5)
        x = np.array([-10, -5, 0.0, 5, 10])
        expected = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
        self.assertAllClose(layer(x), expected)

    def test_invalid_usage(self):
        # A `None` slope must be rejected at construction time.
        expected_message = (
            "The negative_slope value of a Leaky ReLU layer cannot be None"
        )
        with self.assertRaisesRegex(ValueError, expected_message):
            self.run_layer_test(
                leaky_relu.LeakyReLU,
                init_kwargs={"negative_slope": None},
                input_shape=(2, 3, 4),
                supports_masking=True,
            )
|
keras/keras/layers/activations/leaky_relu_test.py/0
|
{
"file_path": "keras/keras/layers/activations/leaky_relu_test.py",
"repo_id": "keras",
"token_count": 606
}
| 194 |
from keras.api_export import keras_export
from keras.layers.convolutional.base_separable_conv import BaseSeparableConv
@keras_export(
    [
        "keras.layers.SeparableConv1D",
        "keras.layers.SeparableConvolution1D",
    ]
)
class SeparableConv1D(BaseSeparableConv):
    """1D separable convolution layer.

    Applies a depthwise convolution that acts on each input channel
    separately, followed by a pointwise convolution that mixes the
    resulting channels. If `use_bias` is True and a bias initializer is
    provided, a bias vector is added to the output; an optional activation
    function is applied last.

    Args:
        filters: int, dimensionality of the output space (number of filters
            in the pointwise convolution).
        kernel_size: int or tuple/list of 1 integer, size of the depthwise
            convolution window.
        strides: int or tuple/list of 1 integer, stride length of the
            depthwise convolution. A single int is used for all dimensions.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding; `"same"` pads evenly so that with
            `strides=1` the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, steps, features)`, `"channels_first"` to
            `(batch, features, steps)`. Defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`,
            falling back to `"channels_last"` if never set.
        dilation_rate: int or tuple/list of 1 integer, dilation rate for
            dilated convolution. A single int is used for all dimensions.
        depth_multiplier: number of depthwise convolution output channels
            per input channel. The depthwise convolution produces
            `input_channel * depth_multiplier` channels in total.
        activation: activation function. If `None`, no activation is
            applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: initializer for the depthwise convolution
            kernel. If None, `"glorot_uniform"` is used.
        pointwise_initializer: initializer for the pointwise convolution
            kernel. If None, `"glorot_uniform"` is used.
        bias_initializer: initializer for the bias vector. If None,
            `"zeros"` is used.
        depthwise_regularizer: optional regularizer for the depthwise
            convolution kernel.
        pointwise_regularizer: optional regularizer for the pointwise
            convolution kernel.
        bias_regularizer: optional regularizer for the bias vector.
        activity_regularizer: optional regularizer function for the output.
        depthwise_constraint: optional projection function applied to the
            depthwise kernel after each `Optimizer` update (e.g. for norm
            or value constraints). It must take the unprojected variable
            and return a projected variable of the same shape.
        pointwise_constraint: optional projection function applied to the
            pointwise kernel after each `Optimizer` update.
        bias_constraint: optional projection function applied to the bias
            after each `Optimizer` update.

    Input shape:
        - If `data_format="channels_last"`:
            A 3D tensor with shape: `(batch_shape, steps, channels)`
        - If `data_format="channels_first"`:
            A 3D tensor with shape: `(batch_shape, channels, steps)`

    Output shape:
        - If `data_format="channels_last"`:
            A 3D tensor with shape: `(batch_shape, new_steps, filters)`
        - If `data_format="channels_first"`:
            A 3D tensor with shape: `(batch_shape, filters, new_steps)`

    Returns:
        A 3D tensor representing
        `activation(separable_conv1d(inputs, kernel) + bias)`.

    Examples:

    >>> x = np.random.rand(4, 10, 12)
    >>> y = keras.layers.SeparableConv1D(3, 4, 3, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 4, 4)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # This subclass only fixes the spatial rank to 1; every other
        # configuration option is forwarded to `BaseSeparableConv`.
        super().__init__(
            rank=1,
            depth_multiplier=depth_multiplier,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            pointwise_initializer=pointwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            pointwise_regularizer=pointwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            pointwise_constraint=pointwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
|
keras/keras/layers/convolutional/separable_conv1d.py/0
|
{
"file_path": "keras/keras/layers/convolutional/separable_conv1d.py",
"repo_id": "keras",
"token_count": 2604
}
| 195 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.layer import Layer
@keras_export("keras.layers.Masking")
class Masking(Layer):
    """Masks a sequence by using a mask value to skip timesteps.

    For each timestep in the input tensor (dimension #1 in the tensor),
    if all values in the input tensor at that timestep
    are equal to `mask_value`, then the timestep will be masked (skipped)
    in all downstream layers (as long as they support masking).

    If any downstream layer does not support masking yet receives such
    an input mask, an exception will be raised.

    Example:

    Consider a NumPy data array `x` of shape `(samples, timesteps, features)`,
    to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you
    lack data for these timesteps. You can:

    - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
    - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

    ```python
    samples, timesteps, features = 32, 10, 8
    inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
    inputs[:, 3, :] = 0.
    inputs[:, 5, :] = 0.

    model = keras.models.Sequential()
    model.add(keras.layers.Masking(mask_value=0.))
    model.add(keras.layers.LSTM(32))
    output = model(inputs)
    # The time step 3 and 5 will be skipped from LSTM calculation.
    ```

    Note: in the Keras masking convention, a masked timestep is denoted by
    a mask value of `False`, while a non-masked (i.e. usable) timestep
    is denoted by a mask value of `True`.
    """

    def __init__(self, mask_value=0.0, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        # Timesteps whose features are ALL equal to this value are masked.
        self.mask_value = mask_value

    def compute_mask(self, inputs, mask=None):
        # A timestep is kept (True) if any feature differs from mask_value.
        return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)

    def call(self, inputs):
        # Compute the boolean mask and the masked outputs in one pass.
        # `keepdims=True` so the mask broadcasts over the feature axis.
        boolean_mask = ops.any(
            ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
        )
        # Set masked outputs to 0
        outputs = inputs * backend.cast(boolean_mask, dtype=inputs.dtype)
        # Attach the mask to the output tensor so downstream layers see it.
        try:
            outputs._keras_mask = ops.squeeze(boolean_mask, axis=-1)
        except AttributeError:
            # tensor is a C type that does not allow attribute assignment.
            pass
        return outputs

    def compute_output_shape(self, input_shape):
        # Masking never changes the shape of its input.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {"mask_value": self.mask_value}
        return {**base_config, **config}
|
keras/keras/layers/core/masking.py/0
|
{
"file_path": "keras/keras/layers/core/masking.py",
"repo_id": "keras",
"token_count": 1025
}
| 196 |
from keras import ops
from keras.api_export import keras_export
from keras.layers.merging.base_merge import Merge
@keras_export("keras.layers.Multiply")
class Multiply(Merge):
    """Layer that multiplies a list of inputs element-wise.

    Takes as input a list of tensors, all of the same shape, and returns a
    single tensor (also of the same shape) whose entries are the products
    of the corresponding entries of the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Multiply()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.multiply([x1, x2])`
    >>> y = keras.layers.Multiply()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """

    def _merge_function(self, inputs):
        # Fold element-wise multiplication over the remaining inputs.
        product = inputs[0]
        for tensor in inputs[1:]:
            product = ops.multiply(product, tensor)
        return product
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
    """Functional interface to the `keras.layers.Multiply` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor holding the element-wise product of the inputs, with the
        same shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.multiply([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.multiply([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
    """
    # Instantiate the layer, then apply it to the inputs.
    layer = Multiply(**kwargs)
    return layer(inputs)
|
keras/keras/layers/merging/multiply.py/0
|
{
"file_path": "keras/keras/layers/merging/multiply.py",
"repo_id": "keras",
"token_count": 913
}
| 197 |
from keras.api_export import keras_export
from keras.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
class AveragePooling3D(BasePooling):
    """Average pooling operation for 3D data (spatial or spatio-temporal).

    Downsamples the input along its three spatial dimensions (depth,
    height, and width) by averaging over a window of size `pool_size` for
    each channel, shifting the window by `strides` along each dimension.

    Args:
        pool_size: int or tuple of 3 integers, factors by which to
            downscale (dim1, dim2, dim3). A single integer applies the same
            window length to all dimensions.
        strides: int or tuple of 3 integers, or None. Stride values; None
            defaults to `pool_size`. A single int applies the same stride
            to all dimensions.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding; `"same"` pads evenly so the output
            has the same spatial dimensions as the input.
        data_format: string, either `"channels_last"` or
            `"channels_first"`. `"channels_last"` corresponds to inputs
            with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
            `"channels_first"` to
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            Defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`, falling back to
            `"channels_last"` if never set.

    Input shape:
        - If `data_format="channels_last"`:
            5D tensor with shape:
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
        - If `data_format="channels_first"`:
            5D tensor with shape:
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:
        - If `data_format="channels_last"`:
            5D tensor with shape:
            `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
        - If `data_format="channels_first"`:
            5D tensor with shape:
            `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`

    Example:

    ```python
    depth = 30
    height = 30
    width = 30
    channels = 3

    inputs = keras.layers.Input(shape=(depth, height, width, channels))
    layer = keras.layers.AveragePooling3D(pool_size=3)
    outputs = layer(inputs)  # Shape: (batch_size, 10, 10, 10, 3)
    ```
    """

    def __init__(
        self,
        pool_size,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs
    ):
        # Fix the pooling rank to 3 and the mode to "average"; everything
        # else is delegated to `BasePooling`.
        super().__init__(
            pool_size,
            strides,
            pool_dimensions=3,
            pool_mode="average",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
|
keras/keras/layers/pooling/average_pooling3d.py/0
|
{
"file_path": "keras/keras/layers/pooling/average_pooling3d.py",
"repo_id": "keras",
"token_count": 1359
}
| 198 |
import numpy as np
from tensorflow import data as tf_data
from keras import backend
from keras import layers
from keras import testing
class IntegerLookupTest(testing.TestCase):
    """Unit tests for the `IntegerLookup` preprocessing layer."""

    # TODO: increase coverage. Most features aren't being tested.

    def test_config(self):
        # Round-trips the layer through get_config/from_config.
        layer = layers.IntegerLookup(
            output_mode="int",
            vocabulary=[1, 2, 3],
            oov_token=1,
            mask_token=0,
        )
        self.run_class_serialization_test(layer)

    def test_adapt_flow(self):
        # Vocabulary learned from frequency: 1 (x3), 2 (x2), 3 (x1);
        # out-of-vocabulary tokens map to index 0.
        adapt_data = [1, 1, 1, 2, 2, 3]
        single_sample_input_data = [1, 2, 4]
        batch_input_data = [[1, 2, 4], [2, 3, 5]]

        # int mode
        layer = layers.IntegerLookup(
            output_mode="int",
        )
        layer.adapt(adapt_data)
        output = layer(single_sample_input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([1, 2, 0]))
        output = layer(batch_input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([[1, 2, 0], [2, 3, 0]]))

        # one_hot mode
        layer = layers.IntegerLookup(
            output_mode="one_hot",
        )
        layer.adapt(adapt_data)
        output = layer(single_sample_input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(
            output, np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]])
        )

        # multi_hot mode
        layer = layers.IntegerLookup(
            output_mode="multi_hot",
        )
        layer.adapt(adapt_data)
        output = layer(single_sample_input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([1, 1, 1, 0]))

        # tf_idf mode — expected values are precomputed tf-idf weights for
        # the adapted vocabulary.
        layer = layers.IntegerLookup(
            output_mode="tf_idf",
        )
        layer.adapt(adapt_data)
        output = layer(single_sample_input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(
            output, np.array([1.133732, 0.916291, 1.098612, 0.0])
        )

        # count mode — counts per vocabulary slot (OOV bucket first).
        layer = layers.IntegerLookup(
            output_mode="count",
        )
        layer.adapt(adapt_data)
        output = layer([1, 2, 3, 4, 1, 2, 1])
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([1, 3, 2, 1]))

    def test_fixed_vocabulary(self):
        # Vocabulary passed at construction; 5 is OOV and maps to 0.
        layer = layers.IntegerLookup(
            output_mode="int",
            vocabulary=[1, 2, 3, 4],
        )
        input_data = [2, 3, 4, 5]
        output = layer(input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([2, 3, 4, 0]))

    def test_set_vocabulary(self):
        # Vocabulary set after construction via `set_vocabulary`.
        layer = layers.IntegerLookup(
            output_mode="int",
        )
        layer.set_vocabulary([1, 2, 3, 4])
        input_data = [2, 3, 4, 5]
        output = layer(input_data)
        self.assertTrue(backend.is_tensor(output))
        self.assertAllClose(output, np.array([2, 3, 4, 0]))

    def test_tf_data_compatibility(self):
        # The layer must be usable inside a tf.data pipeline via `.map`.
        layer = layers.IntegerLookup(
            output_mode="int",
            vocabulary=[1, 2, 3, 4],
        )
        input_data = [2, 3, 4, 5]
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
        for output in ds.take(1):
            output = output.numpy()
        self.assertAllClose(output, np.array([2, 3, 4, 0]))
|
keras/keras/layers/preprocessing/integer_lookup_test.py/0
|
{
"file_path": "keras/keras/layers/preprocessing/integer_lookup_test.py",
"repo_id": "keras",
"token_count": 1710
}
| 199 |
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras import backend
from keras import layers
from keras import models
from keras import testing
class RandomZoomTest(testing.TestCase, parameterized.TestCase):
    """Tests for `keras.layers.RandomZoom`.

    Negative factors zoom in, positive factors zoom out. Correctness tests
    compare against hand-computed bilinear/nearest resampling results.
    """
    @parameterized.named_parameters(
        ("random_zoom_in_4_by_6", -0.4, -0.6),
        ("random_zoom_in_2_by_3", -0.2, -0.3),
        ("random_zoom_in_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
        ("random_zoom_out_4_by_6", 0.4, 0.6),
        ("random_zoom_out_2_by_3", 0.2, 0.3),
        ("random_zoom_out_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
    )
    def test_random_zoom(self, height_factor, width_factor):
        # Shape/weights/masking sanity checks across factor combinations.
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": height_factor,
                "width_factor": width_factor,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=False,
        )
    def test_random_zoom_out_correctness(self):
        # Fixed (min == max) factors make the "random" zoom deterministic.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        # Zooming out shrinks the content and pads the border with the
        # constant fill value (0); interior values are bilinear samples.
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 2.7, 4.5, 6.3, 0],
                [0, 10.2, 12.0, 13.8, 0],
                [0, 17.7, 19.5, 21.3, 0],
                [0, 0, 0, 0, 0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, input_shape)
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (0.5, 0.5),
                "width_factor": (0.8, 0.8),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )
    def test_random_zoom_in_correctness(self):
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        # Zooming in magnifies the center; all outputs are interpolated
        # from interior pixels, so no fill value appears.
        expected_output = np.asarray(
            [
                [6.0, 6.5, 7.0, 7.5, 8.0],
                [8.5, 9.0, 9.5, 10.0, 10.5],
                [11.0, 11.5, 12.0, 12.5, 13.0],
                [13.5, 14.0, 14.5, 15.0, 15.5],
                [16.0, 16.5, 17.0, 17.5, 18.0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, input_shape)
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (-0.5, -0.5),
                "width_factor": (-0.5, -0.5),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )
    def test_tf_data_compatibility(self):
        # The layer must be mappable over a tf.data.Dataset.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 5, 5, 1)
        else:
            input_shape = (1, 1, 5, 5)
        input_image = np.reshape(np.arange(0, 25), input_shape)
        layer = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            width_factor=(0.8, 0.8),
            interpolation="nearest",
            fill_mode="constant",
        )
        ds = tf_data.Dataset.from_tensor_slices(input_image).batch(1).map(layer)
        # Nearest-neighbor sampling of the zoomed-out image.
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 5, 7, 9, 0],
                [0, 10, 12, 14, 0],
                [0, 20, 22, 24, 0],
                [0, 0, 0, 0, 0],
            ]
        ).reshape(input_shape)
        for output in ds.take(1):
            output = output.numpy()
        self.assertAllClose(expected_output, output)
    def test_dynamic_shape(self):
        # Spatial dims unknown at build time must still work at predict time.
        inputs = layers.Input((None, None, 3))
        outputs = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            width_factor=(0.8, 0.8),
            interpolation="nearest",
            fill_mode="constant",
        )(inputs)
        model = models.Model(inputs, outputs)
        model.predict(np.random.random((1, 6, 6, 3)))
    @pytest.mark.skipif(
        backend.backend() == "numpy",
        reason="The NumPy backend does not implement fit.",
    )
    def test_connect_with_flatten(self):
        # Regression test: RandomZoom followed by Flatten must propagate a
        # static enough shape for Dense to build, and train end to end.
        model = models.Sequential(
            [
                layers.RandomZoom((-0.5, 0.0), (-0.5, 0.0)),
                layers.Flatten(),
                layers.Dense(1, activation="relu"),
            ],
        )
        model.compile(loss="mse")
        model.fit(np.random.random((2, 2, 2, 1)), y=np.random.random((2,)))
|
keras/keras/layers/preprocessing/random_zoom_test.py/0
|
{
"file_path": "keras/keras/layers/preprocessing/random_zoom_test.py",
"repo_id": "keras",
"token_count": 2907
}
| 200 |
import numpy as np
import pytest
from keras import backend
from keras import layers
from keras import testing
class DropoutTest(testing.TestCase):
    """Tests for `keras.layers.Dropout`: basic wiring, inverted-dropout
    rescaling, `noise_shape` broadcasting, and `rate` validation."""
    @pytest.mark.requires_trainable_backend
    def test_dropout_basics(self):
        self.run_layer_test(
            layers.Dropout,
            init_kwargs={
                "rate": 0.2,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            # Dropout owns exactly one RNG seed generator.
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
        )
    def test_dropout_rescaling(self):
        # Inverted dropout: kept units are scaled by 1/(1-rate), so with
        # rate=0.5 survivors become 2.0 and the mean stays ~1.0.
        inputs = np.ones((20, 500))
        layer = layers.Dropout(0.5, seed=1337)
        outputs = layer(inputs, training=True)
        outputs = backend.convert_to_numpy(outputs)
        self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
        self.assertAllClose(np.max(outputs), 2.0)
    def test_dropout_partial_noise_shape_dynamic(self):
        # noise_shape=(None, 1, None): the same mask is broadcast across
        # axis 1, so rows 0 and 1 of each sample must match.
        inputs = np.ones((20, 5, 10))
        layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
        outputs = layer(inputs, training=True)
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
    def test_dropout_partial_noise_shape_static(self):
        # Same as above but with fully static noise_shape dims.
        inputs = np.ones((20, 5, 10))
        layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
        outputs = layer(inputs, training=True)
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
    def test_dropout_negative_rate(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.Dropout(rate=-0.5)
    def test_dropout_rate_greater_than_one(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.Dropout(rate=1.5)
|
keras/keras/layers/regularization/dropout_test.py/0
|
{
"file_path": "keras/keras/layers/regularization/dropout_test.py",
"repo_id": "keras",
"token_count": 974
}
| 201 |
from keras import ops
from keras.api_export import keras_export
from keras.backend.common.keras_tensor import KerasTensor
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
    """Permutes the dimensions of the input according to a given pattern.

    Useful e.g. connecting RNNs and convnets.

    Args:
        dims: Tuple of integers. Permutation pattern does not include the
            batch dimension. Indexing starts at 1. For instance, `(2, 1)`
            permutes the first and second dimensions of the input.

    Input shape:
        Arbitrary.

    Output shape:
        Same as the input shape, but with the dimensions re-ordered according
        to the specified pattern.

    Example:

    >>> x = keras.Input(shape=(10, 64))
    >>> y = keras.layers.Permute((2, 1))(x)
    >>> y.shape
    (None, 64, 10)
    """
    def __init__(self, dims, **kwargs):
        super().__init__(**kwargs)
        self.dims = tuple(dims)
        # `dims` must contain exactly the indices 1..N in some order; the
        # batch axis (0) is implicit and is never permuted.
        if sorted(dims) != list(range(1, len(dims) + 1)):
            raise ValueError(
                "Invalid permutation argument `dims` for Permute Layer. "
                "The set of indices in `dims` must be consecutive and start "
                f"from 1. Received dims={dims}"
            )
        self.input_spec = InputSpec(ndim=len(self.dims) + 1)
    def compute_output_shape(self, input_shape):
        # Batch dim stays first; the rest follow the permutation pattern.
        return tuple([input_shape[0]] + [input_shape[d] for d in self.dims])
    def compute_output_spec(self, inputs):
        return KerasTensor(
            shape=self.compute_output_shape(inputs.shape),
            dtype=inputs.dtype,
            sparse=inputs.sparse,
        )
    def call(self, inputs):
        # `dims` is 1-based, so prepending axis 0 yields a valid transpose.
        return ops.transpose(inputs, axes=(0,) + self.dims)
    def get_config(self):
        # `dims` takes precedence over any same-named base-config key.
        return {**super().get_config(), "dims": self.dims}
|
keras/keras/layers/reshaping/permute.py/0
|
{
"file_path": "keras/keras/layers/reshaping/permute.py",
"repo_id": "keras",
"token_count": 883
}
| 202 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
from keras.utils import argument_validation
@keras_export("keras.layers.ZeroPadding3D")
class ZeroPadding3D(Layer):
    """Zero-padding layer for 3D data (spatial or spatio-temporal).

    Examples:

    >>> input_shape = (1, 1, 2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> y = keras.layers.ZeroPadding3D(padding=2)(x)
    >>> y.shape
    (1, 5, 6, 6, 3)

    Args:
        padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
            - If int: the same symmetric padding is applied to depth, height,
              and width.
            - If tuple of 3 ints: interpreted as three different symmetric
              padding values for depth, height, and width:
              `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
            - If tuple of 3 tuples of 2 ints: interpreted as
              `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
              right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json` (if exists). Defaults to
            `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_axis_to_pad, second_axis_to_pad,
          third_axis_to_pad, depth)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
          third_axis_to_pad)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_padded_axis, second_padded_axis,
          third_axis_to_pad, depth)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, depth, first_padded_axis, second_padded_axis,
          third_axis_to_pad)`
    """
    def __init__(
        self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(padding, int):
            # Scalar: symmetric padding on all three spatial dims.
            self.padding = (
                (padding, padding),
                (padding, padding),
                (padding, padding),
            )
        elif hasattr(padding, "__len__"):
            if len(padding) != 3:
                raise ValueError(
                    f"`padding` should have 3 elements. Received: {padding}."
                )
            # Each entry may itself be an int (symmetric) or a 2-tuple.
            dim1_padding = argument_validation.standardize_tuple(
                padding[0], 2, "1st entry of padding", allow_zero=True
            )
            dim2_padding = argument_validation.standardize_tuple(
                padding[1], 2, "2nd entry of padding", allow_zero=True
            )
            dim3_padding = argument_validation.standardize_tuple(
                padding[2], 2, "3rd entry of padding", allow_zero=True
            )
            self.padding = (dim1_padding, dim2_padding, dim3_padding)
        else:
            # Bug fix: the message previously said "right_dim2_pad" for the
            # third dimension's right pad; it should be "right_dim3_pad".
            raise ValueError(
                "`padding` should be either an int, a tuple of 3 ints "
                "(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), "
                "or a tuple of 3 tuples of 2 ints "
                "((left_dim1_pad, right_dim1_pad),"
                " (left_dim2_pad, right_dim2_pad),"
                " (left_dim3_pad, right_dim3_pad)). "
                f"Received: padding={padding}."
            )
        self.input_spec = InputSpec(ndim=5)
    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        # Spatial dims start at index 2 for channels_first, else index 1.
        spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
        for index in range(0, 3):
            # `None` (unknown) dims stay unknown after padding.
            if output_shape[index + spatial_dims_offset] is not None:
                output_shape[index + spatial_dims_offset] += (
                    self.padding[index][0] + self.padding[index][1]
                )
        return tuple(output_shape)
    def call(self, inputs):
        # Batch and channel axes get zero padding.
        if self.data_format == "channels_first":
            all_dims_padding = ((0, 0), (0, 0), *self.padding)
        else:
            all_dims_padding = ((0, 0), *self.padding, (0, 0))
        return ops.pad(inputs, all_dims_padding)
    def get_config(self):
        config = {"padding": self.padding, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
|
keras/keras/layers/reshaping/zero_padding3d.py/0
|
{
"file_path": "keras/keras/layers/reshaping/zero_padding3d.py",
"repo_id": "keras",
"token_count": 2396
}
| 203 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras import initializers
from keras import layers
from keras import testing
class GRUTest(testing.TestCase, parameterized.TestCase):
    """Tests for `keras.layers.GRU`.

    Numerical expectations were precomputed with constant initializers so
    every unit in the hidden state evolves identically (hence the repeated
    values per row).
    """
    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        self.run_layer_test(
            layers.GRU,
            init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
            input_shape=(3, 2, 4),
            call_kwargs={"training": True},
            expected_output_shape=(3, 3),
            # kernel, recurrent_kernel, bias
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.GRU,
            init_kwargs={
                "units": 3,
                "return_sequences": True,
                "bias_regularizer": "l1",
                "kernel_regularizer": "l2",
                "recurrent_regularizer": "l2",
            },
            input_shape=(3, 2, 4),
            expected_output_shape=(3, 2, 3),
            # One regularization loss per regularized weight.
            expected_num_losses=3,
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
    @parameterized.parameters([1, 2])
    def test_correctness(self, implementation):
        # Baseline forward pass with deterministic constant initializers.
        sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.5217289, 0.5217289, 0.5217289],
                    [0.6371659, 0.6371659, 0.6371659],
                    [0.39384964, 0.39384964, 0.3938496],
                ]
            ),
            output,
        )
        # go_backwards: the sequence is processed in reverse time order.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.24406259, 0.24406259, 0.24406259],
                    [0.611516, 0.611516, 0.611516],
                    [0.3928808, 0.3928808, 0.3928808],
                ]
            ),
            output,
        )
        # unroll=True must match the default (looped) implementation.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            unroll=True,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.5217289, 0.5217289, 0.5217289],
                    [0.6371659, 0.6371659, 0.6371659],
                    [0.39384964, 0.39384964, 0.3938496],
                ]
            ),
            output,
        )
        # reset_after=False uses the legacy (pre-CuDNN) gate ordering, which
        # changes the numerics slightly.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            reset_after=False,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.51447755, 0.51447755, 0.51447755],
                    [0.6426879, 0.6426879, 0.6426879],
                    [0.40208298, 0.40208298, 0.40208298],
                ]
            ),
            output,
        )
        # use_bias=False drops the bias term entirely.
        layer = layers.GRU(
            3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            use_bias=False,
        )
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.49988455, 0.49988455, 0.49988455],
                    [0.64701194, 0.64701194, 0.64701194],
                    [0.4103359, 0.4103359, 0.4103359],
                ]
            ),
            output,
        )
    def test_statefulness(self):
        # With stateful=True the second call continues from the state left
        # by the first call; reset_state() must reproduce the same pair.
        sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
        layer = layers.GRU(
            4,
            stateful=True,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        layer(sequence)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.29542392, 0.29542392, 0.29542392, 0.29542392],
                    [0.5885018, 0.5885018, 0.5885018, 0.5885018],
                ]
            ),
            output,
        )
        layer.reset_state()
        layer(sequence)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [
                    [0.29542392, 0.29542392, 0.29542392, 0.29542392],
                    [0.5885018, 0.5885018, 0.5885018, 0.5885018],
                ]
            ),
            output,
        )
    def test_pass_initial_state(self):
        # Explicit initial_state replaces the default zero state.
        sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
        initial_state = np.arange(4).reshape((2, 2)).astype("float32")
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.23774096, 0.33508456], [0.83659905, 1.0227708]]),
            output,
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence, initial_state=initial_state)
        self.assertAllClose(
            np.array([[0.13486053, 0.23261218], [0.78257304, 0.9691353]]),
            output,
        )
    def test_masking(self):
        # Masked timesteps must be skipped: the state carries over and, for
        # return_sequences, the previous output is repeated at masked steps.
        sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
        mask = np.array([[True, True, False, True], [True, False, False, True]])
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            unroll=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array([[0.19393763, 0.19393763], [0.30818558, 0.30818558]]),
            output,
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_sequences=True,
        )
        output = layer(sequence, mask=mask)
        # Masked steps repeat the last unmasked output.
        self.assertAllClose(
            np.array(
                [
                    [0.03606692, 0.03606692],
                    [0.09497581, 0.09497581],
                    [0.09497581, 0.09497581],
                    [0.19393763, 0.19393763],
                ],
            ),
            output[0],
        )
        self.assertAllClose(
            np.array(
                [
                    [0.16051409, 0.16051409],
                    [0.16051409, 0.16051409],
                    [0.16051409, 0.16051409],
                    [0.30818558, 0.30818558],
                ],
            ),
            output[1],
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            return_sequences=True,
            zero_output_for_mask=True,
        )
        output = layer(sequence, mask=mask)
        # With zero_output_for_mask, masked steps emit zeros instead.
        self.assertAllClose(
            np.array(
                [
                    [0.03606692, 0.03606692],
                    [0.09497581, 0.09497581],
                    [0.0, 0.0],
                    [0.19393763, 0.19393763],
                ],
            ),
            output[0],
        )
        self.assertAllClose(
            np.array(
                [
                    [0.16051409, 0.16051409],
                    [0.0, 0.0],
                    [0.0, 0.0],
                    [0.30818558, 0.30818558],
                ],
            ),
            output[1],
        )
        layer = layers.GRU(
            2,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
            go_backwards=True,
        )
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array([[0.11669192, 0.11669192], [0.28380975, 0.28380975]]),
            output,
        )
|
keras/keras/layers/rnn/gru_test.py/0
|
{
"file_path": "keras/keras/layers/rnn/gru_test.py",
"repo_id": "keras",
"token_count": 5535
}
| 204 |
"""Deprecated image preprocessing APIs from Keras 1."""
import collections
import multiprocessing
import os
import threading
import warnings
import numpy as np
from keras import backend
from keras.api_export import keras_export
from keras.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.utils import image_utils
from keras.utils import io_utils
from keras.utils.module_utils import scipy
@keras_export("keras._legacy.preprocessing.image.Iterator")
class Iterator(PyDataset):
    """Base class for image data iterators.

    DEPRECATED.

    Every `Iterator` must implement the `_get_batches_of_transformed_samples`
    method.

    Supports two access patterns: random access via `__getitem__` (the
    `PyDataset` protocol) and sequential iteration via `__next__`, which is
    guarded by a lock so multiple workers can share one iterator.

    Args:
        n: Integer, total number of samples in the dataset to loop over.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seeding for data shuffling.
    """
    # File extensions (lowercase, without dot) accepted by subclasses.
    white_list_formats = ("png", "jpg", "jpeg", "bmp", "ppm", "tif", "tiff")
    def __init__(self, n, batch_size, shuffle, seed):
        self.n = n
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        # Next batch to yield in sequential (generator) mode.
        self.batch_index = 0
        # Monotonic counter; combined with `seed` for reproducible shuffles.
        self.total_batches_seen = 0
        # Protects the index generator when iterated from multiple threads.
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _set_index_array(self):
        # (Re)build the sample ordering, optionally shuffled.
        self.index_array = np.arange(self.n)
        if self.shuffle:
            self.index_array = np.random.permutation(self.n)
    def __getitem__(self, idx):
        # Random-access path used by PyDataset consumers.
        if idx >= len(self):
            raise ValueError(
                "Asked to retrieve element {idx}, "
                "but the Sequence "
                "has length {length}".format(idx=idx, length=len(self))
            )
        if self.seed is not None:
            # Reseed per access so shuffles are reproducible given `seed`.
            np.random.seed(self.seed + self.total_batches_seen)
        self.total_batches_seen += 1
        if self.index_array is None:
            self._set_index_array()
        index_array = self.index_array[
            self.batch_size * idx : self.batch_size * (idx + 1)
        ]
        return self._get_batches_of_transformed_samples(index_array)
    def __len__(self):
        return (self.n + self.batch_size - 1) // self.batch_size  # round up
    def on_epoch_end(self):
        # Reshuffle (if enabled) at each epoch boundary.
        self._set_index_array()
    def reset(self):
        self.batch_index = 0
    def _flow_index(self):
        """Infinite generator of per-batch index arrays (sequential mode)."""
        # Ensure self.batch_index is 0.
        self.reset()
        while 1:
            if self.seed is not None:
                np.random.seed(self.seed + self.total_batches_seen)
            if self.batch_index == 0:
                # Start of an epoch: rebuild (and maybe reshuffle) indices.
                self._set_index_array()
            if self.n == 0:
                # Avoiding modulo by zero error
                current_index = 0
            else:
                current_index = (self.batch_index * self.batch_size) % self.n
            if self.n > current_index + self.batch_size:
                self.batch_index += 1
            else:
                # Last (possibly short) batch of the epoch; wrap around.
                self.batch_index = 0
            self.total_batches_seen += 1
            yield self.index_array[
                current_index : current_index + self.batch_size
            ]
    def __iter__(self):
        # Needed if we want to do something like:
        # for x, y in data_gen.flow(...):
        return self
    def __next__(self):
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        Args:
            index_array: Array of sample indices to include in batch.

        Returns:
            A batch of transformed samples.
        """
        raise NotImplementedError
def _iter_valid_files(directory, white_list_formats, follow_links):
    """Iterates on files with extension.

    Args:
        directory: Absolute path to the directory
            containing files to be counted
        white_list_formats: Set of strings containing allowed extensions for
            the files to be counted.
        follow_links: Boolean, follow symbolic links to subdirectories.

    Yields:
        Tuple of (root, filename) with extension in `white_list_formats`.
    """
    # Sort the walk by directory path, and filenames within each directory,
    # so iteration order is deterministic across filesystems.
    directories = sorted(
        os.walk(directory, followlinks=follow_links), key=lambda entry: entry[0]
    )
    for root, _, filenames in directories:
        for filename in sorted(filenames):
            lowered = filename.lower()
            if lowered.endswith(".tiff"):
                warnings.warn(
                    'Using ".tiff" files with multiple bands '
                    "will cause distortion. Please verify your output."
                )
            # str.endswith accepts a tuple of allowed suffixes.
            if lowered.endswith(white_list_formats):
                yield root, filename
def _list_valid_filenames_in_directory(
    directory, white_list_formats, split, class_indices, follow_links
):
    """Lists paths of files in `subdir` with extensions in `white_list_formats`.

    Args:
        directory: absolute path to a directory containing the files to list.
            The directory name is used as class label
            and must be a key of `class_indices`.
        white_list_formats: set of strings containing allowed extensions for
            the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account a certain fraction of files in each directory.
            E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
            of images in each directory.
        class_indices: dictionary mapping a class name to its index.
        follow_links: boolean, follow symbolic links to subdirectories.

    Returns:
        classes: a list of class indices
        filenames: the path of valid files in `directory`, relative from
            `directory`'s parent (e.g., if `directory` is "dataset/class1",
            the filenames will be
            `["class1/file1.jpg", "class1/file2.jpg", ...]`).
    """
    dirname = os.path.basename(directory)
    if split:
        # Materialize everything so we can slice out the requested fraction.
        all_files = list(
            _iter_valid_files(directory, white_list_formats, follow_links)
        )
        start = int(split[0] * len(all_files))
        stop = int(split[1] * len(all_files))
        pairs = all_files[start:stop]
    else:
        pairs = list(
            _iter_valid_files(directory, white_list_formats, follow_links)
        )
    # Every file in this directory shares the same class label.
    classes = [class_indices[dirname] for _ in pairs]
    # Paths are reported relative to the parent of `directory`, i.e. they
    # keep the class-directory prefix.
    filenames = [
        os.path.join(dirname, os.path.relpath(os.path.join(root, fname), directory))
        for root, fname in pairs
    ]
    return classes, filenames
class BatchFromFilesMixin:
    """Adds methods related to getting batches from filenames.

    It includes the logic to transform image files to batches.

    Subclasses must provide `filepaths`, `labels`, `sample_weight`,
    `class_mode`, `classes`, and `class_indices` for batch assembly.
    """
    def set_processing_attrs(
        self,
        image_data_generator,
        target_size,
        color_mode,
        data_format,
        save_to_dir,
        save_prefix,
        save_format,
        subset,
        interpolation,
        keep_aspect_ratio,
    ):
        """Sets attributes to use later for processing files into a batch.

        Args:
            image_data_generator: Instance of `ImageDataGenerator`
                to use for random transformations and normalization.
            target_size: tuple of integers, dimensions to resize input images
                to.
            color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
                Color mode to read images.
            data_format: String, one of `channels_first`, `channels_last`.
            save_to_dir: Optional directory where to save the pictures
                being yielded, in a viewable format. This is useful
                for visualizing the random transformations being
                applied, for debugging purposes.
            save_prefix: String prefix to use for saving sample
                images (if `save_to_dir` is set).
            save_format: Format to use for saving sample images
                (if `save_to_dir` is set).
            subset: Subset of data (`"training"` or `"validation"`) if
                validation_split is set in ImageDataGenerator.
            interpolation: Interpolation method used to resample the image if
                the target size is different from that of the loaded image.
                Supported methods are "nearest", "bilinear", and "bicubic". If
                PIL version 1.1.3 or newer is installed, "lanczos" is also
                supported. If PIL version 3.4.0 or newer is installed, "box" and
                "hamming" are also supported. By default, "nearest" is used.
            keep_aspect_ratio: Boolean, whether to resize images to a target
                size without aspect ratio distortion. The image is cropped in
                the center with target aspect ratio before resizing.
        """
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        self.keep_aspect_ratio = keep_aspect_ratio
        if color_mode not in {"rgb", "rgba", "grayscale"}:
            raise ValueError(
                f"Invalid color mode: {color_mode}"
                '; expected "rgb", "rgba", or "grayscale".'
            )
        self.color_mode = color_mode
        self.data_format = data_format
        # Derive the per-sample image shape: channel count from color_mode
        # (rgba=4, rgb=3, grayscale=1), channel position from data_format.
        if self.color_mode == "rgba":
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (4,)
            else:
                self.image_shape = (4,) + self.target_size
        elif self.color_mode == "rgb":
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.interpolation = interpolation
        if subset is not None:
            # Translate the subset name into a (start, stop) fraction pair
            # used to slice each class directory's file list.
            validation_split = self.image_data_generator._validation_split
            if subset == "validation":
                split = (0, validation_split)
            elif subset == "training":
                split = (validation_split, 1)
            else:
                raise ValueError(
                    f"Invalid subset name: {subset};"
                    'expected "training" or "validation"'
                )
        else:
            split = None
        self.split = split
        self.subset = subset
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        Args:
            index_array: Array of sample indices to include in batch.

        Returns:
            A batch of transformed samples. Depending on `class_mode` and
            `sample_weight`, this is `batch_x`, `(batch_x, batch_y)`, or
            `(batch_x, batch_y, sample_weights)`.
        """
        batch_x = np.zeros(
            (len(index_array),) + self.image_shape, dtype=self.dtype
        )
        # build batch of image data
        # self.filepaths is dynamic, is better to call it once outside the loop
        filepaths = self.filepaths
        for i, j in enumerate(index_array):
            img = image_utils.load_img(
                filepaths[j],
                color_mode=self.color_mode,
                target_size=self.target_size,
                interpolation=self.interpolation,
                keep_aspect_ratio=self.keep_aspect_ratio,
            )
            x = image_utils.img_to_array(img, data_format=self.data_format)
            # Pillow images should be closed after `load_img`,
            # but not PIL images.
            if hasattr(img, "close"):
                img.close()
            if self.image_data_generator:
                # Apply a fresh random transform, then normalization.
                params = self.image_data_generator.get_random_transform(x.shape)
                x = self.image_data_generator.apply_transform(x, params)
                x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = image_utils.array_to_img(
                    batch_x[i], self.data_format, scale=True
                )
                # Random hash in the filename avoids clobbering earlier saves
                # of the same sample index.
                fname = "{prefix}_{index}_{hash}.{format}".format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e7),
                    format=self.save_format,
                )
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == "input":
            # Autoencoder-style: targets are the inputs themselves.
            batch_y = batch_x.copy()
        elif self.class_mode in {"binary", "sparse"}:
            batch_y = np.empty(len(batch_x), dtype=self.dtype)
            for i, n_observation in enumerate(index_array):
                batch_y[i] = self.classes[n_observation]
        elif self.class_mode == "categorical":
            # One-hot encode class indices.
            batch_y = np.zeros(
                (len(batch_x), len(self.class_indices)), dtype=self.dtype
            )
            for i, n_observation in enumerate(index_array):
                batch_y[i, self.classes[n_observation]] = 1.0
        elif self.class_mode == "multi_output":
            batch_y = [output[index_array] for output in self.labels]
        elif self.class_mode == "raw":
            batch_y = self.labels[index_array]
        else:
            # class_mode is None: yield inputs only.
            return batch_x
        if self.sample_weight is None:
            return batch_x, batch_y
        else:
            return batch_x, batch_y, self.sample_weight[index_array]
    @property
    def filepaths(self):
        """List of absolute paths to image files."""
        raise NotImplementedError(
            "`filepaths` property method has not "
            "been implemented in {}.".format(type(self).__name__)
        )
    @property
    def labels(self):
        """Class labels of every observation."""
        raise NotImplementedError(
            "`labels` property method has not been implemented in {}.".format(
                type(self).__name__
            )
        )
    @property
    def sample_weight(self):
        # Optional per-sample weights; subclasses may return None.
        raise NotImplementedError(
            "`sample_weight` property method has not "
            "been implemented in {}.".format(type(self).__name__)
        )
@keras_export("keras._legacy.preprocessing.image.DirectoryIterator")
class DirectoryIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk.
DEPRECATED.
"""
allowed_class_modes = {"categorical", "binary", "sparse", "input", None}
def __init__(
self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode="rgb",
classes=None,
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix="",
save_format="png",
follow_links=False,
subset=None,
interpolation="nearest",
keep_aspect_ratio=False,
dtype=None,
):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
super().set_processing_attrs(
image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation,
keep_aspect_ratio,
)
self.directory = directory
self.classes = classes
if class_mode not in self.allowed_class_modes:
raise ValueError(
"Invalid class_mode: {}; expected one of: {}".format(
class_mode, self.allowed_class_modes
)
)
self.class_mode = class_mode
self.dtype = dtype
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(
_list_valid_filenames_in_directory,
(
dirpath,
self.white_list_formats,
self.split,
self.class_indices,
follow_links,
),
)
)
classes_list = []
for res in results:
classes, filenames = res.get()
classes_list.append(classes)
self.filenames += filenames
self.samples = len(self.filenames)
self.classes = np.zeros((self.samples,), dtype="int32")
for classes in classes_list:
self.classes[i : i + len(classes)] = classes
i += len(classes)
io_utils.print_msg(
f"Found {self.samples} images belonging to "
f"{self.num_classes} classes."
)
pool.close()
pool.join()
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
    @property
    def filepaths(self):
        """Absolute paths of the images found under `self.directory`."""
        return self._filepaths
    @property
    def labels(self):
        """Integer class index for each sample, aligned with `filepaths`."""
        return self.classes
    @property  # mixin needs this property to work
    def sample_weight(self):
        """Always `None`: this iterator does not support sample weights."""
        # no sample weights will be returned
        return None
@keras_export("keras._legacy.preprocessing.image.NumpyArrayIterator")
class NumpyArrayIterator(Iterator):
    """Iterator yielding batches of (optionally augmented) data from a Numpy array.

    DEPRECATED.
    """

    def __init__(
        self,
        x,
        y,
        image_data_generator,
        batch_size=32,
        shuffle=False,
        sample_weight=None,
        seed=None,
        data_format=None,
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        subset=None,
        ignore_class_split=False,
        dtype=None,
    ):
        # Fall back to the global Keras defaults when not given explicitly.
        if data_format is None:
            data_format = backend.image_data_format()
        if dtype is None:
            dtype = backend.floatx()
        self.dtype = dtype
        # `x` may be a (images, misc) pair, where `misc` is one array or a
        # list of auxiliary arrays batched alongside the image tensor.
        if isinstance(x, tuple) or isinstance(x, list):
            if not isinstance(x[1], list):
                x_misc = [np.asarray(x[1])]
            else:
                x_misc = [np.asarray(xx) for xx in x[1]]
            x = x[0]
            # Every auxiliary array must have one entry per image.
            for xx in x_misc:
                if len(x) != len(xx):
                    raise ValueError(
                        "All of the arrays in `x` "
                        "should have the same length. "
                        "Found a pair with: "
                        f"len(x[0]) = {len(x)}, len(x[?]) = {len(xx)}"
                    )
        else:
            x_misc = []
        if y is not None and len(x) != len(y):
            raise ValueError(
                "`x` (images tensor) and `y` (labels) "
                "should have the same length. "
                f"Found: x.shape = {np.asarray(x).shape}, "
                f"y.shape = {np.asarray(y).shape}"
            )
        if sample_weight is not None and len(x) != len(sample_weight):
            raise ValueError(
                "`x` (images tensor) and `sample_weight` "
                "should have the same length. "
                f"Found: x.shape = {np.asarray(x).shape}, "
                f"sample_weight.shape = {np.asarray(sample_weight).shape}"
            )
        if subset is not None:
            if subset not in {"training", "validation"}:
                raise ValueError(
                    f"Invalid subset name: {subset}"
                    '; expected "training" or "validation".'
                )
            # Deterministic head/tail split driven by the generator's
            # `validation_split` fraction (no shuffling is performed here).
            split_idx = int(len(x) * image_data_generator._validation_split)
            # Warn the user when a sorted-by-label array would put entire
            # classes on one side of the split.
            if (
                y is not None
                and not ignore_class_split
                and not np.array_equal(
                    np.unique(y[:split_idx]), np.unique(y[split_idx:])
                )
            ):
                raise ValueError(
                    "Training and validation subsets "
                    "have different number of classes after "
                    "the split. If your numpy arrays are "
                    "sorted by the label, you might want "
                    "to shuffle them."
                )
            if subset == "validation":
                x = x[:split_idx]
                x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
                if y is not None:
                    y = y[:split_idx]
            else:
                x = x[split_idx:]
                x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
                if y is not None:
                    y = y[split_idx:]
        self.x = np.asarray(x, dtype=self.dtype)
        self.x_misc = x_misc
        if self.x.ndim != 4:
            raise ValueError(
                "Input data in `NumpyArrayIterator` "
                "should have rank 4. You passed an array "
                f"with shape {self.x.shape}"
            )
        # Unusual channel counts are allowed but flagged, since they often
        # indicate a wrong `data_format`.
        channels_axis = 3 if data_format == "channels_last" else 1
        if self.x.shape[channels_axis] not in {1, 3, 4}:
            warnings.warn(
                'NumpyArrayIterator is set to use the data format convention "'
                + data_format
                + '" (channels on axis '
                + str(channels_axis)
                + "), i.e. expected either 1, 3, or 4 channels on axis "
                + str(channels_axis)
                + ". However, it was passed an array with shape "
                + str(self.x.shape)
                + " ("
                + str(self.x.shape[channels_axis])
                + " channels)."
            )
        if y is not None:
            self.y = np.asarray(y)
        else:
            self.y = None
        if sample_weight is not None:
            self.sample_weight = np.asarray(sample_weight)
        else:
            self.sample_weight = None
        self.image_data_generator = image_data_generator
        self.data_format = data_format
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        super().__init__(x.shape[0], batch_size, shuffle, seed)

    def _get_batches_of_transformed_samples(self, index_array):
        """Build one batch: augment, standardize, and gather targets/weights."""
        # Pre-allocate the batch with the iterator's dtype.
        batch_x = np.zeros(
            tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype
        )
        for i, j in enumerate(index_array):
            x = self.x[j]
            # Per-sample random augmentation followed by normalization.
            params = self.image_data_generator.get_random_transform(x.shape)
            x = self.image_data_generator.apply_transform(
                x.astype(self.dtype), params
            )
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        if self.save_to_dir:
            # Optionally dump augmented images to disk for visual inspection.
            for i, j in enumerate(index_array):
                img = image_utils.array_to_img(
                    batch_x[i], self.data_format, scale=True
                )
                fname = "{prefix}_{index}_{hash}.{format}".format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e4),
                    format=self.save_format,
                )
                img.save(os.path.join(self.save_to_dir, fname))
        # Auxiliary inputs (if any) are batched with plain fancy indexing.
        batch_x_miscs = [xx[index_array] for xx in self.x_misc]
        output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)
        if self.y is None:
            return output[0]
        output += (self.y[index_array],)
        if self.sample_weight is not None:
            output += (self.sample_weight[index_array],)
        return output
def validate_filename(filename, white_list_formats):
    """Check if a filename refers to a valid file.

    Args:
        filename: String, absolute path to a file
        white_list_formats: Set, allowed file extensions

    Returns:
        A boolean value indicating if the filename is valid or not
    """
    has_allowed_extension = filename.lower().endswith(white_list_formats)
    return has_allowed_extension and os.path.isfile(filename)
class DataFrameIterator(BatchFromFilesMixin, Iterator):
    """Iterator capable of reading images from a directory as a dataframe."""

    # Supported values for `class_mode`; `None` yields no targets.
    allowed_class_modes = {
        "binary",
        "categorical",
        "input",
        "multi_output",
        "raw",
        "sparse",
        None,
    }

    def __init__(
        self,
        dataframe,
        directory=None,
        image_data_generator=None,
        x_col="filename",
        y_col="class",
        weight_col=None,
        target_size=(256, 256),
        color_mode="rgb",
        classes=None,
        class_mode="categorical",
        batch_size=32,
        shuffle=True,
        seed=None,
        data_format="channels_last",
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        subset=None,
        interpolation="nearest",
        keep_aspect_ratio=False,
        dtype="float32",
        validate_filenames=True,
    ):
        # Image loading/augmentation configuration lives in the mixin.
        super().set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation,
            keep_aspect_ratio,
        )
        # Work on a copy so the caller's dataframe is never mutated.
        df = dataframe.copy()
        self.directory = directory or ""
        self.class_mode = class_mode
        self.dtype = dtype
        # check that inputs match the required class_mode
        self._check_params(df, x_col, y_col, weight_col, classes)
        if (
            validate_filenames
        ):  # check which image files are valid and keep them
            df = self._filter_valid_filepaths(df, x_col)
        if class_mode not in ["input", "multi_output", "raw", None]:
            df, classes = self._filter_classes(df, y_col, classes)
            num_classes = len(classes)
            # build an index of all the unique classes
            self.class_indices = dict(zip(classes, range(len(classes))))
        # retrieve only training or validation set
        # (`self.split` is a (start, stop) fraction pair set by the mixin)
        if self.split:
            num_files = len(df)
            start = int(self.split[0] * num_files)
            stop = int(self.split[1] * num_files)
            df = df.iloc[start:stop, :]
        # get labels for each observation
        if class_mode not in ["input", "multi_output", "raw", None]:
            self.classes = self.get_classes(df, y_col)
        self.filenames = df[x_col].tolist()
        self._sample_weight = df[weight_col].values if weight_col else None
        # `multi_output` and `raw` modes keep raw target arrays instead of
        # class indices.
        if class_mode == "multi_output":
            self._targets = [np.array(df[col].tolist()) for col in y_col]
        if class_mode == "raw":
            self._targets = df[y_col].values
        self.samples = len(self.filenames)
        validated_string = (
            "validated" if validate_filenames else "non-validated"
        )
        if class_mode in ["input", "multi_output", "raw", None]:
            io_utils.print_msg(
                f"Found {self.samples} {validated_string} image filenames."
            )
        else:
            io_utils.print_msg(
                f"Found {self.samples} {validated_string} image filenames "
                f"belonging to {num_classes} classes."
            )
        self._filepaths = [
            os.path.join(self.directory, fname) for fname in self.filenames
        ]
        super().__init__(self.samples, batch_size, shuffle, seed)

    def _check_params(self, df, x_col, y_col, weight_col, classes):
        """Validate dataframe columns and arguments against `class_mode`."""
        # check class mode is one of the currently supported
        if self.class_mode not in self.allowed_class_modes:
            raise ValueError(
                "Invalid class_mode: {}; expected one of: {}".format(
                    self.class_mode, self.allowed_class_modes
                )
            )
        # check that y_col has several column names if class_mode is
        # multi_output
        if (self.class_mode == "multi_output") and not isinstance(y_col, list):
            raise TypeError(
                'If class_mode="{}", y_col must be a list. Received {}.'.format(
                    self.class_mode, type(y_col).__name__
                )
            )
        # check that filenames/filepaths column values are all strings
        if not all(df[x_col].apply(lambda x: isinstance(x, str))):
            raise TypeError(
                f"All values in column x_col={x_col} must be strings."
            )
        # check labels are string if class_mode is binary or sparse
        if self.class_mode in {"binary", "sparse"}:
            if not all(df[y_col].apply(lambda x: isinstance(x, str))):
                raise TypeError(
                    'If class_mode="{}", y_col="{}" column '
                    "values must be strings.".format(self.class_mode, y_col)
                )
        # check that if binary there are only 2 different classes
        if self.class_mode == "binary":
            if classes:
                classes = set(classes)
                if len(classes) != 2:
                    raise ValueError(
                        'If class_mode="binary" there must be 2 '
                        "classes. {} class/es were given.".format(len(classes))
                    )
            elif df[y_col].nunique() != 2:
                raise ValueError(
                    'If class_mode="binary" there must be 2 classes. '
                    "Found {} classes.".format(df[y_col].nunique())
                )
        # check values are string, list or tuple if class_mode is categorical
        if self.class_mode == "categorical":
            types = (str, list, tuple)
            if not all(df[y_col].apply(lambda x: isinstance(x, types))):
                raise TypeError(
                    'If class_mode="{}", y_col="{}" column '
                    "values must be type string, list or tuple.".format(
                        self.class_mode, y_col
                    )
                )
        # raise warning if classes are given but will be unused
        if classes and self.class_mode in {
            "input",
            "multi_output",
            "raw",
            None,
        }:
            warnings.warn(
                '`classes` will be ignored given the class_mode="{}"'.format(
                    self.class_mode
                )
            )
        # check that if weight column that the values are numerical
        if weight_col and not issubclass(df[weight_col].dtype.type, np.number):
            raise TypeError(f"Column weight_col={weight_col} must be numeric.")

    def get_classes(self, df, y_col):
        """Map label values in `y_col` to class indices (lists stay lists)."""
        labels = []
        for label in df[y_col]:
            if isinstance(label, (list, tuple)):
                labels.append([self.class_indices[lbl] for lbl in label])
            else:
                labels.append(self.class_indices[label])
        return labels

    @staticmethod
    def _filter_classes(df, y_col, classes):
        """Drop rows whose labels are outside `classes`.

        If `classes` is falsy, infer the sorted set of classes from the
        dataframe instead. Returns the filtered dataframe and the final
        class list.
        """
        df = df.copy()

        def remove_classes(labels, classes):
            # Keep only labels present in `classes`; `None` marks the row
            # for removal via `dropna` below.
            if isinstance(labels, (list, tuple)):
                labels = [cls for cls in labels if cls in classes]
                return labels or None
            elif isinstance(labels, str):
                return labels if labels in classes else None
            else:
                raise TypeError(
                    "Expect string, list or tuple "
                    "but found {} in {} column ".format(type(labels), y_col)
                )

        if classes:
            # prepare for membership lookup
            classes = list(collections.OrderedDict.fromkeys(classes).keys())
            df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))
        else:
            classes = set()
            for v in df[y_col]:
                if isinstance(v, (list, tuple)):
                    classes.update(v)
                else:
                    classes.add(v)
            classes = sorted(classes)
        return df.dropna(subset=[y_col]), classes

    def _filter_valid_filepaths(self, df, x_col):
        """Keep only dataframe rows with valid filenames.

        Args:
            df: Pandas dataframe containing filenames in a column
            x_col: string, column in `df` that contains the filenames or
                filepaths

        Returns:
            absolute paths to image files
        """
        filepaths = df[x_col].map(
            lambda fname: os.path.join(self.directory, fname)
        )
        mask = filepaths.apply(
            validate_filename, args=(self.white_list_formats,)
        )
        n_invalid = (~mask).sum()
        if n_invalid:
            warnings.warn(
                'Found {} invalid image filename(s) in x_col="{}". '
                "These filename(s) will be ignored.".format(n_invalid, x_col)
            )
        return df[mask]

    @property
    def filepaths(self):
        """Paths (joined with `directory`) of the retained image files."""
        return self._filepaths

    @property
    def labels(self):
        """Targets: raw arrays for multi_output/raw, class indices otherwise."""
        if self.class_mode in {"multi_output", "raw"}:
            return self._targets
        else:
            return self.classes

    @property
    def sample_weight(self):
        """Per-sample weights from `weight_col`, or `None` if not given."""
        return self._sample_weight
def flip_axis(x, axis):
    """Return `x` with its entries reversed along the given axis."""
    return np.flip(np.asarray(x), axis=axis)
@keras_export("keras._legacy.preprocessing.image.ImageDataGenerator")
class ImageDataGenerator:
    """Generates batches of image data with real-time augmentation.

    DEPRECATED.
    """

    def __init__(
        self,
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-6,
        rotation_range=0,
        width_shift_range=0.0,
        height_shift_range=0.0,
        brightness_range=None,
        shear_range=0.0,
        zoom_range=0.0,
        channel_shift_range=0.0,
        fill_mode="nearest",
        cval=0.0,
        horizontal_flip=False,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format=None,
        validation_split=0.0,
        interpolation_order=1,
        dtype=None,
    ):
        # Fall back to the global Keras defaults when not given explicitly.
        if data_format is None:
            data_format = backend.image_data_format()
        if dtype is None:
            dtype = backend.floatx()
        self.featurewise_center = featurewise_center
        self.samplewise_center = samplewise_center
        self.featurewise_std_normalization = featurewise_std_normalization
        self.samplewise_std_normalization = samplewise_std_normalization
        self.zca_whitening = zca_whitening
        self.zca_epsilon = zca_epsilon
        self.rotation_range = rotation_range
        self.width_shift_range = width_shift_range
        self.height_shift_range = height_shift_range
        self.shear_range = shear_range
        self.zoom_range = zoom_range
        self.channel_shift_range = channel_shift_range
        self.fill_mode = fill_mode
        self.cval = cval
        self.horizontal_flip = horizontal_flip
        self.vertical_flip = vertical_flip
        self.rescale = rescale
        self.preprocessing_function = preprocessing_function
        self.dtype = dtype
        self.interpolation_order = interpolation_order
        if data_format not in {"channels_last", "channels_first"}:
            raise ValueError(
                '`data_format` should be `"channels_last"` '
                "(channel after row and column) or "
                '`"channels_first"` (channel before row and column). '
                f"Received: {data_format}"
            )
        self.data_format = data_format
        # Axis indices are for rank-4 batches; per-image code subtracts 1.
        if data_format == "channels_first":
            self.channel_axis = 1
            self.row_axis = 2
            self.col_axis = 3
        if data_format == "channels_last":
            self.channel_axis = 3
            self.row_axis = 1
            self.col_axis = 2
        if validation_split and not 0 < validation_split < 1:
            raise ValueError(
                "`validation_split` must be strictly between 0 and 1. "
                f" Received: {validation_split}"
            )
        self._validation_split = validation_split
        # Statistics populated by `fit()`; required by featurewise
        # normalization and ZCA whitening in `standardize()`.
        self.mean = None
        self.std = None
        self.zca_whitening_matrix = None
        # A scalar zoom_range z is interpreted as the interval [1-z, 1+z].
        if isinstance(zoom_range, (float, int)):
            self.zoom_range = [1 - zoom_range, 1 + zoom_range]
        elif len(zoom_range) == 2 and all(
            isinstance(val, (float, int)) for val in zoom_range
        ):
            self.zoom_range = [zoom_range[0], zoom_range[1]]
        else:
            raise ValueError(
                "`zoom_range` should be a float or "
                "a tuple or list of two floats. "
                f"Received: {zoom_range}"
            )
        # ZCA whitening implies featurewise centering and makes featurewise
        # std normalization redundant; warn when we override the caller.
        if zca_whitening:
            if not featurewise_center:
                self.featurewise_center = True
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`zca_whitening`, which overrides "
                    "setting of `featurewise_center`."
                )
            if featurewise_std_normalization:
                self.featurewise_std_normalization = False
                # NOTE(review): the message below is missing a space after
                # "setting of" — cosmetic only.
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`zca_whitening` "
                    "which overrides setting of"
                    "`featurewise_std_normalization`."
                )
        if featurewise_std_normalization:
            if not featurewise_center:
                self.featurewise_center = True
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`featurewise_std_normalization`, "
                    "which overrides setting of "
                    "`featurewise_center`."
                )
        if samplewise_std_normalization:
            if not samplewise_center:
                self.samplewise_center = True
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`samplewise_std_normalization`, "
                    "which overrides setting of "
                    "`samplewise_center`."
                )
        if brightness_range is not None:
            if (
                not isinstance(brightness_range, (tuple, list))
                or len(brightness_range) != 2
            ):
                raise ValueError(
                    "`brightness_range should be tuple or list of two floats. "
                    f"Received: {brightness_range}"
                )
        self.brightness_range = brightness_range

    def flow(
        self,
        x,
        y=None,
        batch_size=32,
        shuffle=True,
        sample_weight=None,
        seed=None,
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        ignore_class_split=False,
        subset=None,
    ):
        """Return a `NumpyArrayIterator` yielding augmented batches of `x`/`y`."""
        return NumpyArrayIterator(
            x,
            y,
            self,
            batch_size=batch_size,
            shuffle=shuffle,
            sample_weight=sample_weight,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            ignore_class_split=ignore_class_split,
            subset=subset,
            dtype=self.dtype,
        )

    def flow_from_directory(
        self,
        directory,
        target_size=(256, 256),
        color_mode="rgb",
        classes=None,
        class_mode="categorical",
        batch_size=32,
        shuffle=True,
        seed=None,
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        follow_links=False,
        subset=None,
        interpolation="nearest",
        keep_aspect_ratio=False,
    ):
        """Return a `DirectoryIterator` reading class subfolders of `directory`."""
        return DirectoryIterator(
            directory,
            self,
            target_size=target_size,
            color_mode=color_mode,
            keep_aspect_ratio=keep_aspect_ratio,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            dtype=self.dtype,
        )

    def flow_from_dataframe(
        self,
        dataframe,
        directory=None,
        x_col="filename",
        y_col="class",
        weight_col=None,
        target_size=(256, 256),
        color_mode="rgb",
        classes=None,
        class_mode="categorical",
        batch_size=32,
        shuffle=True,
        seed=None,
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        subset=None,
        interpolation="nearest",
        validate_filenames=True,
        **kwargs,
    ):
        """Return a `DataFrameIterator` reading image paths from `dataframe`.

        Legacy keyword arguments (`has_ext`, `sort`, `drop_duplicates`,
        `class_mode="other"`) are accepted but deprecated.
        """
        if "has_ext" in kwargs:
            warnings.warn(
                "has_ext is deprecated, filenames in the dataframe have "
                "to match the exact filenames in disk.",
                DeprecationWarning,
            )
        if "sort" in kwargs:
            warnings.warn(
                "sort is deprecated, batches will be created in the"
                "same order than the filenames provided if shuffle"
                "is set to False.",
                DeprecationWarning,
            )
        if class_mode == "other":
            warnings.warn(
                '`class_mode` "other" is deprecated, please use '
                '`class_mode` "raw".',
                DeprecationWarning,
            )
            class_mode = "raw"
        if "drop_duplicates" in kwargs:
            warnings.warn(
                "drop_duplicates is deprecated, you can drop duplicates "
                "by using the pandas.DataFrame.drop_duplicates method.",
                DeprecationWarning,
            )
        return DataFrameIterator(
            dataframe,
            directory,
            self,
            x_col=x_col,
            y_col=y_col,
            weight_col=weight_col,
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            subset=subset,
            interpolation=interpolation,
            validate_filenames=validate_filenames,
            dtype=self.dtype,
        )

    def standardize(self, x):
        """Applies the normalization configuration in-place to a batch of
        inputs.

        `x` is changed in-place since the function is mainly used internally
        to standardize images and feed them to your network. If a copy of `x`
        would be created instead it would have a significant performance cost.
        If you want to apply this method without changing the input in-place
        you can call the method creating a copy before:

        standardize(np.copy(x))

        Args:
            x: Batch of inputs to be normalized.

        Returns:
            The inputs, normalized.
        """
        if self.preprocessing_function:
            x = self.preprocessing_function(x)
        if self.rescale:
            x *= self.rescale
        if self.samplewise_center:
            x -= np.mean(x, keepdims=True)
        if self.samplewise_std_normalization:
            # Small epsilon guards against division by zero for flat images.
            x /= np.std(x, keepdims=True) + 1e-6
        # Featurewise statistics require a prior `fit()`; otherwise warn and
        # skip the corresponding transformation.
        if self.featurewise_center:
            if self.mean is not None:
                x -= self.mean
            else:
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`featurewise_center`, but it hasn't "
                    "been fit on any training data. Fit it "
                    "first by calling `.fit(numpy_data)`."
                )
        if self.featurewise_std_normalization:
            if self.std is not None:
                x /= self.std + 1e-6
            else:
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`featurewise_std_normalization`, "
                    "but it hasn't "
                    "been fit on any training data. Fit it "
                    "first by calling `.fit(numpy_data)`."
                )
        if self.zca_whitening:
            if self.zca_whitening_matrix is not None:
                # Flatten each sample, project through the whitening matrix,
                # and restore the original shape.
                flat_x = x.reshape(-1, np.prod(x.shape[-3:]))
                white_x = flat_x @ self.zca_whitening_matrix
                x = np.reshape(white_x, x.shape)
            else:
                warnings.warn(
                    "This ImageDataGenerator specifies "
                    "`zca_whitening`, but it hasn't "
                    "been fit on any training data. Fit it "
                    "first by calling `.fit(numpy_data)`."
                )
        return x

    def get_random_transform(self, img_shape, seed=None):
        """Generates random parameters for a transformation.

        Args:
            img_shape: Tuple of integers.
                Shape of the image that is transformed.
            seed: Random seed.

        Returns:
            A dictionary containing randomly chosen parameters describing the
            transformation.
        """
        # Batch-axis indices minus one give the per-image axes.
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        if seed is not None:
            np.random.seed(seed)
        if self.rotation_range:
            theta = np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        # `tx` shifts along rows (height), `ty` along columns (width).
        if self.height_shift_range:
            try:  # 1-D array-like or int
                tx = np.random.choice(self.height_shift_range)
                tx *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                # `np.random.choice` raises ValueError on a scalar float,
                # which is treated as a symmetric uniform range instead.
                tx = np.random.uniform(
                    -self.height_shift_range, self.height_shift_range
                )
            if np.max(self.height_shift_range) < 1:
                # Values below 1 are fractions of the image dimension.
                tx *= img_shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            try:  # 1-D array-like or int
                ty = np.random.choice(self.width_shift_range)
                ty *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                ty = np.random.uniform(
                    -self.width_shift_range, self.width_shift_range
                )
            if np.max(self.width_shift_range) < 1:
                ty *= img_shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(
                self.zoom_range[0], self.zoom_range[1], 2
            )
        # Flips fire with probability 0.5 only when the corresponding flag
        # is enabled (bool * bool).
        flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
        channel_shift_intensity = None
        if self.channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(
                -self.channel_shift_range, self.channel_shift_range
            )
        brightness = None
        if self.brightness_range is not None:
            brightness = np.random.uniform(
                self.brightness_range[0], self.brightness_range[1]
            )
        transform_parameters = {
            "theta": theta,
            "tx": tx,
            "ty": ty,
            "shear": shear,
            "zx": zx,
            "zy": zy,
            "flip_horizontal": flip_horizontal,
            "flip_vertical": flip_vertical,
            "channel_shift_intensity": channel_shift_intensity,
            "brightness": brightness,
        }
        return transform_parameters

    def apply_transform(self, x, transform_parameters):
        """Applies a transformation to an image according to given parameters.

        Args:
            x: 3D tensor, single image.
            transform_parameters: Dictionary with string - parameter pairs
                describing the transformation.
                Currently, the following parameters
                from the dictionary are used:
                - `'theta'`: Float. Rotation angle in degrees.
                - `'tx'`: Float. Shift in the x direction.
                - `'ty'`: Float. Shift in the y direction.
                - `'shear'`: Float. Shear angle in degrees.
                - `'zx'`: Float. Zoom in the x direction.
                - `'zy'`: Float. Zoom in the y direction.
                - `'flip_horizontal'`: Boolean. Horizontal flip.
                - `'flip_vertical'`: Boolean. Vertical flip.
                - `'channel_shift_intensity'`: Float. Channel shift intensity.
                - `'brightness'`: Float. Brightness shift intensity.

        Returns:
            A transformed version of the input (same shape).
        """
        # x is a single image, so it doesn't have image number at index 0
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1
        # All affine components are composed into a single warp.
        x = apply_affine_transform(
            x,
            transform_parameters.get("theta", 0),
            transform_parameters.get("tx", 0),
            transform_parameters.get("ty", 0),
            transform_parameters.get("shear", 0),
            transform_parameters.get("zx", 1),
            transform_parameters.get("zy", 1),
            row_axis=img_row_axis,
            col_axis=img_col_axis,
            channel_axis=img_channel_axis,
            fill_mode=self.fill_mode,
            cval=self.cval,
            order=self.interpolation_order,
        )
        if transform_parameters.get("channel_shift_intensity") is not None:
            x = apply_channel_shift(
                x,
                transform_parameters["channel_shift_intensity"],
                img_channel_axis,
            )
        if transform_parameters.get("flip_horizontal", False):
            x = flip_axis(x, img_col_axis)
        if transform_parameters.get("flip_vertical", False):
            x = flip_axis(x, img_row_axis)
        if transform_parameters.get("brightness") is not None:
            x = apply_brightness_shift(
                x, transform_parameters["brightness"], False
            )
        return x

    def random_transform(self, x, seed=None):
        """Applies a random transformation to an image.

        Args:
            x: 3D tensor, single image.
            seed: Random seed.

        Returns:
            A randomly transformed version of the input (same shape).
        """
        params = self.get_random_transform(x.shape, seed)
        return self.apply_transform(x, params)

    def fit(self, x, augment=False, rounds=1, seed=None):
        """Fits the data generator to some sample data.

        This computes the internal data stats related to the
        data-dependent transformations, based on an array of sample data.

        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.

        When `rescale` is set to a value, rescaling is applied to
        sample data before computing the internal data stats.

        Args:
            x: Sample data. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, in case
                of RGB data, it should have value 3, and in case
                of RGBA data, it should have value 4.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 4:
            raise ValueError(
                "Input to `.fit()` should have rank 4. Got array with shape: "
                + str(x.shape)
            )
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                "Expected input to be images (as Numpy array) "
                'following the data format convention "'
                + self.data_format
                + '" (channels on axis '
                + str(self.channel_axis)
                + "), i.e. expected either 1, 3 or 4 channels on axis "
                + str(self.channel_axis)
                + ". However, it was passed an array with shape "
                + str(x.shape)
                + " ("
                + str(x.shape[self.channel_axis])
                + " channels)."
            )
        if seed is not None:
            np.random.seed(seed)
        # Work on a copy so the caller's data is not modified by rescaling
        # or in-place normalization below.
        x = np.copy(x)
        if self.rescale:
            x *= self.rescale
        if augment:
            # Optionally fit on `rounds` augmentation passes over the data.
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=self.dtype,
            )
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            # Per-channel mean, reshaped for broadcasting over images.
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= self.std + 1e-6
        if self.zca_whitening:
            # ZCA whitening matrix derived from the SVD of the flattened
            # (already centered/normalized) samples.
            n = len(x)
            flat_x = np.reshape(x, (n, -1))
            u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)
            s_inv = np.sqrt(n) / (s + self.zca_epsilon)
            self.zca_whitening_matrix = (u * s_inv).dot(u.T)
@keras_export("keras._legacy.preprocessing.image.random_rotation")
def random_rotation(
    x,
    rg,
    row_axis=1,
    col_axis=2,
    channel_axis=0,
    fill_mode="nearest",
    cval=0.0,
    interpolation_order=1,
):
    """Rotates a single image by a random angle in `[-rg, rg]` degrees.

    DEPRECATED.
    """
    # Draw the rotation angle, then delegate the warp to the shared
    # affine-transform helper.
    angle = np.random.uniform(-rg, rg)
    return apply_affine_transform(
        x,
        theta=angle,
        row_axis=row_axis,
        col_axis=col_axis,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
@keras_export("keras._legacy.preprocessing.image.random_shift")
def random_shift(
    x,
    wrg,
    hrg,
    row_axis=1,
    col_axis=2,
    channel_axis=0,
    fill_mode="nearest",
    cval=0.0,
    interpolation_order=1,
):
    """Shifts a single image by random fractions of its height/width.

    DEPRECATED.
    """
    height, width = x.shape[row_axis], x.shape[col_axis]
    # Fractional ranges are scaled to pixel offsets.
    shift_rows = np.random.uniform(-hrg, hrg) * height
    shift_cols = np.random.uniform(-wrg, wrg) * width
    return apply_affine_transform(
        x,
        tx=shift_rows,
        ty=shift_cols,
        row_axis=row_axis,
        col_axis=col_axis,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
@keras_export("keras._legacy.preprocessing.image.random_shear")
def random_shear(
    x,
    intensity,
    row_axis=1,
    col_axis=2,
    channel_axis=0,
    fill_mode="nearest",
    cval=0.0,
    interpolation_order=1,
):
    """Shears a single image by a random angle in `[-intensity, intensity]`.

    DEPRECATED.
    """
    shear_angle = np.random.uniform(-intensity, intensity)
    return apply_affine_transform(
        x,
        shear=shear_angle,
        row_axis=row_axis,
        col_axis=col_axis,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
@keras_export("keras._legacy.preprocessing.image.random_zoom")
def random_zoom(
    x,
    zoom_range,
    row_axis=1,
    col_axis=2,
    channel_axis=0,
    fill_mode="nearest",
    cval=0.0,
    interpolation_order=1,
):
    """Zooms a single image by random factors drawn from `zoom_range`.

    DEPRECATED.
    """
    if len(zoom_range) != 2:
        raise ValueError(
            "`zoom_range` should be a tuple or list of two floats. "
            f"Received: {zoom_range}"
        )
    lo, hi = zoom_range
    if lo == 1 and hi == 1:
        # Degenerate range: identity zoom, skip the random draw.
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(lo, hi, 2)
    return apply_affine_transform(
        x,
        zx=zx,
        zy=zy,
        row_axis=row_axis,
        col_axis=col_axis,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
@keras_export("keras._legacy.preprocessing.image.apply_channel_shift")
def apply_channel_shift(x, intensity, channel_axis=0):
    """Performs a channel shift.

    DEPRECATED.

    Args:
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity.
        channel_axis: Index of axis for channels in the input tensor.

    Returns:
        Numpy image tensor.
    """
    # Bring the channel axis to the front so we can iterate per channel.
    x = np.rollaxis(x, channel_axis, 0)
    min_x, max_x = np.min(x), np.max(x)
    # Shift every channel by `intensity`, clipping to the original range.
    shifted_channels = [
        np.clip(channel + intensity, min_x, max_x) for channel in x
    ]
    x = np.stack(shifted_channels, axis=0)
    # Move the channel axis back to its original position.
    return np.rollaxis(x, 0, channel_axis + 1)
@keras_export("keras._legacy.preprocessing.image.random_channel_shift")
def random_channel_shift(x, intensity_range, channel_axis=0):
    """Performs a random channel shift.

    DEPRECATED.

    Args:
        x: Input tensor. Must be 3D.
        intensity_range: Transformation intensity.
        channel_axis: Index of axis for channels in the input tensor.

    Returns:
        Numpy image tensor.
    """
    # Draw the shift amount, then reuse the deterministic helper.
    shift = np.random.uniform(-intensity_range, intensity_range)
    return apply_channel_shift(x, shift, channel_axis=channel_axis)
@keras_export("keras._legacy.preprocessing.image.apply_brightness_shift")
def apply_brightness_shift(x, brightness, scale=True):
    """Performs a brightness shift.

    DEPRECATED.

    Args:
        x: Input tensor. Must be 3D.
        brightness: Float. The new brightness value.
        scale: Whether to rescale the image such that minimum and maximum values
            are 0 and 255 respectively. Default: True.

    Returns:
        Numpy image tensor.

    Raises:
        ImportError: if PIL is not available.
    """
    from PIL import ImageEnhance

    x_min, x_max = np.min(x), np.max(x)
    # PIL expects uint8-range data, so scaling is forced whenever values
    # fall outside [0, 255], even if the caller asked for `scale=False`.
    local_scale = (x_min < 0) or (x_max > 255)
    x = image_utils.array_to_img(x, scale=local_scale or scale)
    # Fixed: the original bound the enhancer to `x` and a stray
    # `imgenhancer_Brightness` name in one confusing double assignment,
    # then immediately overwrote `x`; a single chained call is equivalent.
    x = ImageEnhance.Brightness(x).enhance(brightness)
    x = image_utils.img_to_array(x)
    if not scale and local_scale:
        # Undo the forced scaling: map [0, 255] back to [x_min, x_max].
        x = x / 255 * (x_max - x_min) + x_min
    return x
@keras_export("keras._legacy.preprocessing.image.random_brightness")
def random_brightness(x, brightness_range, scale=True):
    """Performs a random brightness shift.

    DEPRECATED.

    Args:
        x: Input tensor. Must be 3D.
        brightness_range: Tuple of floats; brightness range.
        scale: Whether to rescale the image such that minimum and maximum values
            are 0 and 255 respectively. Default: True.

    Returns:
        Numpy image tensor.

    Raises:
        ValueError if `brightness_range` isn't a tuple.
    """
    if len(brightness_range) != 2:
        raise ValueError(
            "`brightness_range should be tuple or list of two floats. "
            f"Received: {brightness_range}"
        )
    low, high = brightness_range
    factor = np.random.uniform(low, high)
    return apply_brightness_shift(x, factor, scale)
def transform_matrix_offset_center(matrix, x, y):
    """Re-anchor a 3x3 affine `matrix` so it acts about the image center.

    Conjugates `matrix` with translations to and from the center point
    ((x - 1) / 2, (y - 1) / 2) of an `x`-by-`y` image.
    """
    center_x = float(x) / 2 - 0.5
    center_y = float(y) / 2 - 0.5
    to_center = np.array([[1, 0, center_x], [0, 1, center_y], [0, 0, 1]])
    from_center = np.array([[1, 0, -center_x], [0, 1, -center_y], [0, 0, 1]])
    return np.dot(np.dot(to_center, matrix), from_center)
@keras_export("keras._legacy.preprocessing.image.apply_affine_transform")
def apply_affine_transform(
    x,
    theta=0,
    tx=0,
    ty=0,
    shear=0,
    zx=1,
    zy=1,
    row_axis=1,
    col_axis=2,
    channel_axis=0,
    fill_mode="nearest",
    cval=0.0,
    order=1,
):
    """Applies an affine transformation specified by the parameters given.

    DEPRECATED.

    Args:
        x: 3D numpy array - a 2D image with one or more channels.
        theta: Rotation angle in degrees.
        tx: Shift along the row axis.
        ty: Shift along the column axis.
        shear: Shear angle in degrees.
        zx: Zoom along the row axis.
        zy: Zoom along the column axis.
        row_axis: Index of the axis for rows in the input image.
        col_axis: Index of the axis for columns in the input image.
        channel_axis: Index of the axis for channels in the input image.
        fill_mode: Points outside the boundaries of the input are filled
            according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries of the input
            if `fill_mode='constant'`.
        order: Int, order of spline interpolation.

    Returns:
        The transformed version of the input, same shape as `x`.

    Raises:
        ValueError: if the axis arguments are not distinct, not a
            permutation of `{0, 1, 2}`, `x` is not 3D, or the channel
            axis is not first or last.
    """
    # Input sanity checks:
    # 1. x must 2D image with one or more channels (i.e., a 3D tensor)
    # 2. channels must be either first or last dimension
    if np.unique([row_axis, col_axis, channel_axis]).size != 3:
        raise ValueError(
            "'row_axis', 'col_axis', and 'channel_axis' must be distinct"
        )
    # shall we support negative indices?
    valid_indices = set([0, 1, 2])
    actual_indices = set([row_axis, col_axis, channel_axis])
    if actual_indices != valid_indices:
        raise ValueError(
            f"Invalid axis' indices: {actual_indices - valid_indices}"
        )
    if x.ndim != 3:
        raise ValueError("Input arrays must be multi-channel 2D images.")
    if channel_axis not in [0, 2]:
        # Fixed garbled message: channels must be first or last dimension.
        raise ValueError(
            "Channels are allowed at the first and last dimensions only."
        )

    # Compose the requested elementary transforms into one 3x3 matrix.
    # `None` means "identity": skip the scipy call entirely below.
    transform_matrix = None
    if theta != 0:
        theta = np.deg2rad(theta)
        rotation_matrix = np.array(
            [
                [np.cos(theta), -np.sin(theta), 0],
                [np.sin(theta), np.cos(theta), 0],
                [0, 0, 1],
            ]
        )
        transform_matrix = rotation_matrix

    if tx != 0 or ty != 0:
        shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
        if transform_matrix is None:
            transform_matrix = shift_matrix
        else:
            transform_matrix = np.dot(transform_matrix, shift_matrix)

    if shear != 0:
        shear = np.deg2rad(shear)
        shear_matrix = np.array(
            [[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]]
        )
        if transform_matrix is None:
            transform_matrix = shear_matrix
        else:
            transform_matrix = np.dot(transform_matrix, shear_matrix)

    if zx != 1 or zy != 1:
        zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
        if transform_matrix is None:
            transform_matrix = zoom_matrix
        else:
            transform_matrix = np.dot(transform_matrix, zoom_matrix)

    if transform_matrix is not None:
        h, w = x.shape[row_axis], x.shape[col_axis]
        transform_matrix = transform_matrix_offset_center(
            transform_matrix, h, w
        )
        x = np.rollaxis(x, channel_axis, 0)

        # Matrix construction assumes that coordinates are x, y (in that
        # order). However, regular numpy arrays use y,x (aka i,j) indexing.
        # Possible solution is:
        # 1. Swap the x and y axes.
        # 2. Apply transform.
        # 3. Swap the x and y axes again to restore image-like data ordering.
        # Mathematically, it is equivalent to the following transformation:
        # M' = PMP, where P is the permutation matrix, M is the original
        # transformation matrix.
        if col_axis > row_axis:
            transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]
            transform_matrix[[0, 1]] = transform_matrix[[1, 0]]
        final_affine_matrix = transform_matrix[:2, :2]
        final_offset = transform_matrix[:2, 2]

        # Fixed: `scipy.ndimage.interpolation` was a deprecated alias that
        # was removed in SciPy 1.10; `scipy.ndimage.affine_transform` is
        # the same function under its public name.
        channel_images = [
            scipy.ndimage.affine_transform(
                x_channel,
                final_affine_matrix,
                final_offset,
                order=order,
                mode=fill_mode,
                cval=cval,
            )
            for x_channel in x
        ]
        x = np.stack(channel_images, axis=0)
        x = np.rollaxis(x, 0, channel_axis + 1)
    return x
|
keras/keras/legacy/preprocessing/image.py/0
|
{
"file_path": "keras/keras/legacy/preprocessing/image.py",
"repo_id": "keras",
"token_count": 32621
}
| 205 |
import inspect
from keras.api_export import keras_export
from keras.metrics.accuracy_metrics import Accuracy
from keras.metrics.accuracy_metrics import BinaryAccuracy
from keras.metrics.accuracy_metrics import CategoricalAccuracy
from keras.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.metrics.confusion_metrics import AUC
from keras.metrics.confusion_metrics import FalseNegatives
from keras.metrics.confusion_metrics import FalsePositives
from keras.metrics.confusion_metrics import Precision
from keras.metrics.confusion_metrics import PrecisionAtRecall
from keras.metrics.confusion_metrics import Recall
from keras.metrics.confusion_metrics import RecallAtPrecision
from keras.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.metrics.confusion_metrics import TrueNegatives
from keras.metrics.confusion_metrics import TruePositives
from keras.metrics.f_score_metrics import F1Score
from keras.metrics.f_score_metrics import FBetaScore
from keras.metrics.hinge_metrics import CategoricalHinge
from keras.metrics.hinge_metrics import Hinge
from keras.metrics.hinge_metrics import SquaredHinge
from keras.metrics.iou_metrics import BinaryIoU
from keras.metrics.iou_metrics import IoU
from keras.metrics.iou_metrics import MeanIoU
from keras.metrics.iou_metrics import OneHotIoU
from keras.metrics.iou_metrics import OneHotMeanIoU
from keras.metrics.metric import Metric
from keras.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.metrics.probabilistic_metrics import KLDivergence
from keras.metrics.probabilistic_metrics import Poisson
from keras.metrics.probabilistic_metrics import SparseCategoricalCrossentropy
from keras.metrics.reduction_metrics import Mean
from keras.metrics.reduction_metrics import MeanMetricWrapper
from keras.metrics.reduction_metrics import Sum
from keras.metrics.regression_metrics import CosineSimilarity
from keras.metrics.regression_metrics import LogCoshError
from keras.metrics.regression_metrics import MeanAbsoluteError
from keras.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.metrics.regression_metrics import MeanSquaredError
from keras.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.metrics.regression_metrics import R2Score
from keras.metrics.regression_metrics import RootMeanSquaredError
from keras.saving import serialization_lib
from keras.utils.naming import to_snake_case
# Every built-in metric class, grouped by task family. This set feeds the
# string-identifier lookup table below and `serialize`/`deserialize`.
ALL_OBJECTS = {
    # Base
    Metric,
    Mean,
    Sum,
    MeanMetricWrapper,
    # Regression
    MeanSquaredError,
    RootMeanSquaredError,
    MeanAbsoluteError,
    MeanAbsolutePercentageError,
    MeanSquaredLogarithmicError,
    CosineSimilarity,
    LogCoshError,
    R2Score,
    # Classification
    AUC,
    FalseNegatives,
    FalsePositives,
    Precision,
    PrecisionAtRecall,
    Recall,
    RecallAtPrecision,
    SensitivityAtSpecificity,
    SpecificityAtSensitivity,
    TrueNegatives,
    TruePositives,
    # Hinge
    Hinge,
    SquaredHinge,
    CategoricalHinge,
    # Probabilistic
    KLDivergence,
    Poisson,
    BinaryCrossentropy,
    CategoricalCrossentropy,
    SparseCategoricalCrossentropy,
    # Accuracy
    Accuracy,
    BinaryAccuracy,
    CategoricalAccuracy,
    SparseCategoricalAccuracy,
    TopKCategoricalAccuracy,
    SparseTopKCategoricalAccuracy,
    # F-Score
    F1Score,
    FBetaScore,
    # IoU
    IoU,
    BinaryIoU,
    MeanIoU,
    OneHotIoU,
    OneHotMeanIoU,
}
# Lookup table used by `get()`/`deserialize()`: maps both the class name
# (e.g. "MeanSquaredError") and its snake_case form
# (e.g. "mean_squared_error") to the metric class.
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# TODO: Align with `tf.keras` and set the name attribute of metrics
# with the key name. Currently it uses default name of class definitions.
# Short aliases commonly used in `model.compile(metrics=[...])`.
ALL_OBJECTS_DICT.update(
    {
        "bce": BinaryCrossentropy,
        "BCE": BinaryCrossentropy,
        "mse": MeanSquaredError,
        "MSE": MeanSquaredError,
        "mae": MeanAbsoluteError,
        "MAE": MeanAbsoluteError,
        "mape": MeanAbsolutePercentageError,
        "MAPE": MeanAbsolutePercentageError,
        "msle": MeanSquaredLogarithmicError,
        "MSLE": MeanSquaredLogarithmicError,
    }
)
@keras_export("keras.metrics.serialize")
def serialize(metric):
    """Serializes metric function or `Metric` instance.

    Args:
        metric: A Keras `Metric` instance or a metric function.

    Returns:
        Metric configuration dictionary, suitable for round-tripping
        through `deserialize()`.
    """
    return serialization_lib.serialize_keras_object(metric)
@keras_export("keras.metrics.deserialize")
def deserialize(config, custom_objects=None):
    """Deserializes a serialized metric class/function instance.

    Args:
        config: Metric configuration, as returned by `serialize()`.
        custom_objects: Optional dictionary mapping names (strings)
            to custom objects (classes and functions) to be
            considered during deserialization.

    Returns:
        A Keras `Metric` instance or a metric function.
    """
    # Built-in metrics are resolved through the module-level lookup table;
    # anything else must come from `custom_objects`.
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )
@keras_export("keras.metrics.get")
def get(identifier):
    """Retrieves a Keras metric as a `function`/`Metric` class instance.

    The `identifier` may be:

    - `None`, in which case `None` is returned;
    - the string name of a metric function or class (e.g.
      `"categorical_crossentropy"` or `"CategoricalCrossentropy"`);
    - a configuration dict with `class_name` and `config` keys, where
      `class_name` must map to a `Metric` class;
    - an already-built metric function or `Metric` instance, which is
      returned unchanged.

    Args:
        identifier: A metric identifier. One of None or string name of a
            metric function/class or metric configuration dictionary or a
            metric function or a metric class instance.

    Returns:
        A Keras metric as a `function`/ `Metric` class instance.

    Raises:
        ValueError: if the identifier cannot be resolved to a callable.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        candidate = deserialize(identifier)
    elif isinstance(identifier, str):
        candidate = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        candidate = identifier
    if not callable(candidate):
        raise ValueError(f"Could not interpret metric identifier: {identifier}")
    # A bare class (e.g. from a string lookup) is instantiated with defaults.
    if inspect.isclass(candidate):
        candidate = candidate()
    return candidate
|
keras/keras/metrics/__init__.py/0
|
{
"file_path": "keras/keras/metrics/__init__.py",
"repo_id": "keras",
"token_count": 2583
}
| 206 |
from keras import initializers
from keras import losses
from keras import ops
from keras.api_export import keras_export
from keras.metrics.metric import Metric
from keras.saving import serialization_lib
def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype):
    """Reduce `values` (and `sample_weight`) to at most one value per sample.

    Applies any mask attached to `values` (via `_keras_mask`) to the
    sample weights, aligns the ranks of values and weights, and collapses
    the non-batch axes with `reduce_fn`.

    Args:
        values: Tensor of metric values; axis 0 is assumed to be the batch
            dimension — TODO confirm against callers.
        sample_weight: Optional tensor of per-sample or per-element
            weights, or `None`.
        reduce_fn: Reduction applied across non-batch axes
            (e.g. `ops.sum` or `ops.mean`).
        dtype: Dtype to cast `values` and `sample_weight` to.

    Returns:
        Tuple `(values, sample_weight)` in per-sample form;
        `sample_weight` is `None` if none was given.
    """
    # Masks are attached to tensors by mask-producing layers.
    mask = getattr(values, "_keras_mask", None)
    values = ops.cast(values, dtype=dtype)
    if sample_weight is not None:
        sample_weight = ops.cast(sample_weight, dtype=dtype)
        if mask is not None:
            sample_weight = losses.loss.apply_mask(
                sample_weight, mask, dtype=dtype, reduction="sum"
            )
        # Update dimensions of weights to match with values if possible.
        values, sample_weight = losses.loss.squeeze_or_expand_to_same_rank(
            values, sample_weight
        )
        # Reduce values to same ndim as weight array
        weight_ndim = len(sample_weight.shape)
        values_ndim = len(values.shape)
        if values_ndim > weight_ndim:
            values = reduce_fn(
                values, axis=list(range(weight_ndim, values_ndim))
            )
        # Apply the weights before the final per-sample reduction.
        values = values * sample_weight
        if values_ndim > 1:
            # Collapse the weights' non-batch axes to one weight per sample.
            sample_weight = reduce_fn(
                sample_weight, axis=list(range(1, weight_ndim))
            )
    values_ndim = len(values.shape)
    if values_ndim > 1:
        values = reduce_fn(values, axis=list(range(1, values_ndim)))
        # NOTE(review): this early return is identical to the fall-through
        # return below; kept as-is to preserve the original structure.
        return values, sample_weight
    return values, sample_weight
@keras_export("keras.metrics.Sum")
class Sum(Metric):
    """Compute the (weighted) sum of the given values.

    Tracks a single variable, `total`, which accumulates the (optionally
    weighted) sum of every batch of values passed to `update_state`;
    `result()` returns the accumulated total.

    For example, if `values` is `[1, 3, 5, 7]` then their sum is 16.
    If `sample_weight` was specified as `[1, 1, 0, 0]` then the sum
    would be 4.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = metrics.Sum()
    >>> m.update_state([1, 3, 5, 7])
    >>> m.result()
    16.0

    >>> m = metrics.Sum()
    >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
    >>> m.result()
    4.0
    """

    def __init__(self, name="sum", dtype=None):
        super().__init__(name=name, dtype=dtype)
        # Running total -- the only state this metric needs.
        self.total = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            dtype=self.dtype,
            name="total",
        )

    def update_state(self, values, sample_weight=None):
        # Weighting (and masking) is folded into the values by the helper;
        # the returned per-sample weights are not needed for a plain sum.
        per_sample, _ = reduce_to_samplewise_values(
            values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype
        )
        self.total.assign(self.total + ops.sum(per_sample))

    def reset_state(self):
        self.total.assign(0.0)

    def result(self):
        return ops.cast(self.total, self.dtype)
@keras_export("keras.metrics.Mean")
class Mean(Metric):
    """Compute the (weighted) mean of the given values.

    Tracks two variables, `total` and `count`; `result()` is simply
    `total / count` (0 when no samples have been seen).

    For example, if values is `[1, 3, 5, 7]` then the mean is 4.
    If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean
    would be 2.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = Mean()
    >>> m.update_state([1, 3, 5, 7])
    >>> m.result()
    4.0

    >>> m.reset_state()
    >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
    >>> m.result()
    2.0
    """

    def __init__(self, name="mean", dtype=None):
        super().__init__(name=name, dtype=dtype)
        # Numerator and denominator of the running mean.
        self.total = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            dtype=self.dtype,
            name="total",
        )
        self.count = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            dtype=self.dtype,
            name="count",
        )

    def update_state(self, values, sample_weight=None):
        per_sample, weights = reduce_to_samplewise_values(
            values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype
        )
        self.total.assign(self.total + ops.sum(per_sample))
        if weights is not None:
            # Weighted mean: the denominator is the total weight.
            num_samples = ops.sum(weights)
        elif len(per_sample.shape) >= 1:
            num_samples = ops.shape(per_sample)[0]
        else:
            # A scalar update contributes a single sample.
            num_samples = 1
        self.count.assign(self.count + ops.cast(num_samples, dtype=self.dtype))

    def reset_state(self):
        self.total.assign(0.0)
        self.count.assign(0)

    def result(self):
        # divide_no_nan keeps the result at 0 before any update is seen.
        return ops.divide_no_nan(
            self.total, ops.cast(self.count, dtype=self.dtype)
        )
@keras_export("keras.metrics.MeanMetricWrapper")
class MeanMetricWrapper(Mean):
    """Wrap a stateless metric function with the `Mean` metric.

    You could use this class to quickly build a mean metric from a function.
    The function needs to have the signature `fn(y_true, y_pred)` and return
    a per-sample loss array. `MeanMetricWrapper.result()` will return
    the average metric value across all samples seen so far.

    For example:

    ```python
    def mse(y_true, y_pred):
        return (y_true - y_pred) ** 2

    mse_metric = MeanMetricWrapper(fn=mse)
    ```

    Args:
        fn: The metric function to wrap, with signature
            `fn(y_true, y_pred, **kwargs)`.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        **kwargs: Keyword arguments to pass on to `fn`.
    """

    def __init__(self, fn, name=None, dtype=None, **kwargs):
        super().__init__(name=name, dtype=dtype)
        self._fn = fn
        self._fn_kwargs = kwargs

        # If we are wrapping a Keras loss, register the metric's
        # direction as "down" (needs to be minimized during training).
        # Note operator precedence: `A or (B and C)`.
        if (
            self._fn in losses.ALL_OBJECTS
            or hasattr(self._fn, "__class__")
            and self._fn.__class__ in losses.ALL_OBJECTS
        ):
            self._direction = "down"

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Propagate any mask on the predictions into the sample weights so
        # masked timesteps do not contribute to the mean.
        mask = getattr(y_pred, "_keras_mask", None)
        values = self._fn(y_true, y_pred, **self._fn_kwargs)
        if sample_weight is not None and mask is not None:
            sample_weight = losses.loss.apply_mask(
                sample_weight, mask, dtype=self.dtype, reduction="sum"
            )
        return super().update_state(values, sample_weight=sample_weight)

    def get_config(self):
        base_config = super().get_config()
        # The wrapped fn and its kwargs are flattened into one config dict.
        config = {"fn": serialization_lib.serialize_keras_object(self._fn)}
        config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        # Only deserialize when `fn` is present; subclasses that hard-code
        # their fn pass a plain config straight through.
        if "fn" in config:
            config = serialization_lib.deserialize_keras_object(config)
        return cls(**config)
|
keras/keras/metrics/reduction_metrics.py/0
|
{
"file_path": "keras/keras/metrics/reduction_metrics.py",
"repo_id": "keras",
"token_count": 3115
}
| 207 |
"""
scatter
scatter_update
slice
slice_update
while_loop
stop_gradient
shape
cast
convert_to_tensor
convert_to_numpy
cond
is_tensor
"""
import numpy as np
from keras import backend
from keras.api_export import keras_export
from keras.backend import KerasTensor
from keras.backend import any_symbolic_tensors
from keras.ops.operation import Operation
from keras.utils import traceback_utils
class Scatter(Operation):
    """Symbolic op counterpart of `keras.ops.scatter`."""

    def call(self, indices, values, shape):
        return backend.core.scatter(indices, values, shape)

    def compute_output_spec(self, indices, values, shape):
        # Output takes the requested `shape` and inherits `values`' dtype.
        return KerasTensor(shape, dtype=values.dtype)
@keras_export("keras.ops.scatter")
def scatter(indices, values, shape):
    """Returns a tensor of shape `shape` where `indices` are set to `values`.

    Conceptually, this builds a zeros tensor of the given shape and then
    scatters `values` into it:

    ```python
    zeros = keras.ops.zeros(shape)
    output = keras.ops.scatter_update(zeros, indices, values)
    ```

    Args:
        indices: A tensor or list/tuple specifying
            indices for the values in `values`.
        values: A tensor, the values to be set at `indices`.
        shape: Shape of the output tensor.

    Example:

    >>> indices = [[0, 1], [1, 1]]
    >>> values = np.array([1., 1.])
    >>> keras.ops.scatter(indices, values, shape=(2, 2))
    array([[0., 1.],
           [0., 1.]])
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((indices, values, shape)):
        op = Scatter()
        return op.symbolic_call(indices, values, shape)
    return backend.core.scatter(indices, values, shape)
class ScatterUpdate(Operation):
    """Symbolic op counterpart of `keras.ops.scatter_update`."""

    def call(self, inputs, indices, updates):
        return backend.core.scatter_update(inputs, indices, updates)

    def compute_output_spec(self, inputs, indices, updates):
        # Updating in place: shape and dtype follow `inputs`.
        return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.ops.scatter_update")
def scatter_update(inputs, indices, updates):
    """Update inputs via updates at scattered (sparse) indices.

    At a high level, this operation does `inputs[indices] = updates`.
    Assume `inputs` is a tensor of shape `(D0, D1, ..., Dn)`, there are 2
    main usages of `scatter_update`.

    1. `indices` is a 2D tensor of shape `(num_updates, n)`, where
    `num_updates` is the number of updates to perform, and `updates` is a
    1D tensor of shape `(num_updates,)`. For example, if `inputs` is
    `zeros((4, 4, 4))`, and we want to update `inputs[1, 2, 3]` and
    `inputs[0, 1, 3]` as 1, then we can use:

    ```python
    inputs = np.zeros((4, 4, 4))
    indices = [[1, 2, 3], [0, 1, 3]]
    updates = np.array([1., 1.])
    inputs = keras.ops.scatter_update(inputs, indices, updates)
    ```

    2. `indices` is a 2D tensor of shape `(num_updates, k)`, where
    `num_updates` is the number of updates to perform, and `k` (`k < n`) is
    the size of each index in `indices`. `updates` is a `n - k`-D tensor of
    shape `(num_updates, inputs.shape[k:])`. For example, if
    `inputs = np.zeros((4, 4, 4))`, and we want to update `inputs[1, 2, :]`
    and `inputs[2, 3, :]` as `[1, 1, 1, 1]`, then `indices` would have shape
    `(num_updates, 2)` (`k = 2`), and `updates` would have shape
    `(num_updates, 4)` (`inputs.shape[2:] = 4`). See the code below:

    ```python
    inputs = np.zeros((4, 4, 4))
    indices = [[1, 2], [2, 3]]
    updates = np.array([[1., 1., 1., 1.], [1., 1., 1., 1.]])
    inputs = keras.ops.scatter_update(inputs, indices, updates)
    ```

    Args:
        inputs: A tensor, the tensor to be updated.
        indices: A tensor or list/tuple of shape `(N, inputs.ndim)`,
            specifying indices to update. `N` is the number of indices to
            update, must be equal to the first dimension of `updates`.
        updates: A tensor, the new values to be put to `inputs` at
            `indices`.

    Returns:
        A tensor, has the same shape and dtype as `inputs`.
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((inputs, indices, updates)):
        return ScatterUpdate().symbolic_call(inputs, indices, updates)
    return backend.core.scatter_update(inputs, indices, updates)
class Slice(Operation):
    """Symbolic op counterpart of `keras.ops.slice`."""

    def call(self, inputs, start_indices, shape):
        return backend.core.slice(inputs, start_indices, shape)

    def compute_output_spec(self, inputs, start_indices, shape):
        # The slice has the requested `shape` and `inputs`' dtype.
        return KerasTensor(shape, dtype=inputs.dtype)
@keras_export("keras.ops.slice")
def slice(inputs, start_indices, shape):
    """Return a slice of an input tensor.

    At a high level, this operation is an explicit replacement for array
    slicing e.g. `inputs[start_indices: start_indices + shape]`.
    Unlike slicing via brackets, this operation will accept tensor start
    indices on all backends, which is useful when indices dynamically
    computed via other tensor operations.

    ```python
    inputs = np.zeros((5, 5))
    start_indices = np.array([3, 3])
    shape = np.array([2, 2])
    inputs = keras.ops.slice(inputs, start_indices, shape)
    ```

    Args:
        inputs: A tensor, the tensor to be sliced.
        start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
            the starting indices for the slice.
        shape: The full shape of the returned slice.

    Returns:
        A tensor of the requested `shape`, with the same dtype as `inputs`.
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((inputs, start_indices, shape)):
        return Slice().symbolic_call(inputs, start_indices, shape)
    return backend.core.slice(inputs, start_indices, shape)
class SliceUpdate(Operation):
    """Symbolic op counterpart of `keras.ops.slice_update`."""

    def call(self, inputs, start_indices, updates):
        return backend.core.slice_update(inputs, start_indices, updates)

    def compute_output_spec(self, inputs, start_indices, updates):
        # Updating in place: shape and dtype follow `inputs`.
        return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.ops.slice_update")
def slice_update(inputs, start_indices, updates):
    """Update an input by slicing in a tensor of updated values.

    At a high level, this operation does
    `inputs[start_indices: start_indices + updates.shape] = updates`.
    Assume inputs is a tensor of shape `(D0, D1, ..., Dn)`,
    `start_indices` must be a list/tuple of n integers, specifying the
    starting indices. `updates` must have the same rank as `inputs`, and
    the size of each dim must not exceed `Di - start_indices[i]`. For
    example, if we have 2D inputs `inputs = np.zeros((5, 5))`, and we want
    to update the intersection of last 2 rows and last 2 columns as 1,
    i.e., `inputs[3:, 3:] = np.ones((2, 2))`, then we can use the code
    below:

    ```python
    inputs = np.zeros((5, 5))
    start_indices = [3, 3]
    updates = np.ones((2, 2))
    inputs = keras.ops.slice_update(inputs, start_indices, updates)
    ```

    Args:
        inputs: A tensor, the tensor to be updated.
        start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
            the starting indices for updating.
        updates: A tensor, the new values to be put to `inputs` at
            `indices`. `updates` must have the same rank as `inputs`.

    Returns:
        A tensor, has the same shape and dtype as `inputs`.
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((inputs, start_indices, updates)):
        return SliceUpdate().symbolic_call(inputs, start_indices, updates)
    return backend.core.slice_update(inputs, start_indices, updates)
class WhileLoop(Operation):
    """Symbolic op counterpart of `keras.ops.while_loop`."""

    def __init__(self, cond, body, maximum_iterations):
        super().__init__()
        self.cond = cond
        self.body = body
        self.maximum_iterations = maximum_iterations

    def call(self, loop_vars):
        return backend.core.while_loop(
            self.cond,
            self.body,
            loop_vars,
            maximum_iterations=self.maximum_iterations,
        )

    def compute_output_spec(self, loop_vars):
        # The loop produces state with the same shapes/dtypes as the
        # initial loop variables.
        return [KerasTensor(v.shape, dtype=v.dtype) for v in loop_vars]
@keras_export("keras.ops.while_loop")
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """While loop implementation.

    Args:
        cond: A callable that represents the termination condition of the
            loop. Must accept a `loop_vars` like structure as an argument.
            If `loop_vars` is a tuple or list, each element of `loop_vars`
            will be passed positionally to the callable.
        body: A callable that represents the loop body. Must accept a
            `loop_vars` like structure as an argument, and return update
            value with the same structure. If `loop_vars` is a tuple or
            list, each element of `loop_vars` will be passed positionally
            to the callable.
        loop_vars: An arbitrary nested structure of tensor state to persist
            across loop iterations.
        maximum_iterations: Optional maximum number of iterations of the
            while loop to run. If provided, the `cond` output is AND-ed
            with an additional condition ensuring the number of iterations
            executed is no greater than `maximum_iterations`.

    Returns:
        The final state of `loop_vars`, with the same structure, shape and
        dtype as the initial `loop_vars`.

    Examples:

    >>> i = 0
    >>> cond = lambda i: i < 10
    >>> body = lambda i: i + 1
    >>> keras.ops.while_loop(cond, body, i)
    10

    >>> x, y = 0, 1
    >>> cond = lambda x, y: x < 10
    >>> body = lambda x, y: (x + 1, y + 1)
    >>> keras.ops.while_loop(cond, body, (x, y))
    10, 11
    """
    return backend.core.while_loop(
        cond,
        body,
        loop_vars,
        maximum_iterations=maximum_iterations,
    )
class StopGradient(Operation):
    """Symbolic op counterpart of `keras.ops.stop_gradient`."""

    def __init__(self):
        super().__init__()

    def call(self, variable):
        return backend.core.stop_gradient(variable)

    def compute_output_spec(self, variable):
        # Identity in shape and dtype; only gradient flow is affected.
        return KerasTensor(variable.shape, dtype=variable.dtype)
@keras_export("keras.ops.stop_gradient")
def stop_gradient(variable):
    """Stops gradient computation.

    Args:
        variable: A tensor variable for which the gradient
            computation is to be disabled.

    Returns:
        The variable with gradient computation disabled; values are
        unchanged.

    Examples:

    >>> var = keras.backend.convert_to_tensor(
    ...     [1., 2., 3.],
    ...     dtype="float32"
    ... )
    >>> var = keras.ops.stop_gradient(var)
    """
    return backend.core.stop_gradient(variable)
class ForiLoop(Operation):
    """Symbolic op counterpart of `keras.ops.fori_loop`."""

    def __init__(self, lower, upper, body_fun):
        super().__init__()
        self.lower = lower
        self.upper = upper
        self.body_fun = body_fun

    def call(self, init_val):
        return backend.core.fori_loop(
            self.lower,
            self.upper,
            self.body_fun,
            init_val,
        )

    def compute_output_spec(self, init_val):
        # The loop state keeps the shape/dtype of the initial value.
        return KerasTensor(init_val.shape, dtype=init_val.dtype)
@keras_export("keras.ops.fori_loop")
def fori_loop(lower, upper, body_fun, init_val):
    """For loop implementation.

    Args:
        lower: The initial value of the loop variable.
        upper: The upper bound of the loop variable (exclusive).
        body_fun: A callable that represents the loop body. Must take two
            arguments: the loop variable and the loop state. The loop state
            should be updated and returned by this function.
        init_val: The initial value of the loop state.

    Returns:
        The final state after the loop.

    Example:

    >>> lower = 0
    >>> upper = 10
    >>> body_fun = lambda i, s: s + i
    >>> init_val = 0
    >>> keras.ops.fori_loop(lower, upper, body_fun, init_val)
    45
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((lower, upper, init_val)):
        return ForiLoop(lower, upper, body_fun).symbolic_call(init_val)
    return backend.core.fori_loop(lower, upper, body_fun, init_val)
class Unstack(Operation):
    """Symbolic op counterpart of `keras.ops.unstack`."""

    def __init__(self, num=None, axis=0):
        super().__init__()
        self.num = num
        self.axis = axis

    def call(self, x):
        return backend.core.unstack(x, self.num, self.axis)

    def compute_output_spec(self, x):
        axis = self.axis
        if axis < 0:
            # Normalize negative axis values.
            axis = len(x.shape) + axis
        # Each output drops the unpacked axis.
        output_shapes = x.shape[:axis] + x.shape[axis + 1 :]
        num = self.num
        if num is None:
            # Infer the number of outputs from the static shape, if known.
            num = x.shape[axis]
        if num is None:
            raise ValueError(
                "Cannot infer argument `num` from shape "
                f"{x.shape}. Either provide a tensor with a "
                "concrete shape in the `axis` dimension or "
                "explicitly pass the `num` argument."
            )
        output = [
            KerasTensor(shape=output_shapes, dtype=x.dtype) for _ in range(num)
        ]
        return output
@keras_export("keras.ops.unstack")
def unstack(x, num=None, axis=0):
    """Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.

    Args:
        x: The input tensor.
        num: The length of the dimension axis. Automatically inferred
            if `None`.
        axis: The axis along which to unpack.

    Returns:
        A list of tensors unpacked along the given axis.

    Example:

    >>> x = keras.ops.array([[1, 2], [3, 4]])
    >>> keras.ops.unstack(x, axis=0)
    [array([1, 2]), array([3, 4])]
    """
    if not any_symbolic_tensors((x,)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.unstack(x, num=num, axis=axis)
    return Unstack(num, axis).symbolic_call(x)
@keras_export("keras.ops.shape")
def shape(x):
    """Gets the shape of the tensor input.

    Note: On the TensorFlow backend, when `x` is a `tf.Tensor` with dynamic
    shape, dimensions which are dynamic in the context of a compiled
    function will have a `tf.Tensor` value instead of a static integer
    value.

    Args:
        x: A tensor. This function will try to access the `shape` attribute
            of the input tensor.

    Returns:
        A tuple of integers or None values, indicating the shape of the
        input tensor.

    Example:

    >>> x = keras.zeros((8, 12))
    >>> keras.ops.shape(x)
    (8, 12)
    """
    if any_symbolic_tensors((x,)):
        # Symbolic tensors already carry their static shape tuple.
        return x.shape
    return backend.core.shape(x)
class Cast(Operation):
    """Symbolic op counterpart of `keras.ops.cast`."""

    def __init__(self, dtype):
        super().__init__()
        # Normalize to a canonical backend dtype string up front.
        self.dtype = backend.standardize_dtype(dtype)

    def call(self, x):
        return backend.core.cast(x, self.dtype)

    def compute_output_spec(self, x):
        return backend.KerasTensor(shape=x.shape, dtype=self.dtype)
@keras_export("keras.ops.cast")
def cast(x, dtype):
    """Cast a tensor to the desired dtype.

    Args:
        x: A tensor or variable.
        dtype: The target type.

    Returns:
        A tensor of the specified `dtype`.

    Example:

    >>> x = keras.ops.arange(4)
    >>> x = keras.ops.cast(x, dtype="float16")
    """
    # Canonicalize the dtype once, for both the symbolic and eager paths.
    target = backend.standardize_dtype(dtype)
    if any_symbolic_tensors((x,)):
        return Cast(dtype=target)(x)
    return backend.core.cast(x, target)
@keras_export("keras.ops.convert_to_tensor")
def convert_to_tensor(x, dtype=None, sparse=None):
    """Convert a NumPy array to a tensor.

    Args:
        x: A NumPy array.
        dtype: The target type. If `None`, the dtype is inferred by the
            backend.
        sparse: Whether to keep sparse tensors. `False` will cause sparse
            tensors to be densified. The default value of `None` means that
            sparse tensors are kept only if the backend supports them.

    Returns:
        A tensor of the specified `dtype`.

    Example:

    >>> x = np.array([1, 2, 3])
    >>> y = keras.ops.convert_to_tensor(x)
    """
    return backend.convert_to_tensor(x, dtype=dtype, sparse=sparse)
@keras_export("keras.ops.convert_to_numpy")
def convert_to_numpy(x):
    """Convert a tensor to a NumPy array.

    Args:
        x: A tensor.

    Returns:
        A NumPy array.

    Raises:
        ValueError: if `x` is a symbolic tensor (symbolic tensors have no
            concrete values to convert).
    """
    if any_symbolic_tensors((x,)):
        # This will raise a `ValueError` defined in the `KerasTensor` class.
        # We trigger it rather than duplicate it here.
        return np.array(x)
    return backend.convert_to_numpy(x)
class Cond(Operation):
    """Op backing `keras.ops.cond`.

    Overrides `__call__` so that an eager call which fails with a
    `TypeError`/`ValueError` (e.g. because of symbolic inputs deeper in
    the structure) falls back to symbolic tracing, and validates at trace
    time that both branches produce structurally identical output specs.
    """

    @traceback_utils.filter_traceback
    def __call__(self, *args, **kwargs):
        def call_fn(*args, **kwargs):
            if not any_symbolic_tensors(args, kwargs):
                try:
                    return self.call(*args, **kwargs)
                except (TypeError, ValueError):
                    # fallback on symbolic case
                    pass
            return self.symbolic_call(*args, **kwargs)

        if traceback_utils.is_traceback_filtering_enabled():
            # Wrap self.call to provide helpful info in case of exception
            call_fn = traceback_utils.inject_argument_info_in_traceback(
                call_fn,
                object_name=(f"{self.__class__.__name__}.call()"),
            )
            return call_fn(*args, **kwargs)

        # Plain flow.
        return call_fn(*args, **kwargs)

    def call(self, pred, true_fn, false_fn):
        return backend.core.cond(pred, true_fn, false_fn)

    def compute_output_spec(self, pred, true_fn, false_fn):
        # Trace both branches and require their output specs to match.
        def call_fn(fn):
            return fn()

        true_fn_spec = backend.compute_output_spec(call_fn, true_fn)
        false_fn_spec = backend.compute_output_spec(call_fn, false_fn)
        if not self._check_output_spec(true_fn_spec, false_fn_spec):
            raise ValueError(
                "`true_fn` and `false_fn` should return outputs "
                "of the same kind (struct, dtype and shape). "
                f"Got {true_fn_spec} and {false_fn_spec} instead."
            )
        return true_fn_spec

    def _check_output_spec(self, true_fn_spec, false_fn_spec):
        # Recursively compare the two branch specs: same container types,
        # same keys/lengths, and matching dtype/shape at the leaves.
        if true_fn_spec is None or false_fn_spec is None:
            return true_fn_spec is None and false_fn_spec is None
        elif isinstance(true_fn_spec, dict):
            if not isinstance(false_fn_spec, dict):
                return False
            if true_fn_spec.keys() != false_fn_spec.keys():
                return False
            if any(
                (not self._check_output_spec(true_fn_spec[k], false_fn_spec[k]))
                for k in true_fn_spec.keys()
            ):
                return False
        elif isinstance(true_fn_spec, list):
            if not isinstance(false_fn_spec, list):
                return False
            if len(true_fn_spec) != len(false_fn_spec):
                return False
            if any(
                (not self._check_output_spec(ti, fi))
                for ti, fi in zip(true_fn_spec, false_fn_spec)
            ):
                return False
        elif isinstance(true_fn_spec, tuple):
            if not isinstance(false_fn_spec, tuple):
                return False
            if len(true_fn_spec) != len(false_fn_spec):
                return False
            if any(
                (not self._check_output_spec(ti, fi))
                for ti, fi in zip(true_fn_spec, false_fn_spec)
            ):
                return False
        else:
            # Leaf case: assumed to be a tensor-spec-like object exposing
            # `dtype` and `shape`.
            if true_fn_spec.dtype != false_fn_spec.dtype:
                return False
            if true_fn_spec.shape != false_fn_spec.shape:
                return False
        return True
@keras_export("keras.ops.cond")
def cond(pred, true_fn, false_fn):
    """Conditionally applies `true_fn` or `false_fn`.

    Args:
        pred: Boolean scalar type
        true_fn: Callable returning the output for the `pred == True` case.
        false_fn: Callable returning the output for the `pred == False`
            case.

    Returns:
        The output of either `true_fn` or `false_fn` depending on pred.
        When traced symbolically, both callables must return outputs with
        identical structure, dtypes and shapes.
    """
    return Cond()(pred, true_fn, false_fn)
# TODO: also create an Op subclass VectorizedMap.
@keras_export("keras.ops.vectorized_map")
def vectorized_map(function, elements):
    """Parallel map of `function` on axis 0 of tensor(s) `elements`.

    Schematically, `vectorized_map` implements the following,
    in the case of a single tensor input `elements`:

    ```python
    def vectorized_map(function, elements):
        outputs = []
        for e in elements:
            outputs.append(function(e))
        return stack(outputs)
    ```

    In the case of an iterable of tensors `elements`,
    it implements the following:

    ```python
    def vectorized_map(function, elements):
        batch_size = elements[0].shape[0]
        outputs = []
        for index in range(batch_size):
            outputs.append(function([e[index] for e in elements]))
        return np.stack(outputs)
    ```

    In this case, `function` is expected to take as input
    a single list of tensor arguments.
    """
    return backend.core.vectorized_map(function, elements)
@keras_export("keras.ops.is_tensor")
def is_tensor(x):
    """Check whether the given object is a tensor.

    Note: This checks for backend specific tensors so passing a TensorFlow
    tensor would return `False` if your backend is PyTorch or JAX.

    Args:
        x: A variable.

    Returns:
        `True` if `x` is a tensor, otherwise `False`.
    """
    # The notion of "tensor" is backend-specific; delegate entirely.
    return backend.core.is_tensor(x)
|
keras/keras/ops/core.py/0
|
{
"file_path": "keras/keras/ops/core.py",
"repo_id": "keras",
"token_count": 8964
}
| 208 |
import inspect
import textwrap
import tree
from keras import backend
from keras.api_export import keras_export
from keras.backend.common.keras_tensor import any_symbolic_tensors
from keras.ops.node import Node
from keras.utils import python_utils
from keras.utils import traceback_utils
from keras.utils.naming import auto_name
@keras_export("keras.Operation")
class Operation:
    """Base class for Keras operations.

    An operation is a callable with two execution paths: eager execution on
    concrete tensors (via `call()`), and symbolic execution on symbolic
    tensors (via `symbolic_call()`), which performs shape/dtype inference
    and records a `Node` in the operations graph instead of computing
    values.
    """
    def __init__(self, name=None):
        # Auto-generate a unique name when none is provided.
        if name is None:
            name = auto_name(self.__class__.__name__)
        # Names must be slash-free strings.
        if not isinstance(name, str) or "/" in name:
            raise ValueError(
                "Argument `name` must be a string and "
                "cannot contain character `/`. "
                f"Received: name={name} (of type {type(name)})"
            )
        self.name = name
        # Graph connectivity bookkeeping; populated by `Node` instances.
        self._inbound_nodes = []
        self._outbound_nodes = []
    @traceback_utils.filter_traceback
    def __call__(self, *args, **kwargs):
        # Dispatch to symbolic or eager execution; optionally wrap the
        # callable so exceptions carry argument info in their traceback.
        if traceback_utils.is_traceback_filtering_enabled():
            # Wrap self.call to provide helpful info in case of exception
            if any_symbolic_tensors(args, kwargs):
                call_fn = self.symbolic_call
            else:
                call_fn = self.call
            call_fn = traceback_utils.inject_argument_info_in_traceback(
                call_fn,
                object_name=(f"{self.__class__.__name__}.call()"),
            )
            return call_fn(*args, **kwargs)
        # Plain flow.
        if any_symbolic_tensors(args, kwargs):
            return self.symbolic_call(*args, **kwargs)
        return self.call(*args, **kwargs)
    def symbolic_call(self, *args, **kwargs):
        # Perform shape/dtype inference.
        outputs = self.compute_output_spec(*args, **kwargs)
        # Record a new node in the operations graph.
        # The Node wires itself to inbound and outbound ops. The
        # Node constructor updates this op's self._inbound_nodes,
        # sets _keras_history on the outputs, and adds itself to the
        # `_outbound_nodes` of the ops that produced the inputs to this
        # call.
        Node(
            operation=self, call_args=args, call_kwargs=kwargs, outputs=outputs
        )
        return outputs
    def call(self, *args, **kwargs):
        # Subclasses implement the eager computation here.
        raise NotImplementedError
    def compute_output_spec(self, *args, **kwargs):
        # Default spec inference: symbolically trace `call()`. Subclasses
        # override this when `call()` cannot be traced automatically.
        try:
            return backend.compute_output_spec(self.call, *args, **kwargs)
        except Exception as e:
            if isinstance(e, TypeError):
                # Re-raise TypeErrors unchanged (e.g. signature mismatches).
                raise e
            else:
                new_e = RuntimeError(
                    "Could not automatically infer the output shape / dtype of "
                    f"'{self.name}' (of type {self.__class__.__name__}). "
                    f"Either the `{self.__class__.__name__}.call()` method "
                    f"is incorrect, or you need to implement the "
                    f"`{self.__class__.__name__}.compute_output_spec() / "
                    "compute_output_shape()` method. "
                    f"Error encountered:\n\n{e}"
                )
                raise new_e.with_traceback(e.__traceback__) from None
    def __new__(cls, *args, **kwargs):
        """We override __new__ to save serializable constructor arguments.

        These arguments are used to auto-generate an object serialization
        config, which enables user-created subclasses to be serializable
        out of the box in most cases without forcing the user
        to manually implement `get_config()`.
        """
        # Generate a config to be returned by default by `get_config()`.
        arg_names = inspect.getfullargspec(cls.__init__).args
        kwargs.update(dict(zip(arg_names[1 : len(args) + 1], args)))
        instance = super(Operation, cls).__new__(cls)
        # For safety, we only rely on auto-configs for a small set of
        # serializable types.
        supported_types = (str, int, float, bool, type(None))
        try:
            flat_arg_values = tree.flatten(kwargs)
            auto_config = True
            for value in flat_arg_values:
                if not isinstance(value, supported_types):
                    auto_config = False
                    break
        except TypeError:
            # `tree.flatten` may fail on unflattenable argument values.
            auto_config = False
        try:
            instance._lock = False
            if auto_config:
                from keras.saving import serialization_lib
                instance._auto_config = serialization_lib.SerializableDict(
                    **kwargs
                )
            else:
                instance._auto_config = None
            instance._lock = True
        except RecursionError:
            # Setting an instance attribute in __new__ has the potential
            # to trigger an infinite recursion if a subclass overrides
            # setattr in an unsafe way.
            pass
        return instance
    @python_utils.default
    def get_config(self):
        """Returns the config of the object.

        An object config is a Python dictionary (serializable)
        containing the information needed to re-instantiate it.
        """
        config = {
            "name": self.name,
        }
        if not python_utils.is_default(self.get_config):
            # In this case the subclass implements get_config()
            return config
        # In this case the subclass doesn't implement get_config():
        # Let's see if we can autogenerate it.
        if getattr(self, "_auto_config", None) is not None:
            xtra_args = set(config.keys())
            config.update(self._auto_config.config)
            # Remove args non explicitly supported
            argspec = inspect.getfullargspec(self.__init__)
            if argspec.varkw != "kwargs":
                for key in xtra_args - xtra_args.intersection(argspec.args[1:]):
                    config.pop(key, None)
            return config
        else:
            raise NotImplementedError(
                textwrap.dedent(
                    f"""
        Object {self.__class__.__name__} was created by passing
        non-serializable argument values in `__init__()`,
        and therefore the object must override `get_config()` in
        order to be serializable. Please implement `get_config()`.
        Example:
        class CustomLayer(keras.layers.Layer):
            def __init__(self, arg1, arg2, **kwargs):
                super().__init__(**kwargs)
                self.arg1 = arg1
                self.arg2 = arg2
            def get_config(self):
                config = super().get_config()
                config.update({{
                    "arg1": self.arg1,
                    "arg2": self.arg2,
                }})
                return config"""
                )
            )
    @classmethod
    def from_config(cls, config):
        """Creates a layer from its config.

        This method is the reverse of `get_config`,
        capable of instantiating the same layer from the config
        dictionary. It does not handle layer connectivity
        (handled by Network), nor weights (handled by `set_weights`).

        Args:
            config: A Python dictionary, typically the
                output of get_config.

        Returns:
            A layer instance.
        """
        try:
            return cls(**config)
        except Exception as e:
            # NOTE(review): consider `raise ... from e` here to preserve
            # the original exception chain for debugging.
            raise TypeError(
                f"Error when deserializing class '{cls.__name__}' using "
                f"config={config}.\n\nException encountered: {e}"
            )
    def __repr__(self):
        return f"<Operation name={self.name}>"
    @property
    def input(self):
        """Retrieves the input tensor(s) of a symbolic operation.

        Only returns the tensor(s) corresponding to the *first time*
        the operation was called.

        Returns:
            Input tensor or list of input tensors.
        """
        return self._get_node_attribute_at_index(0, "input_tensors", "input")
    @property
    def output(self):
        """Retrieves the output tensor(s) of a layer.

        Only returns the tensor(s) corresponding to the *first time*
        the operation was called.

        Returns:
            Output tensor or list of output tensors.
        """
        return self._get_node_attribute_at_index(0, "output_tensors", "output")
    def _get_node_attribute_at_index(self, node_index, attr, attr_name):
        """Private utility to retrieves an attribute (e.g. inputs) from a node.

        This is used to implement the properties:
        - output
        - input

        Args:
            node_index: Integer index of the node from which
                to retrieve the attribute.
            attr: Exact node attribute name.
            attr_name: Human-readable attribute name, for error messages.

        Returns:
            The operation's attribute `attr` at the node of index `node_index`.
        """
        if not self._inbound_nodes:
            raise ValueError(
                f"The layer {self.name} has never been called "
                f"and thus has no defined {attr_name}."
            )
        if not len(self._inbound_nodes) > node_index:
            raise ValueError(
                f"Asked to get {attr_name} at node "
                f"{node_index}, but the operation has only "
                f"{len(self._inbound_nodes)} inbound nodes."
            )
        values = getattr(self._inbound_nodes[node_index], attr)
        # Unwrap single-element lists for convenience.
        if isinstance(values, list) and len(values) == 1:
            return values[0]
        else:
            return values
    # Hooks for backend layer classes
    def _post_build(self):
        """Can be overridden for per backend post build actions."""
        pass
    def _setattr_hook(self, name, value):
        """Can be overridden for per backend post build actions."""
        return name, value
|
keras/keras/ops/operation.py/0
|
{
"file_path": "keras/keras/ops/operation.py",
"repo_id": "keras",
"token_count": 4550
}
| 209 |
# flake8: noqa
import numpy as np
from keras import backend
from keras import ops
from keras import testing
from keras.optimizers.adamax import Adamax
class AdamaxTest(testing.TestCase):
    """Unit tests for the `Adamax` optimizer."""
    def test_config(self):
        # Round-trip the optimizer through serialization/deserialization.
        optimizer = Adamax(
            learning_rate=0.5,
            beta_1=0.8,
            beta_2=0.95,
            epsilon=1e-5,
        )
        self.run_class_serialization_test(optimizer)
    def test_single_step(self):
        # After one step, each variable decreases by exactly 0.5
        # (the learning rate), per the asserted values below.
        optimizer = Adamax(learning_rate=0.5)
        grads = ops.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
    def test_weight_decay(self):
        # With zero gradients, only weight decay moves the variables.
        # var2 is excluded by name, var3 by reference; both stay at 2.0.
        grads, var1, var2, var3 = (
            ops.zeros(()),
            backend.Variable(2.0),
            backend.Variable(2.0, name="exclude"),
            backend.Variable(2.0),
        )
        optimizer_1 = Adamax(learning_rate=1.0, weight_decay=0.004)
        optimizer_1.apply_gradients(zip([grads], [var1]))
        optimizer_2 = Adamax(learning_rate=1.0, weight_decay=0.004)
        optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
        optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
        optimizer_3 = Adamax(learning_rate=1.0, weight_decay=0.004)
        optimizer_3.exclude_from_weight_decay(var_list=[var3])
        optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
        self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
        self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
        self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
    def test_correctness_with_golden(self):
        # Compare five update steps against precomputed golden values.
        optimizer = Adamax(
            learning_rate=0.2, beta_1=0.85, beta_2=0.95, epsilon=1e-6
        )
        x = backend.Variable(np.ones([10]))
        grads = ops.arange(0.1, 1.1, 0.1)
        first_grads = ops.full((10,), 0.01)
        # fmt: off
        golden = np.array(
            [[0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
            [0.6827, 0.6873, 0.6888, 0.6896, 0.6901, 0.6904, 0.6906, 0.6908, 0.6909, 0.691],
            [0.5333, 0.5407, 0.5431, 0.5444, 0.5451, 0.5456, 0.546, 0.5462, 0.5464, 0.5466],
            [0.368, 0.3773, 0.3804, 0.382, 0.3829, 0.3835, 0.384, 0.3843, 0.3846, 0.3848],
            [0.1933, 0.204, 0.2076, 0.2094, 0.2105, 0.2112, 0.2117, 0.2121, 0.2124, 0.2126]]
        )
        # fmt: on
        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))
    def test_clip_norm(self):
        # [100, 100] clipped to global norm 1 becomes a unit vector.
        optimizer = Adamax(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
    def test_clip_value(self):
        # Element-wise clipping to the [-1, 1] range.
        optimizer = Adamax(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
keras/keras/optimizers/adamax_test.py/0
|
{
"file_path": "keras/keras/optimizers/adamax_test.py",
"repo_id": "keras",
"token_count": 1684
}
| 210 |
import numpy as np
from keras import backend
from keras import ops
from keras import testing
from keras.optimizers.rmsprop import RMSprop
class RMSpropTest(testing.TestCase):
    """Unit tests for the `RMSprop` optimizer."""
    def test_config(self):
        # Round-trip the optimizer through serialization/deserialization.
        optimizer = RMSprop(
            learning_rate=0.5,
            rho=0.8,
            momentum=0.05,
            epsilon=1e-6,
            centered=True,
        )
        self.run_class_serialization_test(optimizer)
    def test_single_step(self):
        # One update step against precomputed expected values.
        optimizer = RMSprop(learning_rate=0.5)
        grads = ops.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(
            vars, [-0.5811, 0.4189, 1.4189, 2.4189], rtol=1e-4, atol=1e-4
        )
    def test_weight_decay(self):
        # With zero gradients, only weight decay moves the variables.
        # var2 is excluded by name, var3 by reference; both stay at 2.0.
        grads, var1, var2, var3 = (
            ops.zeros(()),
            backend.Variable(2.0),
            backend.Variable(2.0, name="exclude"),
            backend.Variable(2.0),
        )
        optimizer_1 = RMSprop(learning_rate=1.0, weight_decay=0.004)
        optimizer_1.apply_gradients(zip([grads], [var1]))
        optimizer_2 = RMSprop(learning_rate=1.0, weight_decay=0.004)
        optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
        optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
        optimizer_3 = RMSprop(learning_rate=1.0, weight_decay=0.004)
        optimizer_3.exclude_from_weight_decay(var_list=[var3])
        optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
        self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
        self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
        self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
    def test_correctness_with_golden(self):
        # Compare five update steps of the centered variant against
        # precomputed golden values (identical across all 10 elements).
        optimizer = RMSprop(centered=True)
        x = backend.Variable(np.ones([10]))
        grads = ops.arange(0.1, 1.1, 0.1)
        first_grads = ops.full((10,), 0.01)
        golden = np.tile(
            [[0.9967], [0.9933], [0.9908], [0.9885], [0.9864]], (1, 10)
        )
        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))
    def test_clip_norm(self):
        # [100, 100] clipped to global norm 1 becomes a unit vector.
        optimizer = RMSprop(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
    def test_clip_value(self):
        # Element-wise clipping to the [-1, 1] range.
        optimizer = RMSprop(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
keras/keras/optimizers/rmsprop_test.py/0
|
{
"file_path": "keras/keras/optimizers/rmsprop_test.py",
"repo_id": "keras",
"token_count": 1398
}
| 211 |
import inspect
from keras.api_export import keras_export
from keras.backend.common import global_state
# Registry mapping "package>name" keys to registered classes/functions.
GLOBAL_CUSTOM_OBJECTS = {}
# Reverse registry mapping registered objects back to their "package>name" key.
GLOBAL_CUSTOM_NAMES = {}
@keras_export(
    [
        "keras.saving.CustomObjectScope",
        "keras.saving.custom_object_scope",
        "keras.utils.CustomObjectScope",
        "keras.utils.custom_object_scope",
    ]
)
class CustomObjectScope:
    """Exposes custom classes/functions to Keras deserialization internals.

    Under a scope `with custom_object_scope(objects_dict)`, Keras methods such
    as `keras.models.load_model()` or
    `keras.models.model_from_config()` will be able to deserialize any
    custom object referenced by a saved config (e.g. a custom layer or metric).

    Example:

    Consider a custom regularizer `my_regularizer`:

    ```python
    layer = Dense(3, kernel_regularizer=my_regularizer)
    # Config contains a reference to `my_regularizer`
    config = layer.get_config()
    ...
    # Later:
    with custom_object_scope({'my_regularizer': my_regularizer}):
        layer = Dense.from_config(config)
    ```

    Args:
        custom_objects: Dictionary of `{name: object}` pairs.
    """
    def __init__(self, custom_objects):
        self.custom_objects = custom_objects or {}
        self.backup = None
    def __enter__(self):
        # Save a copy of the currently active scope dict so nested scopes
        # restore correctly on exit.
        self.backup = global_state.get_global_attribute(
            "custom_objects_scope_dict", {}
        ).copy()
        global_state.set_global_attribute(
            "custom_objects_scope_dict", self.custom_objects.copy()
        )
        return self
    def __exit__(self, *args, **kwargs):
        # Restore the scope dict that was active before entering.
        global_state.set_global_attribute(
            "custom_objects_scope_dict", self.backup.copy()
        )
# Alias: lowercase, function-style name for the context manager.
custom_object_scope = CustomObjectScope
@keras_export(
    [
        "keras.saving.get_custom_objects",
        "keras.utils.get_custom_objects",
    ]
)
def get_custom_objects():
    """Retrieves a live reference to the global dictionary of custom objects.

    Custom objects set using `custom_object_scope()` are not added to the
    global dictionary of custom objects, and will not appear in the returned
    dictionary.

    Example:

    ```python
    get_custom_objects().clear()
    get_custom_objects()['MyObject'] = MyObject
    ```

    Returns:
        Global dictionary mapping registered class names to classes.
    """
    # Returned by reference on purpose: mutations affect the registry.
    return GLOBAL_CUSTOM_OBJECTS
@keras_export(
    [
        "keras.saving.register_keras_serializable",
        "keras.utils.register_keras_serializable",
    ]
)
def register_keras_serializable(package="Custom", name=None):
    """Registers an object with the Keras serialization framework.

    This decorator inserts the decorated class or function into the Keras
    custom object registry, so that it can be serialized and deserialized
    without an entry in the user-provided custom object dict. It also records
    the string key under which Keras will serialize the object.

    Note that to be serialized and deserialized, classes must implement the
    `get_config()` method. Functions do not have this requirement.

    The object is registered under the key `'package>name'`, where `name`
    defaults to the object's own name if not passed.

    Example:

    ```python
    # Note that `'my_package'` is used as the `package` argument here, and since
    # the `name` argument is not provided, `'MyDense'` is used as the `name`.
    @register_keras_serializable('my_package')
    class MyDense(keras.layers.Dense):
        pass

    assert get_registered_object('my_package>MyDense') == MyDense
    assert get_registered_name(MyDense) == 'my_package>MyDense'
    ```

    Args:
        package: The package that this class belongs to. This is used for the
            `key` (which is `"package>name"`) to identify the class. Note that
            this is the first argument passed into the decorator.
        name: The name to serialize this class under in this package. If not
            provided or `None`, the class' name will be used (note that this is
            the case when the decorator is used with only one argument, which
            becomes the `package`).

    Returns:
        A decorator that registers the decorated class with the passed names.
    """
    def decorator(arg):
        """Registers `arg` in the global custom object registries."""
        class_name = arg.__name__ if name is None else name
        key = package + ">" + class_name
        # Classes must be round-trippable via get_config().
        if inspect.isclass(arg) and not hasattr(arg, "get_config"):
            raise ValueError(
                "Cannot register a class that does not have a "
                "get_config() method."
            )
        GLOBAL_CUSTOM_OBJECTS[key] = arg
        GLOBAL_CUSTOM_NAMES[arg] = key
        return arg
    return decorator
@keras_export(
    [
        "keras.saving.get_registered_name",
        "keras.utils.get_registered_name",
    ]
)
def get_registered_name(obj):
    """Returns the name registered to an object within the Keras framework.

    Part of the Keras serialization framework: maps objects to the string
    names they were registered under, falling back to the object's own
    Python name.

    Args:
        obj: The object to look up.

    Returns:
        The registered `"package>name"` key for `obj`, or `obj.__name__`
        if `obj` was never registered.
    """
    try:
        return GLOBAL_CUSTOM_NAMES[obj]
    except KeyError:
        return obj.__name__
@keras_export(
    [
        "keras.saving.get_registered_object",
        "keras.utils.get_registered_object",
    ]
)
def get_registered_object(name, custom_objects=None, module_objects=None):
    """Returns the class associated with `name` if it is registered with Keras.

    Part of the Keras serialization framework: maps string keys back to the
    objects they refer to.

    Example:

    ```python
    def from_config(cls, config, custom_objects=None):
        if 'my_custom_object_name' in config:
            config['hidden_cls'] = tf.keras.saving.get_registered_object(
                config['my_custom_object_name'], custom_objects=custom_objects)
    ```

    Args:
        name: The name to look up.
        custom_objects: A dictionary of custom objects to look the name up in.
            Generally, custom_objects is provided by the user.
        module_objects: A dictionary of custom objects to look the name up in.
            Generally, module_objects is provided by midlevel library
            implementers.

    Returns:
        An instantiable class associated with `name`, or `None` if no such
        class exists.
    """
    scope_dict = global_state.get_global_attribute(
        "custom_objects_scope_dict", {}
    )
    # Lookup priority: active scope > global registry > user-provided
    # custom objects > library-provided module objects.
    for mapping in (
        scope_dict,
        GLOBAL_CUSTOM_OBJECTS,
        custom_objects,
        module_objects,
    ):
        if mapping and name in mapping:
            return mapping[name]
    return None
|
keras/keras/saving/object_registration.py/0
|
{
"file_path": "keras/keras/saving/object_registration.py",
"repo_id": "keras",
"token_count": 2744
}
| 212 |
import math
import numpy as np
import tree
from keras import backend
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.data_adapters.data_adapter import DataAdapter
from keras.utils.dataset_utils import is_torch_tensor
from keras.utils.nest import lists_to_tuples
try:
import pandas
except ImportError:
pandas = None
class ArrayDataAdapter(DataAdapter):
    """Adapter for array-like objects, e.g. TF/JAX Tensors, NumPy arrays.

    Validates and normalizes `(x, y, sample_weight)` structures of arrays,
    then exposes them as a NumPy iterator, a `tf.data.Dataset`, a JAX
    iterator, or a torch `DataLoader`.
    """
    def __init__(
        self,
        x,
        y=None,
        sample_weight=None,
        batch_size=None,
        steps=None,
        shuffle=False,
        class_weight=None,
    ):
        if not can_convert_arrays((x, y, sample_weight)):
            raise ValueError(
                "Expected all elements of `x` to be array-like. "
                f"Received invalid types: x={x}"
            )
        x, y, sample_weight = convert_to_arrays((x, y, sample_weight))
        if sample_weight is not None:
            # `class_weight` and `sample_weight` are mutually exclusive.
            if class_weight is not None:
                # NOTE(review): message appears to be missing the word
                # "use" ("You cannot use `class_weight` and ...").
                raise ValueError(
                    "You cannot `class_weight` and `sample_weight` "
                    "at the same time."
                )
            if tree.is_nested(y):
                if isinstance(sample_weight, np.ndarray):
                    # A single array is only valid if it is per-sample
                    # (shape `(num_samples,)` or `(num_samples, 1)`).
                    is_samplewise = len(sample_weight.shape) == 1 or (
                        len(sample_weight.shape) == 2
                        and sample_weight.shape[1] == 1
                    )
                    if not is_samplewise:
                        raise ValueError(
                            "For a model with multiple outputs, when providing "
                            "a single `sample_weight` array, it should only "
                            "have one scalar score per sample "
                            "(i.e. shape `(num_samples,)`). If you want to use "
                            "non-scalar sample weights, pass a `sample_weight` "
                            "argument with one array per model output."
                        )
                    # Replicate the same sample_weight array on all outputs.
                    sample_weight = tree.map_structure(
                        lambda _: sample_weight, y
                    )
                else:
                    # Nested sample weights must mirror the structure of `y`.
                    try:
                        tree.assert_same_structure(y, sample_weight)
                    except ValueError:
                        raise ValueError(
                            "You should provide one `sample_weight` array per "
                            "output in `y`. The two structures did not match:\n"
                            f"- y: {y}\n"
                            f"- sample_weight: {sample_weight}\n"
                        )
        if class_weight is not None:
            if tree.is_nested(y):
                raise ValueError(
                    "`class_weight` is only supported for Models with a single "
                    "output."
                )
            # Convert class weights into an equivalent per-sample weight array.
            sample_weight = data_adapter_utils.class_weight_to_sample_weights(
                y, class_weight
            )
        inputs = data_adapter_utils.pack_x_y_sample_weight(x, y, sample_weight)
        data_adapter_utils.check_data_cardinality(inputs)
        # All leaves share the same first dimension (checked above).
        num_samples = set(i.shape[0] for i in tree.flatten(inputs)).pop()
        self._num_samples = num_samples
        self._inputs = inputs
        # If batch_size is not passed but steps is, calculate from the input
        # data. Defaults to `32` for backwards compatibility.
        if not batch_size:
            batch_size = int(math.ceil(num_samples / steps)) if steps else 32
        self._size = int(math.ceil(num_samples / batch_size))
        self._batch_size = batch_size
        self._partial_batch_size = num_samples % batch_size
        self._shuffle = shuffle
    def get_numpy_iterator(self):
        # Yields one structure of NumPy batch slices per step. With
        # shuffle=True the whole epoch is shuffled up front; with
        # shuffle="batch" each batch is shuffled independently.
        inputs = self._inputs
        if self._shuffle and self._shuffle != "batch":
            inputs = data_adapter_utils.sync_shuffle(
                inputs, num_samples=self._num_samples
            )
        for i in range(self._size):
            start = i * self._batch_size
            stop = min((i + 1) * self._batch_size, self._num_samples)
            if self._shuffle == "batch":
                def slice_and_shuffle(x):
                    return data_adapter_utils.sync_shuffle(
                        x[start:stop], num_samples=(stop - start)
                    )
                yield tree.map_structure(slice_and_shuffle, inputs)
            else:
                yield tree.map_structure(lambda x: x[start:stop], inputs)
    def get_tf_dataset(self):
        from keras.utils.module_utils import tensorflow as tf
        inputs = self._inputs
        shuffle = self._shuffle
        batch_size = self._batch_size
        num_samples = self._num_samples
        num_full_batches = int(self._num_samples // batch_size)
        # Vectorized version of shuffle.
        # This is a performance improvement over using `from_tensor_slices`.
        # The indices of the data are shuffled and batched, and these indices
        # are then zipped with the data and used to extract a batch of the data
        # at each step. The performance improvements here come from:
        # 1. vectorized batch using gather
        # 2. parallelized map
        # 3. pipelined permutation generation
        # 4. optimized permutation batching
        # 5. disabled static optimizations
        indices_dataset = tf.data.Dataset.range(1)
        def permutation(_):
            # It turns out to be more performant to make a new set of indices
            # rather than reusing the same range Tensor. (presumably because of
            # buffer forwarding.)
            indices = tf.range(num_samples, dtype=tf.int64)
            if shuffle and shuffle != "batch":
                indices = tf.random.shuffle(indices)
            return indices
        # We prefetch a single element. Computing large permutations can take
        # quite a while so we don't want to wait for prefetching over an epoch
        # boundary to trigger the next permutation. On the other hand, too many
        # simultaneous shuffles can contend on a hardware level and degrade all
        # performance.
        indices_dataset = indices_dataset.map(permutation).prefetch(1)
        def slice_batch_indices(indices):
            """Convert a Tensor of indices into a dataset of batched indices.

            This step can be accomplished in several ways. The most natural is
            to slice the Tensor in a Dataset map. (With a condition on the upper
            index to handle the partial batch.) However it turns out that
            coercing the Tensor into a shape which is divisible by the batch
            size (and handling the last partial batch separately) allows for a
            much more favorable memory access pattern and improved performance.

            Args:
                indices: Tensor which determines the data order for an entire
                    epoch.

            Returns:
                A Dataset of batched indices.
            """
            num_in_full_batch = num_full_batches * batch_size
            first_k_indices = tf.slice(indices, [0], [num_in_full_batch])
            first_k_indices = tf.reshape(
                first_k_indices, [num_full_batches, batch_size]
            )
            flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices)
            if self._partial_batch_size:
                index_remainder = tf.data.Dataset.from_tensors(
                    tf.slice(
                        indices, [num_in_full_batch], [self._partial_batch_size]
                    )
                )
                flat_dataset = flat_dataset.concatenate(index_remainder)
            return flat_dataset
        def slice_inputs(indices_dataset, inputs):
            """Slice inputs into a Dataset of batches.

            Given a Dataset of batch indices and the unsliced inputs,
            this step slices the inputs in a parallelized fashion
            and produces a dataset of input batches.

            Args:
                indices_dataset: A Dataset of batched indices.
                inputs: A python data structure that contains the inputs,
                    targets, and possibly sample weights.

            Returns:
                A Dataset of input batches matching the batch indices.
            """
            dataset = tf.data.Dataset.zip(
                (indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())
            )
            def grab_batch(i, data):
                return tree.map_structure(
                    lambda d: tf.gather(d, i, axis=0), data
                )
            dataset = dataset.map(
                grab_batch, num_parallel_calls=tf.data.AUTOTUNE
            )
            # Default optimizations are disabled to avoid the overhead of
            # (unnecessary) input pipeline graph serialization & deserialization
            options = tf.data.Options()
            options.experimental_optimization.apply_default_optimizations = (
                False
            )
            if self._shuffle:
                options.experimental_external_state_policy = (
                    tf.data.experimental.ExternalStatePolicy.IGNORE
                )
            dataset = dataset.with_options(options)
            return dataset
        indices_dataset = indices_dataset.flat_map(slice_batch_indices)
        dataset = slice_inputs(indices_dataset, inputs)
        if shuffle == "batch":
            def shuffle_batch(*batch):
                return tree.map_structure(tf.random.shuffle, batch)
            dataset = dataset.map(shuffle_batch)
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = (
            tf.data.experimental.AutoShardPolicy.DATA
        )
        dataset = dataset.with_options(options)
        return dataset.prefetch(tf.data.AUTOTUNE)
    def get_jax_iterator(self):
        # JAX consumes the NumPy batches via a conversion helper.
        return data_adapter_utils.get_jax_iterator(self.get_numpy_iterator())
    def get_torch_dataloader(self):
        import torch
        from keras.backend.torch.core import convert_to_tensor
        class ArrayDataset(torch.utils.data.Dataset):
            # Wraps the adapter's array structure; batch extraction happens
            # in `__getitems__` so whole batches are gathered at once.
            def __init__(self, array):
                self.array = array
            def __getitems__(self, indices):
                def slice_and_convert(x):
                    return convert_to_tensor(np.take(x, indices, axis=0))
                return tree.map_structure(slice_and_convert, self.array)
            def __len__(self):
                return len(self.array[0])
        class RandomBatchSampler(torch.utils.data.Sampler):
            # Shuffles indices *within* each batch (shuffle="batch" mode).
            def __init__(self, sampler):
                self.sampler = sampler
            def __iter__(self):
                for batch in self.sampler:
                    yield [batch[i] for i in torch.randperm(len(batch))]
            def __len__(self):
                return len(self.sampler)
        if self._shuffle == "batch":
            batch_sampler = RandomBatchSampler(
                torch.utils.data.BatchSampler(
                    range(self._num_samples),
                    batch_size=self._batch_size,
                    drop_last=False,
                )
            )
        elif self._shuffle:
            batch_sampler = torch.utils.data.BatchSampler(
                torch.utils.data.RandomSampler(range(self._num_samples)),
                batch_size=self._batch_size,
                drop_last=False,
            )
        else:
            batch_sampler = torch.utils.data.BatchSampler(
                torch.utils.data.SequentialSampler(range(self._num_samples)),
                batch_size=self._batch_size,
                drop_last=False,
            )
        # Because ArrayDataset.__getitems__ returns full batches organized in
        # the expected structure, there is nothing to collate.
        def no_op_collate(batch):
            return batch
        dataset = ArrayDataset(self._inputs)
        return torch.utils.data.DataLoader(
            dataset, batch_sampler=batch_sampler, collate_fn=no_op_collate
        )
    @property
    def num_batches(self):
        # Number of batches per epoch (including any partial final batch).
        return self._size
    @property
    def batch_size(self):
        return self._batch_size
    @property
    def has_partial_batch(self):
        return self._partial_batch_size > 0
    @property
    def partial_batch_size(self):
        # `None` when the data divides evenly into batches.
        return self._partial_batch_size or None
def can_convert_arrays(arrays):
    """Check whether `ArrayDataAdapter` can handle the given structure.

    Args:
        arrays: Structure of `Tensor`s, NumPy arrays, or tensor-like.

    Returns:
        `True` if every leaf of `arrays` is `None`, a known array type, or
        exposes the `__array__` protocol; `False` otherwise.
    """
    def _is_supported(x):
        return (
            x is None
            or isinstance(x, data_adapter_utils.ARRAY_TYPES)
            or hasattr(x, "__array__")
        )
    flags = tree.flatten(tree.map_structure(_is_supported, arrays))
    return all(flags)
def convert_to_arrays(arrays):
    """Process array-like inputs.

    This function:
    - Converts tf.Tensors to NumPy arrays.
    - Converts `pandas.Series` to `np.ndarray`
    - Converts `list`s to `tuple`s (for `tf.data` support).

    Args:
        arrays: Structure of `Tensor`s, NumPy arrays, or tensor-like.

    Returns:
        Structure of NumPy `ndarray`s.
    """
    def convert_single_array(x):
        if x is None:
            return x
        # Pandas objects are converted to plain NumPy first; a Series
        # becomes a column vector via expand_dims.
        if pandas is not None:
            if isinstance(x, pandas.Series):
                x = np.expand_dims(x.to_numpy(), axis=-1)
            elif isinstance(x, pandas.DataFrame):
                x = x.to_numpy()
        # Ragged tensors cannot be represented as np.ndarray; keep them as
        # tf.RaggedTensor, only normalizing the float dtype.
        if is_tf_ragged_tensor(x):
            from keras.utils.module_utils import tensorflow as tf
            # Convert floats to floatx.
            if (
                backend.is_float_dtype(x.dtype)
                and not backend.standardize_dtype(x.dtype) == backend.floatx()
            ):
                x = tf.cast(x, backend.floatx())
            return x
        if not isinstance(x, np.ndarray):
            # Using `__array__` should handle `tf.Tensor`, `jax.np.ndarray`,
            # `torch.Tensor`, as well as any other tensor-like object that has
            # added numpy support.
            if hasattr(x, "__array__"):
                if is_torch_tensor(x):
                    # Torch tensors must be moved off-device before NumPy
                    # conversion.
                    x = x.cpu()
                x = np.asarray(x)
            else:
                raise ValueError(
                    "Expected a NumPy array, tf.Tensor, tf.RaggedTensor, "
                    "jax.np.ndarray, torch.Tensor, Pandas Dataframe, or "
                    "Pandas Series. Received invalid input: "
                    f"{x} (of type {type(x)})"
                )
        # Object arrays (e.g. strings) are passed through untouched.
        if x.dtype == object:
            return x
        # Convert floats to floatx.
        if (
            backend.is_float_dtype(x.dtype)
            and not backend.standardize_dtype(x.dtype) == backend.floatx()
        ):
            x = x.astype(backend.floatx())
        return x
    arrays = tree.map_structure(convert_single_array, arrays)
    # Lists become tuples so the structure is hashable/tf.data-compatible.
    return lists_to_tuples(arrays)
def is_tf_ragged_tensor(x):
    """Return `True` if `x` looks like a `tf.RaggedTensor`.

    The check is done by class name so that TensorFlow does not need to be
    imported when another backend is in use.
    """
    class_name = x.__class__.__name__
    return class_name == "RaggedTensor"
|
keras/keras/trainers/data_adapters/array_data_adapter.py/0
|
{
"file_path": "keras/keras/trainers/data_adapters/array_data_adapter.py",
"repo_id": "keras",
"token_count": 7526
}
| 213 |
from keras.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.utils.dataset_utils import split_dataset
from keras.utils.file_utils import get_file
from keras.utils.image_dataset_utils import image_dataset_from_directory
from keras.utils.image_utils import array_to_img
from keras.utils.image_utils import img_to_array
from keras.utils.image_utils import load_img
from keras.utils.image_utils import save_img
from keras.utils.io_utils import disable_interactive_logging
from keras.utils.io_utils import enable_interactive_logging
from keras.utils.io_utils import is_interactive_logging_enabled
from keras.utils.model_visualization import model_to_dot
from keras.utils.model_visualization import plot_model
from keras.utils.numerical_utils import normalize
from keras.utils.numerical_utils import to_categorical
from keras.utils.progbar import Progbar
from keras.utils.python_utils import default
from keras.utils.python_utils import is_default
from keras.utils.python_utils import removeprefix
from keras.utils.python_utils import removesuffix
from keras.utils.rng_utils import set_random_seed
from keras.utils.sequence_utils import pad_sequences
from keras.utils.text_dataset_utils import text_dataset_from_directory
from keras.utils.timeseries_dataset_utils import timeseries_dataset_from_array
|
keras/keras/utils/__init__.py/0
|
{
"file_path": "keras/keras/utils/__init__.py",
"repo_id": "keras",
"token_count": 410
}
| 214 |
import sys
from absl import logging
from keras.api_export import keras_export
from keras.backend.common import global_state
@keras_export(
    [
        "keras.config.enable_interactive_logging",
        "keras.utils.enable_interactive_logging",
    ]
)
def enable_interactive_logging():
    """Switch Keras logging into interactive (stdout) mode.

    While interactive logging is on, Keras writes its log messages straight
    to stdout, which reads best in interactive environments such as shells
    and notebooks.
    """
    # Flip the process-wide flag consulted by `print_msg`.
    global_state.set_global_attribute("interactive_logging", True)
@keras_export(
    [
        "keras.config.disable_interactive_logging",
        "keras.utils.disable_interactive_logging",
    ]
)
def disable_interactive_logging():
    """Switch Keras logging out of interactive (stdout) mode.

    While interactive logging is off, Keras routes log messages through
    `absl.logging`, which suits non-interactive use such as training or
    inference jobs running on a server.
    """
    # Flip the process-wide flag consulted by `print_msg`.
    global_state.set_global_attribute("interactive_logging", False)
@keras_export(
    [
        "keras.config.is_interactive_logging_enabled",
        "keras.utils.is_interactive_logging_enabled",
    ]
)
def is_interactive_logging_enabled():
    """Check if interactive logging is enabled.

    To switch between writing logs to stdout and `absl.logging`, you may use
    `keras.config.enable_interactive_logging()` and
    `keras.config.disable_interactive_logging()`.

    Returns:
        Boolean, `True` if interactive logging is enabled,
        and `False` otherwise.
    """
    # Defaults to True when the attribute has never been set.
    return global_state.get_global_attribute("interactive_logging", True)
def set_logging_verbosity(level):
    """Sets the verbosity level for logging.

    Supported log levels are as follows:

    - `"FATAL"` (least verbose)
    - `"ERROR"`
    - `"WARNING"`
    - `"INFO"`
    - `"DEBUG"` (most verbose)

    Args:
        level: A string corresponding to the level of verbosity for logging.
    """
    valid_levels = {
        "FATAL": logging.FATAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }
    # Reject unknown level names before touching absl's state.
    if level not in valid_levels:
        raise ValueError(
            "Please pass a valid level for logging verbosity. "
            f"Expected one of: {set(valid_levels.keys())}. "
            f"Received: {level}"
        )
    logging.set_verbosity(valid_levels[level])
def print_msg(message, line_break=True):
    """Print the message to absl logging or stdout."""
    if not is_interactive_logging_enabled():
        # Non-interactive mode: route through absl so server logs capture it.
        logging.info(message)
        return
    terminator = "\n" if line_break else ""
    sys.stdout.write(message + terminator)
    sys.stdout.flush()
def ask_to_proceed_with_overwrite(filepath):
    """Produces a prompt asking about overwriting a file.

    Args:
        filepath: the path to the file to be overwritten.

    Returns:
        True if we can proceed with overwrite, False otherwise.
    """

    def _ask(prompt):
        # Normalize the user's reply for comparison.
        return input(prompt).strip().lower()

    answer = _ask(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
    # Keep asking until we get an unambiguous yes/no.
    while answer not in ("y", "n"):
        answer = _ask('Enter "y" (overwrite) or "n" (cancel).')
    if answer == "n":
        return False
    print_msg("[TIP] Next time specify overwrite=True!")
    return True
|
keras/keras/utils/io_utils.py/0
|
{
"file_path": "keras/keras/utils/io_utils.py",
"repo_id": "keras",
"token_count": 1371
}
| 215 |
from keras import testing
from keras.utils import sequence_utils
class PadSequencesTest(testing.TestCase):
    """Unit tests for `sequence_utils.pad_sequences`."""

    def test_pad_sequences(self):
        """1D int sequences: padding side, truncation side, pad value."""
        a = [[1], [1, 2], [1, 2, 3]]
        # test padding
        b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
        self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
        b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
        self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
        # test truncating
        b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
        self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
        b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
        self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
        # test value
        b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
        self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])

    def test_pad_sequences_str(self):
        """String sequences require an object/str dtype; int dtype raises."""
        a = [["1"], ["1", "2"], ["1", "2", "3"]]
        # test padding
        b = sequence_utils.pad_sequences(
            a, maxlen=3, padding="pre", value="pad", dtype=object
        )
        self.assertAllEqual(
            b, [["pad", "pad", "1"], ["pad", "1", "2"], ["1", "2", "3"]]
        )
        b = sequence_utils.pad_sequences(
            a, maxlen=3, padding="post", value="pad", dtype="<U3"
        )
        self.assertAllEqual(
            b, [["1", "pad", "pad"], ["1", "2", "pad"], ["1", "2", "3"]]
        )
        # test truncating
        b = sequence_utils.pad_sequences(
            a, maxlen=2, truncating="pre", value="pad", dtype=object
        )
        self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["2", "3"]])
        b = sequence_utils.pad_sequences(
            a, maxlen=2, truncating="post", value="pad", dtype="<U3"
        )
        self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["1", "2"]])
        # Default dtype (int32) cannot hold string values: must raise.
        with self.assertRaisesRegex(
            ValueError, "`dtype` int32 is not compatible with "
        ):
            sequence_utils.pad_sequences(
                a, maxlen=2, truncating="post", value="pad"
            )

    def test_pad_sequences_vector(self):
        """Sequences of vectors: padding/truncation act on the time axis."""
        a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
        # test padding
        b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
        self.assertAllClose(
            b,
            [
                [[0, 0], [0, 0], [1, 1]],
                [[0, 0], [2, 1], [2, 2]],
                [[3, 1], [3, 2], [3, 3]],
            ],
        )
        b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
        self.assertAllClose(
            b,
            [
                [[1, 1], [0, 0], [0, 0]],
                [[2, 1], [2, 2], [0, 0]],
                [[3, 1], [3, 2], [3, 3]],
            ],
        )
        # test truncating
        b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
        self.assertAllClose(
            b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]
        )
        b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
        self.assertAllClose(
            b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]
        )
        # test value
        b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
        self.assertAllClose(
            b,
            [
                [[1, 1], [1, 1], [1, 1]],
                [[1, 1], [2, 1], [2, 2]],
                [[3, 1], [3, 2], [3, 3]],
            ],
        )
|
keras/keras/utils/sequence_utils_test.py/0
|
{
"file_path": "keras/keras/utils/sequence_utils_test.py",
"repo_id": "keras",
"token_count": 1937
}
| 216 |
# Formatter configuration (black).
[tool.black]
line-length = 80
# black needs this to be a regex
# to add more exclude expressions
# append `| <regex-expr>` (e.g. `| .*_test\\.py`) to this list
extend-exclude = """
(
examples/
)
"""
# Import sorter; "black" profile keeps isort and black from fighting.
[tool.isort]
profile = "black"
force_single_line = "True"
known_first_party = ["keras_core", "tests"]
default_section = "THIRDPARTY"
line_length = 80
extend_skip_glob=["examples/*", "guides/*"]
[tool.pytest.ini_options]
# Promote warnings to errors, except the well-known benign categories.
filterwarnings = [
    "error",
    "ignore::DeprecationWarning",
    "ignore::ImportWarning",
    "ignore::RuntimeWarning",
    "ignore::PendingDeprecationWarning",
    "ignore::FutureWarning",
    "ignore::UserWarning",
    # Ignore a spurious warning on tf-nightly related to save model changes.
    "ignore:Custom mask layers require a config",
]
addopts = "-vv"
# Do not run tests in the `build` folders
norecursedirs = ["build"]
# Coverage reporting: skip boilerplate lines and test/legacy files.
[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "@abstract",
    "raise NotImplementedError",
]
omit = [
    "*/*_test.py",
    "keras_core/legacy/*",
]
[tool.coverage.run]
branch = true
omit = [
    "*/*_test.py",
    "keras_core/legacy/*",
]
|
keras/pyproject.toml/0
|
{
"file_path": "keras/pyproject.toml",
"repo_id": "keras",
"token_count": 445
}
| 217 |
# Description:
# Contains the TF-Keras Application package (internal TensorFlow version).
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
    # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
    default_visibility = [
        # Remove this deps to integration test.
        "//tf_keras:friends",
    ],
    licenses = ["notice"],
)
# Bundles every TF-Keras application module (pretrained CNN architectures)
# together with the shared ImageNet preprocessing helpers.
py_library(
    name = "applications",
    srcs = [
        "__init__.py",
        "convnext.py",
        "densenet.py",
        "efficientnet.py",
        "efficientnet_v2.py",
        "imagenet_utils.py",
        "inception_resnet_v2.py",
        "inception_v3.py",
        "mobilenet.py",
        "mobilenet_v2.py",
        "mobilenet_v3.py",
        "nasnet.py",
        "regnet.py",
        "resnet.py",
        "resnet_rs.py",
        "resnet_v2.py",
        "vgg16.py",
        "vgg19.py",
        "xception.py",
    ],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras:activations",
        "//tf_keras:backend",
        "//tf_keras/engine",
        "//tf_keras/layers",
        "//tf_keras/models",
        "//tf_keras/utils:data_utils",
        "//tf_keras/utils:layer_utils",
    ],
)
# Full applications test suite, run once per image data format.
tf_py_test(
    name = "applications_test_channels_first",
    srcs = ["applications_test.py"],
    args = ["--image_data_format=channels_first"],
    main = "applications_test.py",
    shard_count = 50,
    tags = [
        "no_oss",  # b/318174391
        "no_rocm",
        "notsan",  # b/168814536
        "requires-net:external",
    ],
    deps = [
        ":applications",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_tensorflow_installed",
        "//tf_keras/testing_infra:test_combinations",
    ],
)
# Same suite with the (default) channels-last layout.
tf_py_test(
    name = "applications_test_channels_last",
    srcs = ["applications_test.py"],
    args = ["--image_data_format=channels_last"],
    main = "applications_test.py",
    shard_count = 50,
    tags = [
        "no_oss",  # b/318174391
        "no_rocm",
        "notsan",  # b/168814536
        "requires-net:external",
    ],
    deps = [
        ":applications",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_tensorflow_installed",
        "//tf_keras/testing_infra:test_combinations",
    ],
)
# Add target for each application module file, to make sure it only
# runs the test for the application models contained in that
# application module when it has been modified.
# TODO(b/146940090): Remove the "no_oss" tag in the following tests.
tf_py_test(
name = "applications_load_weight_test_resnet",
srcs = ["applications_load_weight_test.py"],
args = ["--module=resnet"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"notsan", # b/168814536
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_resnet_v2",
srcs = ["applications_load_weight_test.py"],
args = ["--module=resnet_v2"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"notsan", # TODO(b/170901700)
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_vgg16",
srcs = ["applications_load_weight_test.py"],
args = ["--module=vgg16"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_vgg19",
srcs = ["applications_load_weight_test.py"],
args = ["--module=vgg19"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_xception",
srcs = ["applications_load_weight_test.py"],
args = ["--module=xception"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_inception_v3",
srcs = ["applications_load_weight_test.py"],
args = ["--module=inception_v3"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_inception_resnet_v2",
srcs = ["applications_load_weight_test.py"],
args = ["--module=inception_resnet_v2"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_mobilenet",
srcs = ["applications_load_weight_test.py"],
args = ["--module=mobilenet"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_mobilenet_v2",
srcs = ["applications_load_weight_test.py"],
args = ["--module=mobilenet_v2"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_mobilenet_v3_small",
srcs = ["applications_load_weight_test.py"],
args = ["--module=mobilenet_v3_small"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_mobilenet_v3_large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=mobilenet_v3_large"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_convnext",
size = "large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=convnext"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_densenet",
size = "large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=densenet"],
main = "applications_load_weight_test.py",
shard_count = 3,
tags = [
"no_oss",
"no_pip",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_efficientnet",
size = "large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=efficientnet"],
main = "applications_load_weight_test.py",
shard_count = 8,
tags = [
"no_oss",
"no_pip",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_efficientnet_v2",
size = "large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=efficientnet_v2"],
main = "applications_load_weight_test.py",
shard_count = 8,
tags = [
"no_oss",
"no_pip",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_regnet",
size = "large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=regnet"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_nasnet_mobile",
srcs = ["applications_load_weight_test.py"],
args = ["--module=nasnet_mobile"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "applications_load_weight_test_nasnet_large",
srcs = ["applications_load_weight_test.py"],
args = ["--module=nasnet_large"],
main = "applications_load_weight_test.py",
tags = [
"no_oss",
"no_pip",
"requires-net:external",
],
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/preprocessing",
],
)
tf_py_test(
name = "imagenet_utils_test",
size = "medium",
srcs = ["imagenet_utils_test.py"],
shard_count = 2,
deps = [
":applications",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
|
tf-keras/tf_keras/applications/BUILD/0
|
{
"file_path": "tf-keras/tf_keras/applications/BUILD",
"repo_id": "tf-keras",
"token_count": 5757
}
| 218 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RegNet models for TF-Keras.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
(CVPR 2020)
- [Fast and Accurate Model Scaling](https://arxiv.org/abs/2103.06877)
(CVPR 2021)
"""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import layers
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/regnet/"
)
WEIGHTS_HASHES = {
"x002": (
"49fb46e56cde07fdaf57bffd851461a86548f6a3a4baef234dd37290b826c0b8",
"5445b66cd50445eb7ecab094c1e78d4d3d29375439d1a7798861c4af15ffff21",
),
"x004": (
"3523c7f5ac0dbbcc2fd6d83b3570e7540f7449d3301cc22c29547302114e4088",
"de139bf07a66c9256f2277bf5c1b6dd2d5a3a891a5f8a925a10c8a0a113fd6f3",
),
"x006": (
"340216ef334a7bae30daac9f414e693c136fac9ab868704bbfcc9ce6a5ec74bb",
"a43ec97ad62f86b2a96a783bfdc63a5a54de02eef54f26379ea05e1bf90a9505",
),
"x008": (
"8f145d6a5fae6da62677bb8d26eb92d0b9dfe143ec1ebf68b24a57ae50a2763d",
"3c7e4b0917359304dc18e644475c5c1f5e88d795542b676439c4a3acd63b7207",
),
"x016": (
"31c386f4c7bfef4c021a583099aa79c1b3928057ba1b7d182f174674c5ef3510",
"1b8e3d545d190271204a7b2165936a227d26b79bb7922bac5ee4d303091bf17a",
),
"x032": (
"6c025df1409e5ea846375bc9dfa240956cca87ef57384d93fef7d6fa90ca8c7f",
"9cd4522806c0fcca01b37874188b2bd394d7c419956d77472a4e072b01d99041",
),
"x040": (
"ba128046c588a26dbd3b3a011b26cb7fa3cf8f269c184c132372cb20b6eb54c1",
"b4ed0ca0b9a98e789e05000e830403a7ade4d8afa01c73491c44610195198afe",
),
"x064": (
"0f4489c3cd3ad979bd6b0324213998bcb36dc861d178f977997ebfe53c3ba564",
"3e706fa416a18dfda14c713423eba8041ae2509db3e0a611d5f599b5268a46c4",
),
"x080": (
"76320e43272719df648db37271a247c22eb6e810fe469c37a5db7e2cb696d162",
"7b1ce8e29ceefec10a6569640ee329dba7fbc98b5d0f6346aabade058b66cf29",
),
"x120": (
"5cafc461b78897d5e4f24e68cb406d18e75f31105ef620e7682b611bb355eb3a",
"36174ddd0299db04a42631d028abcb1cc7afec2b705e42bd28fcd325e5d596bf",
),
"x160": (
"8093f57a5824b181fb734ea21ae34b1f7ee42c5298e63cf6d587c290973195d2",
"9d1485050bdf19531ffa1ed7827c75850e0f2972118a996b91aa9264b088fd43",
),
"x320": (
"91fb3e6f4e9e44b3687e80977f7f4412ee9937c0c704232664fc83e4322ea01e",
"9db7eacc37b85c98184070e1a172e6104c00846f44bcd4e727da9e50d9692398",
),
"y002": (
"1e8091c674532b1a61c04f6393a9c570113e0197f22bd1b98cc4c4fe800c6465",
"f63221f63d625b8e201221499682587bfe29d33f50a4c4f4d53be00f66c0f12c",
),
"y004": (
"752fdbad21c78911bf1dcb8c513e5a0e14697b068e5d9e73525dbaa416d18d8e",
"45e6ba8309a17a77e67afc05228454b2e0ee6be0dae65edc0f31f1da10cc066b",
),
"y006": (
"98942e07b273da500ff9699a1f88aca78dfad4375faabb0bab784bb0dace80a9",
"b70261cba4e60013c99d130cc098d2fce629ff978a445663b6fa4f8fc099a2be",
),
"y008": (
"1b099377cc9a4fb183159a6f9b24bc998e5659d25a449f40c90cbffcbcfdcae4",
"b11f5432a216ee640fe9be6e32939defa8d08b8d136349bf3690715a98752ca1",
),
"y016": (
"b7ce1f5e223f0941c960602de922bcf846288ce7a4c33b2a4f2e4ac4b480045b",
"d7404f50205e82d793e219afb9eb2bfeb781b6b2d316a6128c6d7d7dacab7f57",
),
"y032": (
"6a6a545cf3549973554c9b94f0cd40e25f229fffb1e7f7ac779a59dcbee612bd",
"eb3ac1c45ec60f4f031c3f5180573422b1cf7bebc26c004637517372f68f8937",
),
"y040": (
"98d00118b335162bbffe8f1329e54e5c8e75ee09b2a5414f97b0ddfc56e796f6",
"b5be2a5e5f072ecdd9c0b8a437cd896df0efa1f6a1f77e41caa8719b7dfcb05d",
),
"y064": (
"65c948c7a18aaecaad2d1bd4fd978987425604ba6669ef55a1faa0069a2804b7",
"885c4b7ed7ea339daca7dafa1a62cb7d41b1068897ef90a5a3d71b4a2e2db31a",
),
"y080": (
"7a2c62da2982e369a4984d3c7c3b32d6f8d3748a71cb37a31156c436c37f3e95",
"3d119577e1e3bf8d153b895e8ea9e4ec150ff2d92abdca711b6e949c3fd7115d",
),
"y120": (
"a96ab0d27d3ae35a422ee7df0d789069b3e3217a99334e0ce861a96595bc5986",
"4a6fa387108380b730b71feea2ad80b5224b5ea9dc21dc156c93fe3c6186485c",
),
"y160": (
"45067240ffbc7ca2591313fee2f80dbdda6d66ec1a7451446f9a6d00d8f7ac6e",
"ead1e6b568be8f34447ec8941299a9df4368736ba9a8205de5427fa20a1fb316",
),
"y320": (
"b05e173e4ae635cfa22d06392ee3741284d17dadfee68f2aa6fd8cb2b7561112",
"cad78f74a586e24c61d38be17f3ae53bb9674380174d2585da1a526b8c20e1fd",
),
}
# The widths and depths are deduced from a quantized linear function. For
# more information, please refer to "Designing Network Design Spaces" by
# Radosavovic et al.
# BatchNorm momentum and epsilon values taken from original implementation.
MODEL_CONFIGS = {
"x002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "X",
},
"x004": {
"depths": [1, 2, 7, 12],
"widths": [32, 64, 160, 384],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x006": {
"depths": [1, 3, 5, 7],
"widths": [48, 96, 240, 528],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x008": {
"depths": [1, 3, 7, 5],
"widths": [64, 128, 288, 672],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x016": {
"depths": [2, 4, 10, 2],
"widths": [72, 168, 408, 912],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x032": {
"depths": [2, 6, 15, 2],
"widths": [96, 192, 432, 1008],
"group_width": 48,
"default_size": 224,
"block_type": "X",
},
"x040": {
"depths": [2, 5, 14, 2],
"widths": [80, 240, 560, 1360],
"group_width": 40,
"default_size": 224,
"block_type": "X",
},
"x064": {
"depths": [2, 4, 10, 1],
"widths": [168, 392, 784, 1624],
"group_width": 56,
"default_size": 224,
"block_type": "X",
},
"x080": {
"depths": [2, 5, 15, 1],
"widths": [80, 240, 720, 1920],
"group_width": 120,
"default_size": 224,
"block_type": "X",
},
"x120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "X",
},
"x160": {
"depths": [2, 6, 13, 1],
"widths": [256, 512, 896, 2048],
"group_width": 128,
"default_size": 224,
"block_type": "X",
},
"x320": {
"depths": [2, 7, 13, 1],
"widths": [336, 672, 1344, 2520],
"group_width": 168,
"default_size": 224,
"block_type": "X",
},
"y002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y004": {
"depths": [1, 3, 6, 6],
"widths": [48, 104, 208, 440],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y006": {
"depths": [1, 3, 7, 4],
"widths": [48, 112, 256, 608],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y008": {
"depths": [1, 3, 8, 2],
"widths": [64, 128, 320, 768],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y016": {
"depths": [2, 6, 17, 2],
"widths": [48, 120, 336, 888],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y032": {
"depths": [2, 5, 13, 1],
"widths": [72, 216, 576, 1512],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y040": {
"depths": [2, 6, 12, 2],
"widths": [128, 192, 512, 1088],
"group_width": 64,
"default_size": 224,
"block_type": "Y",
},
"y064": {
"depths": [2, 7, 14, 2],
"widths": [144, 288, 576, 1296],
"group_width": 72,
"default_size": 224,
"block_type": "Y",
},
"y080": {
"depths": [2, 4, 10, 1],
"widths": [168, 448, 896, 2016],
"group_width": 56,
"default_size": 224,
"block_type": "Y",
},
"y120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y160": {
"depths": [2, 4, 11, 1],
"widths": [224, 448, 1232, 3024],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y320": {
"depths": [2, 5, 12, 1],
"widths": [232, 696, 1392, 3712],
"group_width": 232,
"default_size": 224,
"block_type": "Y",
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
(CVPR 2020)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: Each TF-Keras Application expects a specific kind of input
preprocessing. For Regnets, preprocessing is included in the model using a
`Rescaling` layer. RegNet models expect their inputs to be float or uint8
tensors of pixels with values in the [0-255] range.
The naming of models is as follows: `RegNet<block_type><flops>` where
`block_type` is one of `(X, Y)` and `flops` signifies hundred million
floating point operations. For example RegNetY064 corresponds to RegNet with
Y block and 6.4 giga flops (64 hundred million flops).
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet), or the path to the weights
file to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional TF-Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
Defaults to `None`.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. 1000 is how many
ImageNet classes there are. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`. Defaults to `"softmax"`.
Returns:
A `keras.Model` instance.
"""
def PreStem(name=None):
    """Rescales and normalizes inputs to [0,1] and ImageNet mean and std.

    Args:
        name: name prefix

    Returns:
        Rescaled and normalized tensor
    """
    if name is None:
        name = "prestem" + str(backend.get_uid("prestem"))

    def apply(x):
        # Map [0, 255] pixel values into the [0, 1] range.
        rescale = layers.Rescaling(
            scale=1.0 / 255.0, name=name + "_prestem_rescaling"
        )
        return rescale(x)

    return apply
def Stem(name=None):
    """Implementation of RegNet stem.

    (Common to all model variants)

    Args:
        name: name prefix

    Returns:
        Output tensor of the Stem
    """
    if name is None:
        name = "stem" + str(backend.get_uid("stem"))

    def apply(x):
        # Strided 3x3 conv halves the spatial resolution and lifts the
        # input to 32 channels; BN + ReLU follow, as in the paper.
        conv = layers.Conv2D(
            32,
            (3, 3),
            strides=2,
            use_bias=False,
            padding="same",
            kernel_initializer="he_normal",
            name=name + "_stem_conv",
        )
        bn = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_stem_bn"
        )
        relu = layers.ReLU(name=name + "_stem_relu")
        return relu(bn(conv(x)))

    return apply
def SqueezeAndExciteBlock(filters_in, se_filters, name=None):
    """Implements the Squeeze & Excite block (https://arxiv.org/abs/1709.01507).

    Args:
        filters_in: input filters to the block
        se_filters: filters to squeeze to
        name: name prefix

    Returns:
        A function object
    """
    if name is None:
        name = str(backend.get_uid("squeeze_and_excite"))

    def apply(inputs):
        # Global context vector; kept 4D so it broadcasts over H and W.
        pooled = layers.GlobalAveragePooling2D(
            name=name + "_squeeze_and_excite_gap", keepdims=True
        )(inputs)
        # Squeeze: 1x1 bottleneck conv with ReLU.
        squeezed = layers.Conv2D(
            se_filters,
            (1, 1),
            activation="relu",
            kernel_initializer="he_normal",
            name=name + "_squeeze_and_excite_squeeze",
        )(pooled)
        # Excite: restore the channel count; sigmoid yields per-channel
        # gates in [0, 1].
        gates = layers.Conv2D(
            filters_in,
            (1, 1),
            activation="sigmoid",
            kernel_initializer="he_normal",
            name=name + "_squeeze_and_excite_excite",
        )(squeezed)
        return tf.math.multiply(gates, inputs)

    return apply
def XBlock(filters_in, filters_out, group_width, stride=1, name=None):
    """Implementation of X Block.

    Reference: [Designing Network Design
    Spaces](https://arxiv.org/abs/2003.13678)

    Args:
        filters_in: filters in the input tensor
        filters_out: filters in the output tensor
        group_width: group width
        stride: stride
        name: name prefix

    Returns:
        Output tensor of the block
    """
    if name is None:
        name = str(backend.get_uid("xblock"))

    def apply(inputs):
        # A stride-1 block uses the identity as its skip branch, so the
        # channel counts must already match.
        if filters_in != filters_out and stride == 1:
            raise ValueError(
                f"Input filters({filters_in}) and output "
                f"filters({filters_out}) "
                f"are not equal for stride {stride}. Input and output filters "
                f"must be equal for stride={stride}."
            )
        # Declare layers
        groups = filters_out // group_width
        if stride != 1:
            # Projection shortcut: strided 1x1 conv + BN matches the main
            # branch's spatial size and channel count.
            skip = layers.Conv2D(
                filters_out,
                (1, 1),
                strides=stride,
                use_bias=False,
                kernel_initializer="he_normal",
                name=name + "_skip_1x1",
            )(inputs)
            skip = layers.BatchNormalization(
                momentum=0.9, epsilon=1e-5, name=name + "_skip_bn"
            )(skip)
        else:
            skip = inputs
        # Build block
        # conv_1x1_1
        x = layers.Conv2D(
            filters_out,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_1",
        )(inputs)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_1_bn"
        )(x)
        x = layers.ReLU(name=name + "_conv_1x1_1_relu")(x)
        # conv_3x3: grouped, strided convolution — the defining op of the
        # X block (no squeeze-and-excite, unlike the Y block).
        x = layers.Conv2D(
            filters_out,
            (3, 3),
            use_bias=False,
            strides=stride,
            groups=groups,
            padding="same",
            kernel_initializer="he_normal",
            name=name + "_conv_3x3",
        )(x)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_3x3_bn"
        )(x)
        x = layers.ReLU(name=name + "_conv_3x3_relu")(x)
        # conv_1x1_2
        x = layers.Conv2D(
            filters_out,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_2",
        )(x)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_2_bn"
        )(x)
        # Residual add, then ReLU after the merge.
        x = layers.ReLU(name=name + "_exit_relu")(x + skip)
        return x

    return apply
def YBlock(
    filters_in,
    filters_out,
    group_width,
    stride=1,
    squeeze_excite_ratio=0.25,
    name=None,
):
    """Build a Y block closure: an X block plus Squeeze-and-Excite.

    Reference: [Designing Network Design
    Spaces](https://arxiv.org/abs/2003.13678)

    Args:
        filters_in: filters in the input tensor
        filters_out: filters in the output tensor
        group_width: group width
        stride: stride
        squeeze_excite_ratio: expansion ratio for the Squeeze and Excite block
        name: name prefix

    Returns:
        A function mapping an input tensor to the block's output tensor.
    """
    if name is None:
        name = str(backend.get_uid("yblock"))

    def apply(inputs):
        # A stride-1 block is residual (adds `inputs` back in), so the
        # channel counts have to line up.
        if stride == 1 and filters_in != filters_out:
            raise ValueError(
                f"Input filters({filters_in}) and output "
                f"filters({filters_out}) "
                f"are not equal for stride {stride}. Input and output filters "
                f"must be equal for stride={stride}."
            )

        groups = filters_out // group_width
        se_filters = int(filters_in * squeeze_excite_ratio)

        # Projection shortcut when downsampling, identity otherwise.
        if stride == 1:
            shortcut = inputs
        else:
            shortcut = layers.Conv2D(
                filters_out,
                (1, 1),
                strides=stride,
                use_bias=False,
                kernel_initializer="he_normal",
                name=name + "_skip_1x1",
            )(inputs)
            shortcut = layers.BatchNormalization(
                momentum=0.9, epsilon=1e-5, name=name + "_skip_bn"
            )(shortcut)

        # Main path: 1x1 -> grouped 3x3 (carries the stride) -> SE -> 1x1.
        h = layers.Conv2D(
            filters_out,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_1",
        )(inputs)
        h = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_1_bn"
        )(h)
        h = layers.ReLU(name=name + "_conv_1x1_1_relu")(h)

        h = layers.Conv2D(
            filters_out,
            (3, 3),
            use_bias=False,
            strides=stride,
            groups=groups,
            padding="same",
            kernel_initializer="he_normal",
            name=name + "_conv_3x3",
        )(h)
        h = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_3x3_bn"
        )(h)
        h = layers.ReLU(name=name + "_conv_3x3_relu")(h)

        h = SqueezeAndExciteBlock(filters_out, se_filters, name=name)(h)

        h = layers.Conv2D(
            filters_out,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_2",
        )(h)
        h = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_2_bn"
        )(h)

        return layers.ReLU(name=name + "_exit_relu")(h + shortcut)

    return apply
def ZBlock(
    filters_in,
    filters_out,
    group_width,
    stride=1,
    squeeze_excite_ratio=0.25,
    bottleneck_ratio=0.25,
    name=None,
):
    """Implementation of Z block.

    Reference: [Fast and Accurate Model
    Scaling](https://arxiv.org/abs/2103.06877).

    Args:
        filters_in: filters in the input tensor
        filters_out: filters in the output tensor
        group_width: group width
        stride: stride
        squeeze_excite_ratio: expansion ratio for the Squeeze and Excite block
        bottleneck_ratio: inverted bottleneck ratio
        name: name prefix

    Returns:
        Output tensor of the block
    """
    if name is None:
        name = str(backend.get_uid("zblock"))

    def apply(inputs):
        if filters_in != filters_out and stride == 1:
            raise ValueError(
                f"Input filters({filters_in}) and output filters({filters_out}) "
                f"are not equal for stride {stride}. Input and output filters "
                f"must be equal for stride={stride}."
            )
        groups = filters_out // group_width
        se_filters = int(filters_in * squeeze_excite_ratio)
        # Z blocks use an *inverted* bottleneck: expand, transform, project.
        inv_btlneck_filters = int(filters_out / bottleneck_ratio)
        # conv_1x1_1 (expansion)
        x = layers.Conv2D(
            inv_btlneck_filters,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_1",
        )(inputs)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_1_bn"
        )(x)
        x = tf.nn.silu(x)
        # conv_3x3 (grouped, carries the stride)
        x = layers.Conv2D(
            inv_btlneck_filters,
            (3, 3),
            use_bias=False,
            strides=stride,
            groups=groups,
            padding="same",
            kernel_initializer="he_normal",
            name=name + "_conv_3x3",
        )(x)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_3x3_bn"
        )(x)
        x = tf.nn.silu(x)
        # Squeeze-Excitation block.
        # Bug fix: SqueezeAndExciteBlock is a factory returning an `apply`
        # function that must be called on the tensor (see YBlock). The
        # original code assigned the function itself to `x`, which would
        # crash as soon as a Z-type RegNet was built.
        x = SqueezeAndExciteBlock(inv_btlneck_filters, se_filters, name=name)(x)
        # conv_1x1_2 (projection back to `filters_out`)
        x = layers.Conv2D(
            filters_out,
            (1, 1),
            use_bias=False,
            kernel_initializer="he_normal",
            name=name + "_conv_1x1_2",
        )(x)
        x = layers.BatchNormalization(
            momentum=0.9, epsilon=1e-5, name=name + "_conv_1x1_2_bn"
        )(x)
        # Only stride-1 Z blocks are residual; downsampling blocks have no
        # skip connection.
        if stride != 1:
            return x
        else:
            return x + inputs

    return apply
def Stage(block_type, depth, group_width, filters_in, filters_out, name=None):
    """Implementation of Stage in RegNet.

    A stage is `depth` blocks of a single type: the first block downsamples
    with stride 2 and switches the channel count from `filters_in` to
    `filters_out`; the remaining blocks keep stride 1 and constant width.

    Args:
        block_type: must be one of "X", "Y", "Z"
        depth: depth of stage, number of blocks to use
        group_width: group width of all blocks in this stage
        filters_in: input filters to this stage
        filters_out: output filters from this stage
        name: name prefix

    Returns:
        Output tensor of Stage
    """
    if name is None:
        name = str(backend.get_uid("stage"))

    # All three block factories share the same call signature, so a single
    # dispatch table replaces three copy-pasted branches.
    block_fns = {"X": XBlock, "Y": YBlock, "Z": ZBlock}

    def apply(inputs):
        if block_type not in block_fns:
            raise NotImplementedError(
                f"Block type `{block_type}` not recognized."
                "block_type must be one of (`X`, `Y`, `Z`). "
            )
        block_fn = block_fns[block_type]
        # First block: downsample and change width.
        x = block_fn(
            filters_in,
            filters_out,
            group_width,
            stride=2,
            name=f"{name}_{block_type}Block_0",
        )(inputs)
        # Remaining blocks: stride 1, constant width.
        for i in range(1, depth):
            x = block_fn(
                filters_out,
                filters_out,
                group_width,
                name=f"{name}_{block_type}Block_{i}",
            )(x)
        return x

    return apply
def Head(num_classes=1000, name=None):
    """Build the RegNet classification head (global pool + dense).

    Args:
        num_classes: number of classes for the final Dense layer
        name: name prefix

    Returns:
        A function applying the classification head to a feature tensor.
    """
    if name is None:
        name = str(backend.get_uid("head"))

    def apply(inputs):
        pooled = layers.GlobalAveragePooling2D(name=name + "_head_gap")(inputs)
        # NOTE(review): "head_dense" is missing the "_" separator used by
        # every other layer name; kept byte-identical because pretrained
        # checkpoints may reference this exact name — confirm before fixing.
        logits = layers.Dense(num_classes, name=name + "head_dense")(pooled)
        return logits

    return apply
def RegNet(
    depths,
    widths,
    group_width,
    block_type,
    default_size,
    model_name="regnet",
    include_preprocessing=True,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates RegNet architecture given specific configuration.

    Args:
      depths: An iterable containing depths for each individual stages.
      widths: An iterable containing output channel width of each individual
        stages
      group_width: Number of channels to be used in each group. See grouped
        convolutions for more information.
      block_type: Must be one of `{"X", "Y", "Z"}`. For more details see the
        papers "Designing network design spaces" and "Fast and Accurate Model
        Scaling"
      default_size: Default input image size.
      model_name: An optional name for the model.
      include_preprocessing: boolean denoting whether to include preprocessing
        in the model
      include_top: Boolean denoting whether to include classification head to
        the model.
      weights: one of `None` (random initialization), "imagenet" (pre-training
        on ImageNet), or the path to the weights file to be loaded.
      input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
        to use as image input for the model.
      input_shape: optional shape tuple, only to be specified if `include_top`
        is False. It should have exactly 3 inputs channels.
      pooling: optional pooling mode for feature extraction when `include_top`
        is `False`. - `None` means that the output of the model will be the 4D
        tensor output of the last convolutional layer. - `avg` means that global
        average pooling will be applied to the output of the last convolutional
        layer, and thus the output of the model will be a 2D tensor. - `max`
        means that global max pooling will be applied.
      classes: optional number of classes to classify images into, only to be
        specified if `include_top` is True, and if no `weights` argument is
        specified.
      classifier_activation: A `str` or callable. The activation function to use
        on the "top" layer. Ignored unless `include_top=True`. Set
        `classifier_activation=None` to return the logits of the "top" layer.

    Returns:
      A `keras.Model` instance.

    Raises:
      ValueError: in case of invalid argument for `weights`,
        or invalid input shape.
      ValueError: if `classifier_activation` is not `softmax` or `None` when
        using a pretrained top layer.
      ValueError: if `include_top` is True but `num_classes` is not 1000.
      ValueError: if `block_type` is not one of `{"X", "Y", "Z"}`
    """
    # `weights` must be one of the two sentinels or an existing file path;
    # short-circuit evaluation keeps gfile.exists from being called on None.
    if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )
    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            "If using `weights` as `'imagenet'` with `include_top`"
            " as true, `classes` should be 1000"
        )
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # When a tensor was passed in, anchor the Model at that tensor's original
    # source inputs so the resulting graph stays connected.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)[0]
    else:
        inputs = img_input
    x = inputs
    if include_preprocessing:
        x = PreStem(name=model_name)(x)
    x = Stem(name=model_name)(x)
    in_channels = 32  # Output from Stem
    # Four sequential stages; each downsamples and widens the channels.
    for num_stage in range(4):
        depth = depths[num_stage]
        out_channels = widths[num_stage]
        x = Stage(
            block_type,
            depth,
            group_width,
            in_channels,
            out_channels,
            name=model_name + "_Stage_" + str(num_stage),
        )(x)
        in_channels = out_channels
    if include_top:
        x = Head(num_classes=classes)(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D()(x)
    model = training.Model(inputs=inputs, outputs=x, name=model_name)
    # Load weights.
    if weights == "imagenet":
        # NOTE(review): model_name[-4:] assumes the default "regnet<variant>"
        # naming (e.g. "regnetx002" -> "x002"); a custom model_name breaks the
        # WEIGHTS_HASHES lookup and the download filename — confirm.
        if include_top:
            file_suffix = ".h5"
            file_hash = WEIGHTS_HASHES[model_name[-4:]][0]
        else:
            file_suffix = "_notop.h5"
            file_hash = WEIGHTS_HASHES[model_name[-4:]][1]
        file_name = model_name + file_suffix
        weights_path = data_utils.get_file(
            file_name,
            BASE_WEIGHTS_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
## Instantiating variants ##
def _regnet_variant(config_key, export_names):
    """Create one public RegNet variant constructor.

    All 24 exported variants differ only in their `MODEL_CONFIGS` entry and
    their public names, so they are generated from this single factory
    instead of 24 copy-pasted function definitions.

    Args:
        config_key: key into `MODEL_CONFIGS`, e.g. "x002" or "y320".
        export_names: iterable of `keras_export` API paths for the variant.

    Returns:
        The decorated constructor function, suitable for module-level
        assignment under its public name.
    """

    def variant(
        model_name="regnet" + config_key,
        include_top=True,
        include_preprocessing=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    ):
        config = MODEL_CONFIGS[config_key]
        return RegNet(
            config["depths"],
            config["widths"],
            config["group_width"],
            config["block_type"],
            config["default_size"],
            model_name=model_name,
            include_top=include_top,
            include_preprocessing=include_preprocessing,
            weights=weights,
            input_tensor=input_tensor,
            input_shape=input_shape,
            pooling=pooling,
            classes=classes,
            classifier_activation=classifier_activation,
        )

    # Expose the public name ("RegNetX002", ...) for repr/introspection and
    # for the BASE_DOCSTRING formatting done at the bottom of this module.
    variant.__name__ = "RegNet" + config_key.upper()
    variant.__qualname__ = variant.__name__
    return keras_export(*export_names)(variant)


# Instantiate and export every X- and Y-type variant under both API paths
# (e.g. keras.applications.regnet.RegNetX002 and keras.applications.RegNetX002).
for _config_key in (
    "x002", "x004", "x006", "x008", "x016", "x032",
    "x040", "x064", "x080", "x120", "x160", "x320",
    "y002", "y004", "y006", "y008", "y016", "y032",
    "y040", "y064", "y080", "y120", "y160", "y320",
):
    _public_name = "RegNet" + _config_key.upper()
    globals()[_public_name] = _regnet_variant(
        _config_key,
        (
            "keras.applications.regnet." + _public_name,
            "keras.applications." + _public_name,
        ),
    )
del _config_key, _public_name
# Attach the shared usage docstring to every exported variant constructor;
# each function's __name__ already equals its public name.
for _variant_fn in (
    RegNetX002, RegNetX004, RegNetX006, RegNetX008, RegNetX016, RegNetX032,
    RegNetX040, RegNetX064, RegNetX080, RegNetX120, RegNetX160, RegNetX320,
    RegNetY002, RegNetY004, RegNetY006, RegNetY008, RegNetY016, RegNetY032,
    RegNetY040, RegNetY064, RegNetY080, RegNetY120, RegNetY160, RegNetY320,
):
    _variant_fn.__doc__ = BASE_DOCSTRING.format(name=_variant_fn.__name__)
del _variant_fn
@keras_export("keras.applications.regnet.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder method for backward compatibility.

    RegNet models built by this module normalize their inputs internally
    (via the preprocessing included in the model), so users no longer need
    to call this method. It is an intentional no-op kept only to align the
    API surface with older versions of the model.

    Args:
        x: A floating point `numpy.array` or a `tf.Tensor`.
        data_format: Optional data format of the image tensor/array. `None`
            means the global setting `tf.keras.backend.image_data_format()`
            is used (unless you changed it, it uses "channels_last").
            Defaults to `None`.

    Returns:
        Unchanged `numpy.array` or `tf.Tensor`.
    """
    return x
@keras_export("keras.applications.regnet.decode_predictions")
def decode_predictions(preds, top=5):
    # Thin wrapper so this module exposes the shared ImageNet class-label
    # decoder; its docstring is copied from the helper just below.
    return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
|
tf-keras/tf_keras/applications/regnet.py/0
|
{
"file_path": "tf-keras/tf_keras/applications/regnet.py",
"repo_id": "tf-keras",
"token_count": 27067
}
| 219 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for running models in a distribution setting.
Mostly from
https://github.com/tensorflow/models/blob/master/official/utils/misc/distribution_utils.py.
"""
import json
import os
import tensorflow.compat.v2 as tf
def _collective_communication(all_reduce_alg):
    """Return a CollectiveCommunication based on all_reduce_alg.

    Args:
      all_reduce_alg: a string specifying which collective communication to
        pick, or None.

    Returns:
      tf.distribute.experimental.CollectiveCommunication object

    Raises:
      ValueError: if `all_reduce_alg` not in [None, "ring", "nccl"]
    """
    # None is a valid key and maps to AUTO (TF picks the implementation).
    options = {
        None: tf.distribute.experimental.CollectiveCommunication.AUTO,
        "ring": tf.distribute.experimental.CollectiveCommunication.RING,
        "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL,
    }
    if all_reduce_alg in options:
        return options[all_reduce_alg]
    raise ValueError(
        "When used with `multi_worker_mirrored`, valid values for "
        "all_reduce_alg are [`ring`, `nccl`]. Supplied value: {}".format(
            all_reduce_alg
        )
    )
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
"""Return a CrossDeviceOps based on all_reduce_alg and num_packs.
Args:
all_reduce_alg: a string specifying which cross device op to pick, or
None.
num_packs: an integer specifying number of packs for the cross device op.
Returns:
tf.distribute.CrossDeviceOps object or None.
Raises:
ValueError: if `all_reduce_alg` not in [None, "nccl",
"hierarchical_copy"].
"""
if all_reduce_alg is None:
return None
mirrored_all_reduce_options = {
"nccl": tf.distribute.NcclAllReduce,
"hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce,
}
if all_reduce_alg not in mirrored_all_reduce_options:
raise ValueError(
"When used with `mirrored`, valid values for all_reduce_alg are "
"[`nccl`, `hierarchical_copy`]. Supplied value: {}".format(
all_reduce_alg
)
)
cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
return cross_device_ops_class(num_packs=num_packs)
def get_distribution_strategy(
    distribution_strategy="mirrored",
    num_gpus=0,
    all_reduce_alg=None,
    num_packs=1,
):
    """Return a DistributionStrategy for running the model.

    Args:
      distribution_strategy: a string specifying which distribution strategy
        to use. Accepted values are "off", "one_device", "mirrored", and
        "multi_worker_mirrored" -- case insensitive. "off" means not to use
        Distribution Strategy.
      num_gpus: Number of GPUs to run this model.
      all_reduce_alg: Optional. Specifies which algorithm to use when
        performing all-reduce. For `multi_worker_mirrored`, valid values are
        "ring" and "nccl". For `mirrored`, valid values are "nccl" and
        "hierarchical_copy". If None, the strategy chooses based on device
        topology.
      num_packs: Optional. Number of packs passed to the `mirrored`
        cross-device op (`NcclAllReduce` / `HierarchicalCopyAllReduce`).
        Ignored by the other strategies.

    Returns:
      tf.distribute.DistributionStrategy object, or None when
      `distribution_strategy` is "off".

    Raises:
      ValueError: if `distribution_strategy` is "off" or "one_device" and
        `num_gpus` is larger than 1; or `num_gpus` is negative; or
        `distribution_strategy` is not one of the accepted values.
    """
    if num_gpus < 0:
        raise ValueError("`num_gpus` can not be negative.")

    # Comparisons below are against lowercase names (case-insensitive API).
    distribution_strategy = distribution_strategy.lower()

    if distribution_strategy == "off":
        if num_gpus > 1:
            raise ValueError(
                "When {} GPUs are specified, distribution_strategy "
                "flag cannot be set to `off`.".format(num_gpus)
            )
        return None

    if distribution_strategy == "multi_worker_mirrored":
        return tf.distribute.experimental.MultiWorkerMirroredStrategy(
            communication=_collective_communication(all_reduce_alg)
        )

    if distribution_strategy == "one_device":
        if num_gpus == 0:
            return tf.distribute.OneDeviceStrategy("device:CPU:0")
        if num_gpus > 1:
            raise ValueError(
                "`OneDeviceStrategy` can not be used for more than one device."
            )
        return tf.distribute.OneDeviceStrategy("device:GPU:0")

    if distribution_strategy == "mirrored":
        # With zero GPUs, mirror over the CPU so callers still get a strategy.
        if num_gpus == 0:
            devices = ["device:CPU:0"]
        else:
            devices = ["device:GPU:%d" % i for i in range(num_gpus)]
        return tf.distribute.MirroredStrategy(
            devices=devices,
            cross_device_ops=_mirrored_cross_device_ops(
                all_reduce_alg, num_packs
            ),
        )

    raise ValueError(
        f"Unrecognized Distribution Strategy: {distribution_strategy}"
    )
def configure_cluster(worker_hosts=None, task_index=-1):
    """Set multi-worker cluster spec in TF_CONFIG environment variable.

    If `TF_CONFIG` is already set in the environment, its cluster spec is
    used as-is and only counted. Otherwise, when `worker_hosts` is given, a
    worker-only cluster spec is written into `TF_CONFIG`.

    Args:
      worker_hosts: comma-separated list of worker ip:port pairs.
      task_index: index of this worker within `worker_hosts`. Must be >= 0
        when more than one worker is specified; ignored (forced to 0) for a
        single worker.

    Returns:
      Number of workers in the cluster.

    Raises:
      ValueError: if multiple workers are specified but `task_index` is
        negative.
    """
    # Treat an unset *or empty* TF_CONFIG as "no config": json.loads("")
    # would otherwise raise a JSONDecodeError.
    tf_config = json.loads(os.environ.get("TF_CONFIG") or "{}")
    if tf_config:
        # A cluster spec already exists; count its members (chief + workers).
        num_workers = len(tf_config["cluster"].get("chief", [])) + len(
            tf_config["cluster"].get("worker", [])
        )
    elif worker_hosts:
        workers = worker_hosts.split(",")
        num_workers = len(workers)
        if num_workers > 1 and task_index < 0:
            raise ValueError(
                "Must specify task_index when number of workers > 1"
            )
        task_index = 0 if num_workers == 1 else task_index
        os.environ["TF_CONFIG"] = json.dumps(
            {
                "cluster": {"worker": workers},
                "task": {"type": "worker", "index": task_index},
            }
        )
    else:
        # Single-process setup: nothing to configure.
        num_workers = 1
    return num_workers
def get_strategy_scope(strategy):
    """Return a context manager for running under `strategy`.

    Args:
      strategy: a `tf.distribute.Strategy` instance, or a falsy value.

    Returns:
      `strategy.scope()` when a strategy is supplied, otherwise a no-op
      `DummyContextManager` so callers can always use a `with` statement.
    """
    if strategy:
        return strategy.scope()
    return DummyContextManager()
class DummyContextManager:
    """A no-op context manager used when no distribution strategy is set.

    Stands in for `tf.distribute.Strategy.scope()` so calling code can
    unconditionally write `with get_strategy_scope(strategy): ...`.
    """

    def __enter__(self):
        # Nothing to set up.
        return None

    def __exit__(self, *exc_info):
        # Nothing to clean up; returning a falsy value propagates exceptions.
        return None
|
tf-keras/tf_keras/benchmarks/distribution_util.py/0
|
{
"file_path": "tf-keras/tf_keras/benchmarks/distribution_util.py",
"repo_id": "tf-keras",
"token_count": 2663
}
| 220 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmark base to run and report TF-Keras layers benchmark results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow.compat.v2 as tf
from tf_keras.benchmarks.layer_benchmarks import run_xprof
class LayerBenchmarksBase(tf.test.Benchmark):
    """Run and report benchmark results.

    The first run is without any profiling to purely measure running time.
    Second run is with xprof but no python trace.
    Third run is with xprof and python trace.

    Note: xprof runs fewer iterations, and the maximum iterations is 100.
    """

    def run_report(self, func, num_iters, metadata=None):
        """Run and report benchmark results for different settings.

        Args:
          func: zero-argument callable being benchmarked.
          metadata: optional dict merged into the reported `extras`.
        """
        # 0. Warm up so the timed runs don't pay one-time setup costs.
        func()

        # 1. Plain timing run with no profiler attached.
        start_time = time.time()
        for _ in range(num_iters):
            func()
        elapsed = time.time() - start_time
        mean_us = elapsed * 1e6 / num_iters

        metrics = [
            {
                "name": "examples_per_sec",
                "value": float(f"{num_iters / elapsed:.3f}"),
            },
            {
                "name": "us_per_example",
                "value": float(f"{mean_us:.3f}"),
            },
        ]

        # 2. Profiled run: xprof enabled, python trace disabled.
        profiled_iters = min(100, num_iters)
        xprof_link, us_per_example = run_xprof.run_with_xprof(
            func, profiled_iters, False
        )
        # This xprof link will appear in the benchmark dashboard.
        extras = {
            "xprof_link": xprof_link,
            "us_per_example_with_xprof": us_per_example,
        }

        # 3. Profiled run: xprof and python trace both enabled.
        xprof_link, us_per_example = run_xprof.run_with_xprof(
            func, profiled_iters, True
        )
        extras["python_trace_xprof_link"] = xprof_link
        extras["us_per_example_with_xprof_and_python"] = us_per_example

        if metadata:
            extras.update(metadata)
        self.report_benchmark(
            iters=num_iters,
            wall_time=mean_us,
            extras=extras,
            metrics=metrics,
        )
|
tf-keras/tf_keras/benchmarks/layer_benchmarks/layer_benchmarks_test_base.py/0
|
{
"file_path": "tf-keras/tf_keras/benchmarks/layer_benchmarks/layer_benchmarks_test_base.py",
"repo_id": "tf-keras",
"token_count": 1177
}
| 221 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Callbacks: utilities called at certain points during model training."""
import collections
import copy
import csv
import json
import os
import re
import sys
import time
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.distribute import distributed_file_utils
from tf_keras.distribute import worker_training_state
from tf_keras.optimizers import optimizer
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.utils import generic_utils
from tf_keras.utils import io_utils
from tf_keras.utils import tf_utils
from tf_keras.utils import version_utils
from tf_keras.utils.data_utils import Sequence
from tf_keras.utils.generic_utils import Progbar
from tf_keras.utils.mode_keys import ModeKeys
from tf_keras.utils.timed_threads import TimedThread
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
try:
import requests
except ImportError:
requests = None
# Note: `configure_callbacks` is only used in TF1.
def configure_callbacks(
    callbacks,
    model,
    do_validation=False,
    batch_size=None,
    epochs=None,
    steps_per_epoch=None,
    samples=None,
    verbose=1,
    count_mode="steps",
    mode=ModeKeys.TRAIN,
):
    """Configures callbacks for use in various training loops.

    Args:
      callbacks: List of Callbacks.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epoch to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. TF-Keras logging verbosity to pass to
        ProgbarLogger.
      count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.

    Returns:
      Instance of CallbackList used to control all Callbacks.
    """
    # Already wrapped? Nothing to configure.
    if isinstance(callbacks, CallbackList):
        return callbacks

    cbks = callbacks if callbacks else []

    # Training always records history and aggregates metrics; a progress
    # bar is added only when verbose.
    if mode == ModeKeys.TRAIN:
        model.history = History()
        cbks = [BaseLogger()] + cbks + [model.history]
        if verbose:
            cbks.append(ProgbarLogger(count_mode))
    callback_list = CallbackList(cbks)

    # Callbacks talk to the (possibly wrapped) callback model rather than
    # the training model directly.
    callback_model = model._get_callback_model()
    callback_list.set_model(callback_model)

    set_callback_parameters(
        callback_list,
        model,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=samples,
        verbose=verbose,
        mode=mode,
    )

    callback_list.model.stop_training = False
    return callback_list
def set_callback_parameters(
    callback_list,
    model,
    do_validation=False,
    batch_size=None,
    epochs=None,
    steps_per_epoch=None,
    samples=None,
    verbose=1,
    mode=ModeKeys.TRAIN,
):
    """Sets callback parameters.

    Args:
      callback_list: CallbackList instance.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epoch to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. TF-Keras logging verbosity to pass to
        ProgbarLogger.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.
    """
    metric_names = None
    # Loggers track every metric except `loss` as stateful.
    for cbk in callback_list:
        if isinstance(cbk, (BaseLogger, ProgbarLogger)):
            metric_names = metric_names or model.metrics_names
            cbk.stateful_metrics = metric_names[1:]  # Exclude `loss`

    # Set callback parameters
    callback_metrics = []
    # When we have deferred build scenario with iterator input, we will
    # compile when we standardize first batch of data.
    if mode != ModeKeys.PREDICT:
        metric_names = metric_names or model.metrics_names
        callback_metrics = list(metric_names)
        if do_validation:
            callback_metrics += ["val_" + n for n in metric_names]
    callback_params = {
        "batch_size": batch_size,
        "epochs": epochs,
        "steps": steps_per_epoch,
        "samples": samples,
        "verbose": verbose,
        "do_validation": do_validation,
        "metrics": callback_metrics,
    }
    callback_list.set_params(callback_params)
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (
hasattr(data, "__next__")
or hasattr(data, "next")
or isinstance(
data, (Sequence, tf.compat.v1.data.Iterator, tf.data.Iterator)
)
)
def make_logs(model, logs, outputs, mode, prefix=""):
    """Computes logs for sending to `on_batch_end` methods."""
    metric_names = model.metrics_names
    # Outside train/test (or with no named metrics) the raw outputs are
    # forwarded unlabeled under the "outputs" key.
    if mode not in (ModeKeys.TRAIN, ModeKeys.TEST) or not metric_names:
        logs["outputs"] = outputs
        return logs
    for name, value in zip(metric_names, outputs):
        logs[prefix + name] = value
    return logs
@keras_export("keras.callbacks.CallbackList")
class CallbackList:
    """Container abstracting a list of callbacks."""

    def __init__(
        self,
        callbacks=None,
        add_history=False,
        add_progbar=False,
        model=None,
        **params,
    ):
        """Container for `Callback` instances.

        This object wraps a list of `Callback` instances, making it possible
        to call them all at once via a single endpoint
        (e.g. `callback_list.on_epoch_end(...)`).

        Args:
            callbacks: List of `Callback` instances.
            add_history: Whether a `History` callback should be added, if one
                does not already exist in the `callbacks` list.
            add_progbar: Whether a `ProgbarLogger` callback should be added,
                if one does not already exist in the `callbacks` list.
            model: The `Model` these callbacks are used with.
            **params: If provided, parameters will be passed to each
                `Callback` via `Callback.set_params`.
        """
        self.callbacks = tf.nest.flatten(callbacks) if callbacks else []
        self._add_default_callbacks(add_history, add_progbar)

        if model:
            self.set_model(model)
        if params:
            self.set_params(params)

        # Performance optimization: determines if batch hooks need to be
        # called at all, and whether logs can be passed through as raw
        # tensors (only when *every* relevant callback supports tf logs).
        self._supports_tf_logs = all(
            getattr(cb, "_supports_tf_logs", False) for cb in self.callbacks
        )
        self._batch_hooks_support_tf_logs = all(
            getattr(cb, "_supports_tf_logs", False)
            for cb in self.callbacks
            if cb._implements_train_batch_hooks()
            or cb._implements_test_batch_hooks()
            or cb._implements_predict_batch_hooks()
        )

        self._should_call_train_batch_hooks = any(
            cb._implements_train_batch_hooks() for cb in self.callbacks
        )
        self._should_call_test_batch_hooks = any(
            cb._implements_test_batch_hooks() for cb in self.callbacks
        )
        self._should_call_predict_batch_hooks = any(
            cb._implements_predict_batch_hooks() for cb in self.callbacks
        )

        self._disallow_batch_hooks_in_ps_strategy()

        # Performance check: Check batch hooks for slowness compared to batch
        # time. Only run check for custom callbacks (i.e. not present in this
        # file).
        self._check_timing = any(
            cbk.__class__.__name__ not in globals() for cbk in self.callbacks
        )
        self._num_batches_for_timing_check = 5
        self._hook_times = {}
        self._batch_start_time = None
        self._batch_times = []

    def _add_default_callbacks(self, add_history, add_progbar):
        """Adds `Callback`s that are always present."""
        self._progbar = None
        self._history = None

        # Reuse instances the caller already supplied, if any.
        for cb in self.callbacks:
            if isinstance(cb, ProgbarLogger):
                self._progbar = cb
            elif isinstance(cb, History):
                self._history = cb

        if self._history is None and add_history:
            self._history = History()
            self.callbacks.append(self._history)

        if self._progbar is None and add_progbar:
            self._progbar = ProgbarLogger(count_mode="steps")
            self.callbacks.append(self._progbar)

    def _process_logs(self, logs, is_batch_hook=False):
        """Turns tensors into numpy arrays or Python scalars if necessary."""
        if logs is None:
            return {}
        # Skip the (expensive) tensor->numpy sync when all callbacks can
        # consume raw tf logs.
        if self._supports_tf_logs:
            return logs
        if is_batch_hook and self._batch_hooks_support_tf_logs:
            return logs
        return tf_utils.sync_to_numpy_or_python_type(logs)

    def append(self, callback):
        """Appends a `Callback` to the managed list."""
        self.callbacks.append(callback)

    def set_params(self, params):
        """Stores `params` and forwards them to every callback."""
        self.params = params
        for callback in self.callbacks:
            callback.set_params(params)

    def set_model(self, model):
        """Sets the model on this list and on every callback."""
        self.model = model
        # Keep the model's `history` attribute pointing at our History
        # callback so `model.fit` can return it.
        if self._history:
            model.history = self._history
        for callback in self.callbacks:
            callback.set_model(model)

    def _call_batch_hook(self, mode, hook, batch, logs=None):
        """Helper function for all batch_{begin | end} methods."""
        if not self.callbacks:
            return

        if hook == "begin":
            self._call_batch_begin_hook(mode, batch, logs)
        elif hook == "end":
            self._call_batch_end_hook(mode, batch, logs)
        else:
            raise ValueError(
                f"Unrecognized hook: {hook}. "
                'Expected values are ["begin", "end"]'
            )

    def _call_batch_begin_hook(self, mode, batch, logs):
        """Helper function for `on_*_batch_begin` methods."""
        hook_name = f"on_{mode}_batch_begin"
        self._call_batch_hook_helper(hook_name, batch, logs)

        # Start the batch timer *after* the begin hooks so their own cost
        # is not counted as batch time.
        if self._check_timing:
            self._batch_start_time = time.time()

    def _call_batch_end_hook(self, mode, batch, logs):
        """Helper function for `on_*_batch_end` methods."""
        hook_name = f"on_{mode}_batch_end"

        # Skip batch 0 when timing; presumably it includes one-time tracing
        # overhead that would skew the average -- NOTE(review): assumption.
        if self._check_timing and batch >= 1:
            batch_time = time.time() - self._batch_start_time
            self._batch_times.append(batch_time)

        self._call_batch_hook_helper(hook_name, batch, logs)

        if len(self._batch_times) >= self._num_batches_for_timing_check:
            end_hook_name = hook_name
            begin_hook_name = f"on_{mode}_batch_begin"
            avg_batch_time = sum(self._batch_times) / len(self._batch_times)
            avg_end_hook_time = sum(self._hook_times[end_hook_name]) / len(
                self._hook_times[end_hook_name]
            )
            avg_begin_hook_time = sum(self._hook_times[begin_hook_name]) / len(
                self._hook_times[begin_hook_name]
            )

            # Warn when hook time is comparable to (>=) the batch time.
            threshold_time = 1.0 * avg_batch_time
            warning_msg = (
                "Callback method `{hook}` is slow compared to "
                "the batch time (batch time: {batch_time:.4f}s vs "
                "`{hook}` time: {hook_time:.4f}s). Check your callbacks."
            )
            if avg_begin_hook_time > threshold_time:
                logging.warning(
                    warning_msg.format(
                        hook=begin_hook_name,
                        batch_time=avg_batch_time,
                        hook_time=avg_begin_hook_time,
                    )
                )
            if avg_end_hook_time > threshold_time:
                logging.warning(
                    warning_msg.format(
                        hook=end_hook_name,
                        batch_time=avg_batch_time,
                        hook_time=avg_end_hook_time,
                    )
                )
            # The check runs once; reset all timing state afterwards.
            self._check_timing = False
            self._batch_start_time = None
            self._batch_times = []
            self._hook_times = {}

    def _call_batch_hook_helper(self, hook_name, batch, logs):
        """Helper function for `on_*_batch_*` methods."""
        if self._check_timing:
            start_time = time.time()

        logs = self._process_logs(logs, is_batch_hook=True)
        for callback in self.callbacks:
            hook = getattr(callback, hook_name)
            hook(batch, logs)

        if self._check_timing:
            if hook_name not in self._hook_times:
                self._hook_times[hook_name] = []
            self._hook_times[hook_name].append(time.time() - start_time)

    def _call_begin_hook(self, mode):
        """Helper function for on_{train|test|predict}_begin methods."""
        if mode == ModeKeys.TRAIN:
            self.on_train_begin()
        elif mode == ModeKeys.TEST:
            self.on_test_begin()
        else:
            self.on_predict_begin()

    def _call_end_hook(self, mode):
        """Helper function for on_{train|test|predict}_end methods."""
        if mode == ModeKeys.TRAIN:
            self.on_train_end()
        elif mode == ModeKeys.TEST:
            self.on_test_end()
        else:
            self.on_predict_end()

    def on_batch_begin(self, batch, logs=None):
        """Backwards-compatibility alias for `on_train_batch_begin`."""
        if self._should_call_train_batch_hooks:
            self._call_batch_hook(ModeKeys.TRAIN, "begin", batch, logs=logs)

    def on_batch_end(self, batch, logs=None):
        """Backwards-compatibility alias for `on_train_batch_end`."""
        if self._should_call_train_batch_hooks:
            self._call_batch_hook(ModeKeys.TRAIN, "end", batch, logs=logs)

    def on_epoch_begin(self, epoch, logs=None):
        """Calls the `on_epoch_begin` methods of its callbacks.

        This function should only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)

    def on_epoch_end(self, epoch, logs=None):
        """Calls the `on_epoch_end` methods of its callbacks.

        This function should only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict, metric results for this training epoch, and for the
                validation epoch if validation is performed. Validation
                result keys are prefixed with `val_`.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)

    def on_train_batch_begin(self, batch, logs=None):
        """Calls the `on_train_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.train_step`.
                Typically, the values of the `Model`'s metrics are returned.
                Example: `{'loss': 0.2, 'accuracy': 0.7}`.
        """
        if self._should_call_train_batch_hooks:
            self._call_batch_hook(ModeKeys.TRAIN, "begin", batch, logs=logs)

    def on_train_batch_end(self, batch, logs=None):
        """Calls the `on_train_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """
        if self._should_call_train_batch_hooks:
            self._call_batch_hook(ModeKeys.TRAIN, "end", batch, logs=logs)

    def on_test_batch_begin(self, batch, logs=None):
        """Calls the `on_test_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.test_step`.
                Typically, the values of the `Model`'s metrics are returned.
                Example: `{'loss': 0.2, 'accuracy': 0.7}`.
        """
        if self._should_call_test_batch_hooks:
            self._call_batch_hook(ModeKeys.TEST, "begin", batch, logs=logs)

    def on_test_batch_end(self, batch, logs=None):
        """Calls the `on_test_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """
        if self._should_call_test_batch_hooks:
            self._call_batch_hook(ModeKeys.TEST, "end", batch, logs=logs)

    def on_predict_batch_begin(self, batch, logs=None):
        """Calls the `on_predict_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.predict_step`,
                it typically returns a dict with a key 'outputs' containing
                the model's outputs.
        """
        if self._should_call_predict_batch_hooks:
            self._call_batch_hook(ModeKeys.PREDICT, "begin", batch, logs=logs)

    def on_predict_batch_end(self, batch, logs=None):
        """Calls the `on_predict_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        """
        if self._should_call_predict_batch_hooks:
            self._call_batch_hook(ModeKeys.PREDICT, "end", batch, logs=logs)

    def on_train_begin(self, logs=None):
        """Calls the `on_train_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
                for this method, but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_train_begin(logs)

    def on_train_end(self, logs=None):
        """Calls the `on_train_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
                for this method, but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_train_end(logs)

    def on_test_begin(self, logs=None):
        """Calls the `on_test_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_test_begin(logs)

    def on_test_end(self, logs=None):
        """Calls the `on_test_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
                for this method, but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_test_end(logs)

    def on_predict_begin(self, logs=None):
        """Calls the 'on_predict_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
                method but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_begin(logs)

    def on_predict_end(self, logs=None):
        """Calls the `on_predict_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
                for this method, but that may change in the future.
        """
        logs = self._process_logs(logs)
        for callback in self.callbacks:
            callback.on_predict_end(logs)

    def __iter__(self):
        """Iterates over the wrapped `Callback` instances."""
        return iter(self.callbacks)

    def _disallow_batch_hooks_in_ps_strategy(self):
        """Error out if batch-level callbacks are passed with PSStrategy."""
        strategy = tf.distribute.get_strategy()
        if strategy._should_use_with_coordinator:
            unsupported_callbacks = []
            for cb in self.callbacks:
                # These Callbacks can accept RemoteValues directly.
                if getattr(cb, "_supports_tf_logs", False):
                    continue
                if (
                    cb._implements_train_batch_hooks()
                    or cb._implements_test_batch_hooks()
                    or cb._implements_predict_batch_hooks()
                ):
                    unsupported_callbacks.append(cb)
            if unsupported_callbacks:
                raise ValueError(
                    "Batch-level `Callback`s are not supported with "
                    "`ParameterServerStrategy`. Found unsupported "
                    f"callbacks: {unsupported_callbacks}"
                )

    def make_logs(self, model, logs, outputs, mode, prefix=""):
        """Computes logs for sending to `on_batch_end` methods."""
        # With no callbacks there is no consumer; skip the work.
        if not self.callbacks:
            return logs
        return make_logs(model, logs, outputs, mode, prefix=prefix)
@keras_export("keras.callbacks.Callback")
class Callback:
"""Abstract base class used to build new callbacks.
Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
`predict` in order to hook into the various stages of the model training and
inference lifecycle.
To create a custom callback, subclass `keras.callbacks.Callback` and
override the method associated with the stage of interest. See the
[Custom callback](https://www.tensorflow.org/guide/keras/custom_callback)
for more information.
Example:
>>> training_finished = False
>>> class MyCallback(tf.keras.callbacks.Callback):
... def on_train_end(self, logs=None):
... global training_finished
... training_finished = True
>>> model = tf.keras.Sequential([
... tf.keras.layers.Dense(1, input_shape=(1,))])
>>> model.compile(loss='mean_squared_error')
>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
... callbacks=[MyCallback()])
>>> assert training_finished == True
If you want to use `Callback` objects in a custom training loop:
1. You should pack all your callbacks into a single `callbacks.CallbackList`
so they can all be called together.
2. You will need to manually call all the `on_*` methods at the appropriate
locations in your loop. Like this:
Example:
```python
callbacks = tf.keras.callbacks.CallbackList([...])
callbacks.append(...)
callbacks.on_train_begin(...)
for epoch in range(EPOCHS):
callbacks.on_epoch_begin(epoch)
for i, data in dataset.enumerate():
callbacks.on_train_batch_begin(i)
batch_logs = model.train_step(data)
callbacks.on_train_batch_end(i, batch_logs)
epoch_logs = ...
callbacks.on_epoch_end(epoch, epoch_logs)
final_logs=...
callbacks.on_train_end(final_logs)
```
Attributes:
params: Dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: Instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch (see method-specific docstrings).
"""
def __init__(self):
self.validation_data = None
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
self._supports_tf_logs = False
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
@doc_controls.for_subclass_implementers
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should
only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should
only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result
keys are prefixed with `val_`. For training epoch, the values of
the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
"""
    @doc_controls.for_subclass_implementers
    def on_train_end(self, logs=None):
        """Called once at the end of training (end of `fit`).

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
              `on_epoch_end()` is passed to this argument for this method but
              that may change in the future.
        """
        # Intentionally a no-op: base implementation exists only to be
        # overridden by subclasses.
    @doc_controls.for_subclass_implementers
    def on_test_begin(self, logs=None):
        """Called at the beginning of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        """
        # Intentionally a no-op: base implementation exists only to be
        # overridden by subclasses.
    @doc_controls.for_subclass_implementers
    def on_test_end(self, logs=None):
        """Called at the end of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
              `on_test_batch_end()` is passed to this argument for this method
              but that may change in the future.
        """
        # Intentionally a no-op: base implementation exists only to be
        # overridden by subclasses.
    @doc_controls.for_subclass_implementers
    def on_predict_begin(self, logs=None):
        """Called at the beginning of prediction (start of `predict`).

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        """
        # Intentionally a no-op: base implementation exists only to be
        # overridden by subclasses.
    @doc_controls.for_subclass_implementers
    def on_predict_end(self, logs=None):
        """Called at the end of prediction (end of `predict`).

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        """
        # Intentionally a no-op: base implementation exists only to be
        # overridden by subclasses.
def _implements_train_batch_hooks(self):
"""Determines if this Callback should be called for each train batch."""
return (
not generic_utils.is_default(self.on_batch_begin)
or not generic_utils.is_default(self.on_batch_end)
or not generic_utils.is_default(self.on_train_batch_begin)
or not generic_utils.is_default(self.on_train_batch_end)
)
def _implements_test_batch_hooks(self):
"""Determines if this Callback should be called for each test batch."""
return not generic_utils.is_default(
self.on_test_batch_begin
) or not generic_utils.is_default(self.on_test_batch_end)
def _implements_predict_batch_hooks(self):
"""Determines if this Callback should be called for each predict
batch."""
return not generic_utils.is_default(
self.on_predict_batch_begin
) or not generic_utils.is_default(self.on_predict_batch_end)
@keras_export("keras.callbacks.BaseLogger")
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every TF-Keras model.
Args:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super().__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get("size", 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen`
# calculation.
num_steps = logs.get("num_steps", 1)
self.seen += batch_size * num_steps
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params["metrics"]:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
@keras_export("keras.callbacks.TerminateOnNaN")
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self):
super().__init__()
self._supports_tf_logs = True
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get("loss")
if loss is not None:
loss = tf_utils.sync_to_numpy_or_python_type(loss)
if np.isnan(loss) or np.isinf(loss):
io_utils.print_msg(
f"Batch {batch}: Invalid loss, terminating training"
)
self.model.stop_training = True
@keras_export("keras.callbacks.ProgbarLogger")
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Args:
count_mode: One of `"steps"` or `"samples"`.
Whether the progress bar should
count samples seen or steps (batches) seen.
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is.
All others will be averaged over time (e.g. loss, etc).
If not provided, defaults to the `Model`'s metrics.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode: str = "samples", stateful_metrics=None):
super().__init__()
self._supports_tf_logs = True
if count_mode == "samples":
self.use_steps = False
elif count_mode == "steps":
self.use_steps = True
else:
raise ValueError(
f"Unknown `count_mode`: {count_mode}. "
'Expected values are ["samples", "steps"]'
)
# Defaults to all Model's metrics except for loss.
self.stateful_metrics = (
set(stateful_metrics) if stateful_metrics else set()
)
self.seen = 0
self.progbar = None
self.target = None
self.verbose = 1
self.epochs = 1
self._train_step, self._test_step, self._predict_step = None, None, None
self._call_batch_hooks = True
self._called_in_fit = False
def set_params(self, params):
self.verbose = params["verbose"]
self.epochs = params["epochs"]
if self.use_steps and "steps" in params:
self.target = params["steps"]
elif not self.use_steps and "samples" in params:
self.target = params["samples"]
else:
self.target = (
None # Will be inferred at the end of the first epoch.
)
self._call_batch_hooks = self.verbose == 1
if self.target is None:
try:
self._train_step = self.model._train_counter
self._test_step = self.model._test_counter
self._predict_step = self.model._predict_counter
except AttributeError:
self._call_batch_hooks = True
def on_train_begin(self, logs=None):
# When this logger is called inside `fit`, validation is silent.
self._called_in_fit = True
def on_test_begin(self, logs=None):
if not self._called_in_fit:
self._reset_progbar()
self._maybe_init_progbar()
def on_predict_begin(self, logs=None):
self._reset_progbar()
self._maybe_init_progbar()
def on_epoch_begin(self, epoch, logs=None):
self._reset_progbar()
self._maybe_init_progbar()
if self.verbose and self.epochs > 1:
io_utils.print_msg(f"Epoch {epoch + 1}/{self.epochs}")
def on_train_batch_end(self, batch, logs=None):
self._batch_update_progbar(batch, logs)
def on_test_batch_end(self, batch, logs=None):
if not self._called_in_fit:
self._batch_update_progbar(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
# Don't pass prediction results.
self._batch_update_progbar(batch, None)
def on_epoch_end(self, epoch, logs=None):
self._finalize_progbar(logs, self._train_step)
def on_test_end(self, logs=None):
if not self._called_in_fit:
self._finalize_progbar(logs, self._test_step)
def on_predict_end(self, logs=None):
self._finalize_progbar(logs, self._predict_step)
def _reset_progbar(self):
self.seen = 0
self.progbar = None
def _maybe_init_progbar(self):
"""Instantiate a `Progbar` if not yet, and update the stateful
metrics."""
# TODO(rchao): Legacy TF1 code path may use list for
# `self.stateful_metrics`. Remove "cast to set" when TF1 support is
# dropped.
self.stateful_metrics = set(self.stateful_metrics)
if self.model:
# Update the existing stateful metrics as `self.model.metrics` may
# contain updated metrics after `MetricsContainer` is built in the
# first train step.
self.stateful_metrics = self.stateful_metrics.union(
set(m.name for m in self.model.metrics)
)
if self.progbar is None:
self.progbar = Progbar(
target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics,
unit_name="step" if self.use_steps else "sample",
)
self.progbar._update_stateful_metrics(self.stateful_metrics)
def _implements_train_batch_hooks(self):
return self._call_batch_hooks
def _implements_test_batch_hooks(self):
return self._call_batch_hooks
def _implements_predict_batch_hooks(self):
return self._call_batch_hooks
def _batch_update_progbar(self, batch, logs=None):
"""Updates the progbar."""
logs = logs or {}
self._maybe_init_progbar()
if self.use_steps:
self.seen = batch + 1 # One-indexed.
else:
# v1 path only.
logs = copy.copy(logs)
batch_size = logs.pop("size", 0)
num_steps = logs.pop("num_steps", 1)
logs.pop("batch", None)
add_seen = num_steps * batch_size
self.seen += add_seen
if self.verbose == 1:
# Only block async when verbose = 1.
logs = tf_utils.sync_to_numpy_or_python_type(logs)
self.progbar.update(self.seen, list(logs.items()), finalize=False)
def _finalize_progbar(self, logs, counter):
logs = tf_utils.sync_to_numpy_or_python_type(logs or {})
if self.target is None:
if counter is not None:
counter = counter.numpy()
if not self.use_steps:
counter *= logs.get("size", 1)
self.target = counter or self.seen
self.progbar.target = self.target
self.progbar.update(self.target, list(logs.items()), finalize=True)
@keras_export("keras.callbacks.History")
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every TF-Keras model. The `History` object
gets returned by the `fit` method of models.
Example:
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, verbose=1)
>>> print(history.params)
{'verbose': 1, 'epochs': 10, 'steps': 1}
>>> # check the keys of history object
>>> print(history.history.keys())
dict_keys(['loss'])
"""
def __init__(self):
super().__init__()
self.history = {}
def on_train_begin(self, logs=None):
self.epoch = []
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
# Set the history attribute on the model after the epoch ends. This will
# make sure that the state which is set is the latest one.
self.model.history = self
@keras_export("keras.callbacks.ModelCheckpoint")
class ModelCheckpoint(Callback):
"""Callback to save the TF-Keras model or model weights at some frequency.
`ModelCheckpoint` callback is used in conjunction with training using
`model.fit()` to save a model or weights (in a checkpoint file) at some
interval, so the model or weights can be loaded later to continue the
training from the state saved.
A few options this callback provides include:
- Whether to only keep the model that has achieved the "best performance" so
far, or whether to save the model at the end of every epoch regardless of
performance.
- Definition of 'best'; which quantity to monitor and whether it should be
maximized or minimized.
- The frequency it should save at. Currently, the callback supports saving
at the end of every epoch, or after a fixed number of training batches.
- Whether only weights are saved, or the whole model is saved.
Note: If you get `WARNING:tensorflow:Can save best model only with <name>
available, skipping` see the description of the `monitor` argument for
details on how to get this right.
Example:
```python
model.compile(loss=..., optimizer=...,
metrics=['accuracy'])
EPOCHS = 10
checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
# Model weights are saved at the end of every epoch, if it's the best seen
# so far.
model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
# The model weights (that are considered the best) are loaded into the
# model.
model.load_weights(checkpoint_filepath)
```
Args:
filepath: string or `PathLike`, path to save the model file. e.g.
filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
can contain named formatting options, which will be filled the value
of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example:
if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the
model checkpoints will be saved with the epoch number and the
validation loss in the filename. The directory of the filepath should
not be reused by any other callbacks to avoid conflicts.
monitor: The metric name to monitor. Typically the metrics are set by
the `Model.compile` method. Note:
* Prefix the name with `"val_`" to monitor validation metrics.
* Use `"loss"` or "`val_loss`" to monitor the model's total loss.
* If you specify metrics as strings, like `"accuracy"`, pass the same
string (with or without the `"val_"` prefix).
* If you pass `metrics.Metric` objects, `monitor` should be set to
`metric.name`
* If you're not sure about the metric names you can check the contents
of the `history.history` dictionary returned by
`history = model.fit()`
* Multi-output models set additional prefixes on the metric names.
verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
displays messages when the callback takes an action.
save_best_only: if `save_best_only=True`, it only saves when the model
is considered the "best" and the latest best model according to the
quantity monitored will not be overwritten. If `filepath` doesn't
contain formatting options like `{epoch}` then `filepath` will be
overwritten by each new better model.
mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the
decision to overwrite the current save file is made based on either
the maximization or the minimization of the monitored quantity.
For `val_acc`, this should be `max`, for `val_loss` this should be
`min`, etc. In `auto` mode, the mode is set to `max` if the quantities
monitored are 'acc' or start with 'fmeasure' and are set to `min` for
the rest of the quantities.
save_weights_only: if True, then only the model's weights will be saved
(`model.save_weights(filepath)`), else the full model is saved
(`model.save(filepath)`).
save_freq: `'epoch'` or integer. When using `'epoch'`, the callback
saves the model after each epoch. When using integer, the callback
saves the model at end of this many batches. If the `Model` is
compiled with `steps_per_execution=N`, then the saving criteria will
be checked every Nth batch. Note that if the saving isn't aligned to
epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset every
epoch). Defaults to `'epoch'`.
options: Optional `tf.train.CheckpointOptions` object if
`save_weights_only` is true or optional `tf.saved_model.SaveOptions`
object if `save_weights_only` is false.
initial_value_threshold: Floating point initial "best" value of the
metric to be monitored. Only applies if `save_best_value=True`. Only
overwrites the model weights already saved if the performance of
current model is better than this value.
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
"""
def __init__(
self,
filepath,
monitor: str = "val_loss",
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = "auto",
save_freq="epoch",
options=None,
initial_value_threshold=None,
**kwargs,
):
super().__init__()
self._supports_tf_logs = True
self.monitor = monitor
self.verbose = verbose
self.filepath = io_utils.path_to_string(filepath)
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.epochs_since_last_save = 0
self._batches_seen_since_last_saving = 0
self._last_batch_seen = -1
self.best = initial_value_threshold
if save_weights_only:
if options is None or isinstance(
options, tf.train.CheckpointOptions
):
self._options = options or tf.train.CheckpointOptions()
else:
raise TypeError(
"If save_weights_only is True, then `options` must be "
"either None or a tf.train.CheckpointOptions. "
f"Got {options}."
)
else:
if filepath and filepath.endswith(".keras") and options is not None:
raise ValueError(
"The native TF-Keras format does not support "
"the `options` argument. Please remove "
"the `options` argument, or use the SavedModel "
"format by removing the `.keras` extension from "
"the model filepath."
)
if options is None or isinstance(
options, tf.saved_model.SaveOptions
):
self._options = options or tf.saved_model.SaveOptions()
else:
raise TypeError(
"If save_weights_only is False, then `options` must be "
"either None or a tf.saved_model.SaveOptions. "
f"Got {options}."
)
# Deprecated field `load_weights_on_restart` is for loading the
# checkpoint file from `filepath` at the start of `model.fit()`
# TODO(rchao): Remove the arg during next breaking release.
if "load_weights_on_restart" in kwargs:
self.load_weights_on_restart = kwargs["load_weights_on_restart"]
logging.warning(
"`load_weights_on_restart` argument is deprecated. "
"Please use `model.load_weights()` for loading weights "
"before the start of `model.fit()`."
)
else:
self.load_weights_on_restart = False
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if "period" in kwargs:
self.period = kwargs["period"]
logging.warning(
"`period` argument is deprecated. Please use `save_freq` "
"to specify the frequency in number of batches seen."
)
else:
self.period = 1
if mode not in ["auto", "min", "max"]:
logging.warning(
"ModelCheckpoint mode %s is unknown, fallback to auto mode.",
mode,
)
mode = "auto"
if mode == "min":
self.monitor_op = np.less
if self.best is None:
self.best = np.Inf
elif mode == "max":
self.monitor_op = np.greater
if self.best is None:
self.best = -np.Inf
else:
if "acc" in self.monitor or self.monitor.startswith("fmeasure"):
self.monitor_op = np.greater
if self.best is None:
self.best = -np.Inf
else:
self.monitor_op = np.less
if self.best is None:
self.best = np.Inf
if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
raise ValueError(
f"Unrecognized save_freq: {self.save_freq}. "
'Expected save_freq are "epoch" or integer'
)
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def on_train_begin(self, logs=None):
if self.load_weights_on_restart:
filepath_to_load = (
self._get_most_recently_modified_file_matching_pattern(
self.filepath
)
)
if filepath_to_load is not None and self._checkpoint_exists(
filepath_to_load
):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`,
# and thus it attempts to load the most recently modified
# file with file name matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError(
f"Error loading file from {filepath_to_load}. "
f"Reason: {e}"
)
def _implements_train_batch_hooks(self):
# Only call batch hooks when saving on batch
return self.save_freq != "epoch"
def on_train_batch_end(self, batch, logs=None):
if self._should_save_on_batch(batch):
self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if self.save_freq == "epoch":
self._save_model(epoch=epoch, batch=None, logs=logs)
def _should_save_on_batch(self, batch):
"""Handles batch-level saving logic, supports steps_per_execution."""
if self.save_freq == "epoch":
return False
if batch <= self._last_batch_seen: # New epoch.
add_batches = batch + 1 # batches are zero-indexed.
else:
add_batches = batch - self._last_batch_seen
self._batches_seen_since_last_saving += add_batches
self._last_batch_seen = batch
if self._batches_seen_since_last_saving >= self.save_freq:
self._batches_seen_since_last_saving = 0
return True
return False
def _save_model(self, epoch, batch, logs):
"""Saves the model.
Args:
epoch: the epoch this iteration is in.
batch: the batch this iteration is in. `None` if the `save_freq`
is set to `epoch`.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
# Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(logs)
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, batch, logs)
dirname = os.path.dirname(filepath)
if (
dirname
and not dirname.startswith("gs://")
and not tf.io.gfile.exists(dirname)
):
tf.io.gfile.makedirs(dirname)
try:
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning(
"Can save best model only with %s available, "
"skipping.",
self.monitor,
)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: {self.monitor} "
"improved "
f"from {self.best:.5f} to {current:.5f}, "
f"saving model to {filepath}"
)
self.best = current
# Handles saving and corresponding options
self._save_handler(filepath)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: "
f"{self.monitor} did not improve "
f"from {self.best:.5f}"
)
else:
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: saving model to {filepath}"
)
# Handles saving and corresponding options
self._save_handler(filepath)
self._maybe_remove_file()
except IsADirectoryError: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: {filepath}"
)
except IOError as e: # h5py 2.x
# `e.errno` appears to be `None` so checking the content of
# `e.args[0]`.
if "is a directory" in str(e.args[0]).lower():
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
f"directory: f{filepath}"
)
# Re-throw the error for any other causes.
raise e
def _save_handler(self, filepath):
if self.save_weights_only:
if filepath.endswith(".weights.h5"):
self.model.save_weights(
filepath,
overwrite=True,
)
else:
self.model.save_weights(
filepath,
overwrite=True,
options=self._options,
)
else:
if filepath.endswith(".keras"):
self.model.save(filepath, overwrite=True)
else:
self.model.save(
filepath,
overwrite=True,
options=self._options,
)
def _get_file_path(self, epoch, batch, logs):
"""Returns the file path for checkpoint."""
try:
# `filepath` may contain placeholders such as
# `{epoch:02d}`,`{batch:02d}` and `{mape:.2f}`. A mismatch between
# logged metrics and the path's placeholders can cause formatting to
# fail.
if batch is None or "batch" in logs:
file_path = self.filepath.format(epoch=epoch + 1, **logs)
else:
file_path = self.filepath.format(
epoch=epoch + 1, batch=batch + 1, **logs
)
except KeyError as e:
raise KeyError(
f'Failed to format this callback filepath: "{self.filepath}". '
f"Reason: {e}"
)
self._write_filepath = distributed_file_utils.write_filepath(
file_path, self.model.distribute_strategy
)
return self._write_filepath
def _maybe_remove_file(self):
# Remove the checkpoint directory in multi-worker training where this
# worker should not checkpoint. It is a dummy directory previously saved
# for sync distributed training.
distributed_file_utils.remove_temp_dir_with_filepath(
self._write_filepath, self.model.distribute_strategy
)
def _checkpoint_exists(self, filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
if filepath.endswith(".h5"):
return tf.io.gfile.exists(filepath)
tf_saved_model_exists = tf.io.gfile.exists(filepath)
tf_weights_only_checkpoint_exists = tf.io.gfile.exists(
filepath + ".index"
)
return tf_saved_model_exists or tf_weights_only_checkpoint_exists
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
Pattern may contain python formatting placeholder. If
`tf.train.latest_checkpoint()` does not return None, use that;
otherwise, check for most recently modified one that matches the
pattern.
In the rare case where there are more than one pattern-matching file
having the same modified time that is most recent among all, return the
filepath that is largest (by `>` operator, lexicographically using the
numeric equivalents). This provides a tie-breaker when multiple files
are most recent. Note that a larger `filepath` can sometimes indicate a
later time of modification (for instance, when epoch/batch is used as
formatting option), but not necessarily (when accuracy or loss is used).
The tie-breaker is put in the logic as best effort to return the most
recent, and to avoid undeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5',
'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
# Write something to each of the files
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Args:
pattern: The file pattern that may optionally contain python
placeholder such as `{epoch:02d}`.
Returns:
The most recently modified file's full filepath matching `pattern`.
If `pattern` does not contain any placeholder, this returns the
filepath that exactly matches `pattern`. Returns `None` if no match
is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = "^" + re.sub(r"{.*}", r".*", base_name) + "$"
# If tf.train.latest_checkpoint tells us there exists a latest
# checkpoint, use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)
):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if tf.io.gfile.exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (
file_path_with_largest_file_name is None
or file_path > file_path_with_largest_file_name
):
file_path_with_largest_file_name = file_path
if mod_time > latest_mod_time:
latest_mod_time = mod_time
file_path_with_latest_mod_time = file_path
# In the case a file with later modified time is found,
# reset the counter for the number of files with latest
# modified time.
n_file_with_latest_mod_time = 1
elif mod_time == latest_mod_time:
# In the case a file has modified time tied with the
# most recent, increment the counter for the number of
# files with latest modified time by 1.
n_file_with_latest_mod_time += 1
if n_file_with_latest_mod_time == 1:
# Return the sole file that has most recent modified time.
return file_path_with_latest_mod_time
else:
# If there are more than one file having latest modified time,
# return the file path with the largest file name.
return file_path_with_largest_file_name
@keras_export("keras.callbacks.BackupAndRestore", v1=[])
class BackupAndRestore(Callback):
"""Callback to back up and restore the training state.
`BackupAndRestore` callback is intended to recover training from an
interruption that has happened in the middle of a `Model.fit` execution, by
backing up the training states in a temporary checkpoint file (with the help
of a `tf.train.CheckpointManager`), at the end of each epoch. Each backup
overwrites the previously written checkpoint file, so at any given time
there is at most one such checkpoint file for backup/restoring purpose.
If training restarts before completion, the training state (which includes
the `Model` weights and epoch number) is restored to the most recently saved
state at the beginning of a new `Model.fit` run. At the completion of a
`Model.fit` run, the temporary checkpoint file is deleted.
Note that the user is responsible to bring jobs back after the interruption.
This callback is important for the backup and restore mechanism for fault
tolerance purpose, and the model to be restored from a previous checkpoint
is expected to be the same as the one used to back up. If user changes
arguments passed to compile or fit, the checkpoint saved for fault tolerance
can become invalid.
Note:
1. This callback is not compatible with eager execution disabled.
2. A checkpoint is saved at the end of each epoch. After restoring,
`Model.fit` redoes any partial work during the unfinished epoch in which the
training got restarted (so the work done before the interruption doesn't
affect the final model state).
3. This works for both single worker and multi-worker modes. When
`Model.fit` is used with `tf.distribute`, it supports
`tf.distribute.MirroredStrategy`,
`tf.distribute.MultiWorkerMirroredStrategy`, `tf.distribute.TPUStrategy`,
and `tf.distribute.experimental.ParameterServerStrategy`.
Example:
>>> class InterruptingCallback(tf.keras.callbacks.Callback):
... def on_epoch_begin(self, epoch, logs=None):
... if epoch == 4:
... raise RuntimeError('Interrupting!')
>>> callback = tf.keras.callbacks.BackupAndRestore(backup_dir="/tmp/backup")
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> try:
... model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
... batch_size=1, callbacks=[callback, InterruptingCallback()],
... verbose=0)
... except:
... pass
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, batch_size=1, callbacks=[callback],
... verbose=0)
>>> # Only 6 more epochs are run, since first training got interrupted at
>>> # zero-indexed epoch 4, second training will continue from 4 to 9.
>>> len(history.history['loss'])
6
Besides the option to save at the end of every epoch or every N steps, if
you are doing distributed training with
`tf.distribute.MultiWorkerMirroredStrategy` on Google Cloud Platform or
Google Borg, you can also use the `save_before_preemption` argument
to enable saving a checkpoint right before a worker gets preempted
by other jobs and training gets interrupted. See
`tf.distribute.experimental.PreemptionCheckpointHandler` for more details.
Args:
backup_dir: String, path to store the checkpoint.
e.g. `backup_dir = os.path.join(working_dir, 'backup')`.
This is the directory in which the system stores temporary files to
recover the model from jobs terminated unexpectedly. The directory
cannot be reused elsewhere to store other files, e.g. by the
`BackupAndRestore` callback of another training run,
or by another callback
(e.g. `ModelCheckpoint`) of the same training.
save_freq: `'epoch'`, integer, or `False`. When set to `'epoch'`
the callback saves the checkpoint at the end of each epoch.
When set to an integer, the callback saves the checkpoint every
`save_freq` batches. Set `save_freq` to `False` if only using
preemption checkpointing (with `save_before_preemption=True`).
delete_checkpoint: Boolean, default to True. This `BackupAndRestore`
callback works by saving a checkpoint to back up the training state.
If `delete_checkpoint=True`, the checkpoint will be deleted after
training is finished. Use `False` if you'd like to keep the checkpoint
for future usage.
save_before_preemption: A boolean value instructing whether to turn on
the automatic checkpoint saving for preemption/maintenance events.
This only supports
`tf.distribute.MultiWorkerMirroredStrategy` on Google Cloud Platform
or Google Borg for now.
"""
def __init__(
    self,
    backup_dir,
    save_freq="epoch",
    delete_checkpoint=True,
    save_before_preemption=False,
):
    """Validates the execution environment and stores the settings."""
    super().__init__()
    # Distribution strategies under which checkpoint-based recovery
    # is supported.
    self._supported_strategies = (
        tf.distribute.MirroredStrategy,
        tf.distribute.MultiWorkerMirroredStrategy,
        tf.distribute.experimental.TPUStrategy,
        tf.distribute.TPUStrategy,
        tf.distribute.experimental.ParameterServerStrategy,
    )
    self._supports_tf_logs = True
    self.backup_dir = backup_dir
    self.save_freq = save_freq
    self.delete_checkpoint = delete_checkpoint
    self.save_before_preemption = save_before_preemption
    # Counters used for step-frequency checkpointing.
    self._batches_count = 0
    self._current_epoch = 0
    if not tf.executing_eagerly():
        if tf.inside_function():
            raise ValueError(
                "This Callback's method contains Python state and "
                "should be called outside of `tf.function`s."
            )
        # Legacy graph mode is not supported at all.
        raise ValueError(
            "BackupAndRestore only supports eager mode. In graph "
            "mode, consider using ModelCheckpoint to manually save "
            "and restore weights with `model.load_weights()` and by "
            "providing `initial_epoch` in `model.fit()` for fault "
            "tolerance."
        )
    if not (save_freq or save_before_preemption):
        raise ValueError(
            "Either `save_freq` or `save_before_preemption` " "must be set."
        )
    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False
def on_train_begin(self, logs=None):
    """Creates the training-state manager and restores any backup."""
    # A WorkerTrainingState manages the training state needed for
    # failure-recovery of a worker during training.
    if self.model._distribution_strategy and not isinstance(
        self.model.distribute_strategy, self._supported_strategies
    ):
        raise NotImplementedError(
            f"{type(self.model.distribute_strategy)} is not supported yet. "
            "Currently BackupAndRestore callback "
            "only supports empty strategy, "
            "MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy."
        )
    state = worker_training_state.WorkerTrainingState(
        self.model,
        self.backup_dir,
        self.save_freq,
        self.save_before_preemption,
    )
    # Keep references on both the callback and the model.
    self.model._training_state = state
    self._training_state = state
    state.restore()
def on_train_batch_begin(self, batch, logs=None):
    """Records the in-progress batch index in the checkpoint variable."""
    # ParameterServerStrategy does not do batch-level bookkeeping.
    if not isinstance(
        self.model.distribute_strategy,
        tf.distribute.ParameterServerStrategy,
    ):
        self._training_state._ckpt_saved_batch.assign(batch)
def on_train_batch_end(self, batch, logs=None):
    """Backs up state on preemption and at the configured step frequency."""
    # ParameterServerStrategy does not do batch-level bookkeeping.
    if isinstance(
        self.model.distribute_strategy,
        tf.distribute.ParameterServerStrategy,
    ):
        return
    self._training_state.backup_if_preempted()
    # Step-frequency checkpointing only applies for an integer save_freq.
    if not self.save_freq or self.save_freq == "epoch":
        return
    self._batches_count += 1
    if self._batches_count >= self.save_freq:
        self._batches_count = 0
        self._backup(epoch=self._current_epoch, batch=batch)
def _implements_train_batch_hooks(self):
    """Returns whether per-batch callbacks are needed at all."""
    # Batch-level hooks are only required for step-frequency saving;
    # epoch-frequency saving is handled in `on_epoch_end`.
    saves_per_epoch = self.save_freq == "epoch"
    return not saves_per_epoch
def on_train_end(self, logs=None):
    """Optionally deletes the backup and releases the training state."""
    if self.delete_checkpoint:
        # The checkpoint only exists for fault tolerance; unless the user
        # opts out, remove it once training finishes normally.
        self._training_state.delete_backup()
    # Drop both references to the training state.
    del self.model._training_state
    del self._training_state
def on_epoch_begin(self, epoch, logs=None):
    """Records the epoch now running, for checkpointing and recovery."""
    self._current_epoch = epoch
    self._training_state._ckpt_saved_epoch.assign(epoch)
def on_epoch_end(self, epoch, logs=None):
    """Backs up the training state at the end of each epoch."""
    # Back up the model and current epoch for possible future recovery.
    if self.save_freq == "epoch":
        self._backup(epoch=epoch)
def _backup(self, epoch, batch=0):
    """Delegates the actual state save to the `WorkerTrainingState` helper."""
    self._training_state.back_up(epoch=epoch, batch=batch)
@keras_export("keras.callbacks.experimental.BackupAndRestore", v1=[])
@deprecation.deprecated_endpoints(
    "keras.callbacks.experimental.BackupAndRestore"
)
class BackupAndRestoreExperimental(BackupAndRestore):
    """Deprecated alias for `tf.keras.callbacks.BackupAndRestore`.

    Caution: the `tf.keras.callbacks.experimental.BackupAndRestore`
    endpoint will be removed in a future release; switch to
    `tf.keras.callbacks.BackupAndRestore` instead.
    """

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice, then defer entirely to the
        # supported implementation.
        logging.warning(
            "`tf.keras.callbacks.experimental.BackupAndRestore` endpoint is "
            "deprecated and will be removed in a future release. Please use "
            "`tf.keras.callbacks.BackupAndRestore`."
        )
        super().__init__(*args, **kwargs)
@keras_export("keras.callbacks.EarlyStopping")
class EarlyStopping(Callback):
    """Stop training when a monitored metric has stopped improving.

    Assuming the goal of a training is to minimize the loss. With this, the
    metric to be monitored would be `'loss'`, and mode would be `'min'`. A
    `model.fit()` training loop will check at end of every epoch whether
    the loss is no longer decreasing, considering the `min_delta` and
    `patience` if applicable. Once it's found no longer decreasing,
    `model.stop_training` is marked True and the training terminates.

    The quantity to be monitored needs to be available in `logs` dict.
    To make it so, pass the loss or metrics at `model.compile()`.

    Args:
      monitor: Quantity to be monitored.
      min_delta: Minimum change in the monitored quantity
          to qualify as an improvement, i.e. an absolute
          change of less than min_delta, will count as no
          improvement.
      patience: Number of epochs with no improvement
          after which training will be stopped.
      verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
          displays messages when the callback takes an action.
      mode: One of `{"auto", "min", "max"}`. In `min` mode,
          training will stop when the quantity
          monitored has stopped decreasing; in `"max"`
          mode it will stop when the quantity
          monitored has stopped increasing; in `"auto"`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      baseline: Baseline value for the monitored quantity.
          Training will stop if the model doesn't show improvement over the
          baseline.
      restore_best_weights: Whether to restore model weights from
          the epoch with the best value of the monitored quantity.
          If False, the model weights obtained at the last step of
          training are used. An epoch will be restored regardless
          of the performance relative to the `baseline`. If no epoch
          improves on `baseline`, training will run for `patience`
          epochs and restore weights from the best epoch in that set.
      start_from_epoch: Number of epochs to wait before starting
          to monitor improvement. This allows for a warm-up period in which
          no improvement is expected and thus training will not be stopped.

    Example:

    >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
    >>> # This callback will stop the training when there is no improvement in
    >>> # the loss for three consecutive epochs.
    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=10, batch_size=1, callbacks=[callback],
    ...                     verbose=0)
    >>> len(history.history['loss'])  # Only 4 epochs are run.
    4
    """

    def __init__(
        self,
        monitor="val_loss",
        min_delta=0,
        patience=0,
        verbose=0,
        mode="auto",
        baseline=None,
        restore_best_weights=False,
        start_from_epoch=0,
    ):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.baseline = baseline
        self.min_delta = abs(min_delta)
        self.wait = 0
        self.stopped_epoch = 0
        self.restore_best_weights = restore_best_weights
        self.best_weights = None
        self.start_from_epoch = start_from_epoch
        if mode not in ["auto", "min", "max"]:
            logging.warning(
                "EarlyStopping mode %s is unknown, fallback to auto mode.",
                mode,
            )
            mode = "auto"
        if mode == "min":
            self.monitor_op = np.less
        elif mode == "max":
            self.monitor_op = np.greater
        else:
            # In "auto" mode, infer the direction from the metric name:
            # accuracy-like metrics should increase, anything else
            # (e.g. losses) should decrease.
            if (
                self.monitor.endswith("acc")
                or self.monitor.endswith("accuracy")
                or self.monitor.endswith("auc")
            ):
                self.monitor_op = np.greater
            else:
                self.monitor_op = np.less
        # Fold the improvement direction into the delta so that
        # `_is_improvement` can use a single comparison. (Previously a
        # no-op `min_delta *= 1` branch existed for the "greater" case.)
        if self.monitor_op == np.less:
            self.min_delta *= -1

    def on_train_begin(self, logs=None):
        """Resets internal state so callback instances can be re-used."""
        self.wait = 0
        self.stopped_epoch = 0
        # Use `np.inf`: the `np.Inf` alias was removed in NumPy 2.0.
        self.best = np.inf if self.monitor_op == np.less else -np.inf
        self.best_weights = None
        self.best_epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        """Checks for improvement and triggers early stopping when due."""
        current = self.get_monitor_value(logs)
        if current is None or epoch < self.start_from_epoch:
            # If no monitor value exists or still in initial warm-up stage.
            return
        if self.restore_best_weights and self.best_weights is None:
            # Restore the weights after first epoch if no progress is ever
            # made.
            self.best_weights = self.model.get_weights()
            self.best_epoch = epoch
        self.wait += 1
        if self._is_improvement(current, self.best):
            self.best = current
            self.best_epoch = epoch
            if self.restore_best_weights:
                self.best_weights = self.model.get_weights()
            # Only restart wait if we beat both the baseline and our
            # previous best.
            if self.baseline is None or self._is_improvement(
                current, self.baseline
            ):
                self.wait = 0
            return
        # Only check after the first epoch.
        if self.wait >= self.patience and epoch > 0:
            self.stopped_epoch = epoch
            self.model.stop_training = True

    def on_train_end(self, logs=None):
        """Reports the stop and optionally restores the best weights."""
        if self.stopped_epoch > 0 and self.verbose > 0:
            io_utils.print_msg(
                f"Epoch {self.stopped_epoch + 1}: early stopping"
            )
        if self.restore_best_weights and self.best_weights is not None:
            if self.verbose > 0:
                io_utils.print_msg(
                    "Restoring model weights from "
                    "the end of the best epoch: "
                    f"{self.best_epoch + 1}."
                )
            self.model.set_weights(self.best_weights)

    def get_monitor_value(self, logs):
        """Returns the monitored value from `logs`, warning when absent."""
        logs = logs or {}
        monitor_value = logs.get(self.monitor)
        if monitor_value is None:
            logging.warning(
                "Early stopping conditioned on metric `%s` "
                "which is not available. Available metrics are: %s",
                self.monitor,
                ",".join(list(logs.keys())),
            )
        return monitor_value

    def _is_improvement(self, monitor_value, reference_value):
        """True if `monitor_value` beats `reference_value` by `min_delta`."""
        return self.monitor_op(monitor_value - self.min_delta, reference_value)
@keras_export("keras.callbacks.RemoteMonitor")
class RemoteMonitor(Callback):
    """Callback used to stream events to a server.

    Requires the `requests` library.
    Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
    HTTP POST, with a `data` argument which is a
    JSON-encoded dictionary of event data.
    If `send_as_json=True`, the content type of the request will be
    `"application/json"`.
    Otherwise the serialized JSON will be sent within a form.

    Args:
        root: String; root url of the target server.
        path: String; path relative to `root` to which the events will be
            sent.
        field: String; JSON field under which the data will be stored.
            The field is used only if the payload is sent within a form
            (i.e. send_as_json is set to False).
        headers: Dictionary; optional custom HTTP headers.
        send_as_json: Boolean; whether the request should be
            sent as `"application/json"`.
    """

    def __init__(
        self,
        root="http://localhost:9000",
        path="/publish/epoch/end/",
        field="data",
        headers=None,
        send_as_json=False,
    ):
        super().__init__()
        # Destination and payload configuration only; no connection is
        # opened until an epoch actually ends.
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers
        self.send_as_json = send_as_json

    def on_epoch_end(self, epoch, logs=None):
        """POSTs the epoch number and its metrics to the configured server."""
        if requests is None:
            raise ImportError("RemoteMonitor requires the `requests` library.")
        payload = {"epoch": epoch}
        for key, value in (logs or {}).items():
            # np.ndarray and np.generic are not scalar types, therefore
            # their scalar values must be unwrapped before being placed in
            # the json-serializable payload.
            if isinstance(value, (np.ndarray, np.generic)):
                payload[key] = value.item()
            else:
                payload[key] = value
        url = self.root + self.path
        try:
            if self.send_as_json:
                requests.post(url, json=payload, headers=self.headers)
            else:
                requests.post(
                    url,
                    {self.field: json.dumps(payload)},
                    headers=self.headers,
                )
        except requests.exceptions.RequestException:
            logging.warning(
                "Warning: could not reach RemoteMonitor root server at "
                + str(self.root)
            )
@keras_export("keras.callbacks.LearningRateScheduler")
class LearningRateScheduler(Callback):
    """Learning rate scheduler.

    At the beginning of every epoch, this callback gets the updated learning
    rate value from `schedule` function provided at `__init__`, with the
    current epoch and current learning rate, and applies the updated learning
    rate on the optimizer.

    Args:
      schedule: a function that takes an epoch index (integer, indexed from 0)
          and current learning rate (float) as inputs and returns a new
          learning rate as output (float).
      verbose: int. 0: quiet, 1: update messages.

    Example:

    >>> # This function keeps the initial learning rate for the first ten epochs
    >>> # and decreases it exponentially after that.
    >>> def scheduler(epoch, lr):
    ...     if epoch < 10:
    ...         return lr
    ...     else:
    ...         return lr * tf.math.exp(-0.1)
    >>>
    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> round(model.optimizer.lr.numpy(), 5)
    0.01

    >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=15, callbacks=[callback], verbose=0)
    >>> round(model.optimizer.lr.numpy(), 5)
    0.00607
    """

    def __init__(self, schedule, verbose=0):
        super().__init__()
        # `schedule` maps (epoch[, lr]) -> new lr; `verbose` toggles the
        # per-epoch message.
        self.schedule = schedule
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        """Computes the new learning rate and applies it to the optimizer."""
        optimizer = self.model.optimizer
        if not hasattr(optimizer, "lr"):
            raise ValueError('Optimizer must have a "lr" attribute.')
        try:
            # New API: the schedule receives (epoch, current_lr).
            current_lr = float(backend.get_value(optimizer.lr))
            new_lr = self.schedule(epoch, current_lr)
        except TypeError:  # Support for old API for backward compatibility
            new_lr = self.schedule(epoch)
        if not isinstance(new_lr, (tf.Tensor, float, np.float32, np.float64)):
            raise ValueError(
                'The output of the "schedule" function '
                f"should be float. Got: {new_lr}"
            )
        if isinstance(new_lr, tf.Tensor) and not new_lr.dtype.is_floating:
            raise ValueError(
                f"The dtype of `lr` Tensor should be float. Got: "
                f"{new_lr.dtype}"
            )
        backend.set_value(optimizer.lr, backend.get_value(new_lr))
        if self.verbose > 0:
            io_utils.print_msg(
                f"\nEpoch {epoch + 1}: LearningRateScheduler setting learning "
                f"rate to {new_lr}."
            )

    def on_epoch_end(self, epoch, logs=None):
        """Exposes the learning rate used this epoch via `logs['lr']`."""
        logs = logs or {}
        logs["lr"] = backend.get_value(self.model.optimizer.lr)
def keras_model_summary(name, data, step=None):
    """Writes a TF-Keras model as JSON to as a Summary.

    Writing the TF-Keras model configuration allows the TensorBoard graph
    plugin to render a conceptual graph, as opposed to graph of ops. In case
    the model fails to serialize as JSON, it ignores and returns False.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard
        will be this name prefixed by any active name scopes.
      data: A TF-Keras Model to write.
      step: Explicit `int64`-castable monotonic step value for this summary.
        If omitted, this defaults to `tf.summary.experimental.get_step()`,
        which must not be None.

    Returns:
      True on success, or False if no summary was written because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    # Serialize first: a model that cannot be serialized must not break
    # user code, so the failure is logged and swallowed.
    try:
        json_string = data.to_json()
    except Exception as exc:
        logging.warning(
            "Model failed to serialize as JSON. Ignoring... %s", exc
        )
        return False
    summary_metadata = tf.compat.v1.SummaryMetadata()
    # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode
    # for the rationale.
    summary_metadata.plugin_data.plugin_name = "graph_keras_model"
    summary_metadata.plugin_data.content = b"1"  # version number = 1
    with tf.summary.experimental.summary_scope(
        name, "graph_keras_model", [data, step]
    ) as (tag, _):
        with tf.device("cpu:0"):
            tensor = tf.constant(json_string, dtype=tf.string)
            return tf.summary.write(
                tag=tag, tensor=tensor, step=step, metadata=summary_metadata
            )
@keras_export("keras.callbacks.TensorBoard", v1=[])
class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Weight histograms
* Sampled profiling
When used in `Model.evaluate` or regular validation
([on_test_end](https://www.tensorflow.org/api_docs/python/tf/tf_keras/callbacks/Callback#on_test_end)),
in addition to epoch summaries, there will be a summary that records
evaluation metrics vs `Model.optimizer.iterations` written. The metric names
will be prepended with `evaluation`, with `Model.optimizer.iterations` being
the step in the visualized TensorBoard.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir,
'logs') This directory should not be reused by any other callbacks.
histogram_freq: frequency (in epochs) at which to compute
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
write_steps_per_second: whether to log the training steps per second
into TensorBoard. This supports both epoch and batch frequency
logging.
update_freq: `'batch'` or `'epoch'` or integer. When using `'epoch'`,
writes the losses and metrics to TensorBoard after every epoch.
If using an integer, let's say `1000`, all metrics and losses
(including custom ones added by `Model.compile`) will be logged to
TensorBoard every 1000 batches. `'batch'` is a synonym for `1`,
meaning that they will be written every batch.
Note however that writing too frequently to TensorBoard can slow down
your training, especially when used with `tf.distribute.Strategy` as
it will incur additional synchronization overhead.
Use with `ParameterServerStrategy` is not supported.
Batch-level summary writing is also available via `train_step`
override. Please see
[TensorBoard Scalars tutorial](https://www.tensorflow.org/tensorboard/scalars_and_keras#batch-level_logging) # noqa: E501
for more details.
profile_batch: Profile the batch(es) to sample compute characteristics.
profile_batch must be a non-negative integer or a tuple of integers.
A pair of positive integers signify a range of batches to profile.
By default, profiling is disabled.
embeddings_freq: frequency (in epochs) at which embedding layers will be
visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: Dictionary which maps embedding layer names to the
filename of a file in which to save metadata for the embedding layer.
In case the same metadata file is to be
used for all embedding layers, a single filename can be passed.
Examples:
Basic usage:
```python
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Then run the tensorboard command to view the visualizations.
```
Custom batch-level summaries in a subclassed Model:
```python
class MyModel(tf.keras.Model):
def build(self, _):
self.dense = tf.keras.layers.Dense(10)
def call(self, x):
outputs = self.dense(x)
tf.summary.histogram('outputs', outputs)
return outputs
model = MyModel()
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N
# batches. In addition to any `tf.summary` contained in `Model.call`,
# metrics added in `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Custom batch-level summaries in a Functional API Model:
```python
def my_summary(x):
tf.summary.histogram('x', x)
return x
inputs = tf.keras.Input(10)
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Lambda(my_summary)(x)
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N
# batches. In addition to any `tf.summary` contained in `Model.call`,
# metrics added in `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Profiling:
```python
# Profile a single batch, e.g. the 5th batch.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=5)
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Profile a range of batches, e.g. from 10 to 20.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=(10,20))
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
```
"""
def __init__(
    self,
    log_dir="logs",
    histogram_freq=0,
    write_graph=True,
    write_images=False,
    write_steps_per_second=False,
    update_freq="epoch",
    profile_batch=0,
    embeddings_freq=0,
    embeddings_metadata=None,
    **kwargs,
):
    super().__init__()
    self._supports_tf_logs = True
    self._validate_kwargs(kwargs)
    self.log_dir = io_utils.path_to_string(log_dir)
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    self.write_steps_per_second = write_steps_per_second
    # "batch" is a legacy alias for writing summaries every batch.
    self.update_freq = update_freq if update_freq != "batch" else 1
    self.embeddings_freq = embeddings_freq
    self.embeddings_metadata = embeddings_metadata
    self._init_profile_batch(profile_batch)
    # Bookkeeping for profiling and steps-per-second measurement.
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._batch_start_time = 0
    # Lazily initialized in order to avoid creating event files when
    # not needed.
    self._writers = {}
    # Used to restore any existing `SummaryWriter` after training ends.
    self._prev_summary_state = []
def _validate_kwargs(self, kwargs):
    """Warns about V1-only arguments and rejects unrecognized ones."""
    # Arguments that existed in the V1 callback: tolerated but ignored,
    # each with its own warning. Insertion order preserves the original
    # warning order.
    v1_warnings = {
        "write_grads": (
            "`write_grads` will be ignored in TensorFlow 2.0 "
            "for the `TensorBoard` Callback."
        ),
        "batch_size": (
            "`batch_size` is no longer needed in the "
            "`TensorBoard` Callback and will be ignored "
            "in TensorFlow 2.0."
        ),
        "embeddings_layer_names": (
            "`embeddings_layer_names` is not supported in "
            "TensorFlow 2.0. Instead, all `Embedding` layers "
            "will be visualized."
        ),
        "embeddings_data": (
            "`embeddings_data` is not supported in TensorFlow "
            "2.0. Instead, all `Embedding` variables will be "
            "visualized."
        ),
    }
    for key, message in v1_warnings.items():
        if kwargs.get(key, False):
            logging.warning(message)
    supported_kwargs = set(v1_warnings)
    unrecognized_kwargs = set(kwargs.keys()) - supported_kwargs
    # Only allow kwargs that were supported in V1.
    if unrecognized_kwargs:
        raise ValueError(
            "Unrecognized arguments in `TensorBoard` Callback: "
            f"{unrecognized_kwargs}. "
            f"Supported kwargs are: {supported_kwargs}"
        )
def set_model(self, model):
    """Sets TF-Keras model and writes graph if specified."""
    self.model = model
    self._log_write_dir = self._get_log_write_dir()
    # Train and validation summaries go to sibling subdirectories, each
    # stepped by the corresponding model counter.
    self._train_dir = os.path.join(self._log_write_dir, "train")
    self._train_step = self.model._train_counter
    self._val_dir = os.path.join(self._log_write_dir, "validation")
    self._val_step = self.model._test_counter
    self._writers = {}  # Resets writers.
    self._should_write_train_graph = False
    if self.write_graph:
        # The conceptual model graph is written here once; the train
        # function graph is written lazily after the first train batch.
        self._write_keras_model_summary()
        self._should_write_train_graph = True
    if self.embeddings_freq:
        self._configure_embeddings()
@property
def _train_writer(self):
    """Lazily created summary writer for the train log directory."""
    # Created on first access so no event file appears unless training
    # summaries are actually written.
    writer = self._writers.get("train")
    if writer is None:
        writer = tf.summary.create_file_writer(self._train_dir)
        self._writers["train"] = writer
    return writer
@property
def _val_writer(self):
    """Lazily created summary writer for the validation log directory."""
    writer = self._writers.get("val")
    if writer is None:
        writer = tf.summary.create_file_writer(self._val_dir)
        self._writers["val"] = writer
    return writer
def _get_log_write_dir(self):
    """For multi-worker, only chief should write, others write to '/tmp'."""
    strategy = self.model.distribute_strategy
    return distributed_file_utils.write_dirpath(self.log_dir, strategy)
def _delete_tmp_write_dir(self):
    """Deletes tmp write directories for multi-worker."""
    strategy = self.model.distribute_strategy
    distributed_file_utils.remove_temp_dirpath(self.log_dir, strategy)
def _write_keras_model_train_graph(self):
    """Writes TF-Keras model train_function graph to TensorBoard."""
    with self._train_writer.as_default():
        with tf.summary.record_if(True):
            train_fn = self.model.train_tf_function
            # If the train_function is a `tf.function`, we can write out a
            # graph
            if hasattr(train_fn, "function_spec"):
                # NOTE(review): relies on the private
                # `_concrete_variable_creation_fn` attribute of
                # `tf.function` — confirm it still exists on TF upgrades.
                tf.summary.graph(
                    train_fn._concrete_variable_creation_fn.graph
                )
def _write_keras_model_summary(self):
    """Writes TF-Keras graph network summary to TensorBoard."""
    with self._train_writer.as_default():
        with tf.summary.record_if(True):
            # Only Functional ("graph network") and Sequential models
            # have a serializable conceptual graph to write.
            is_sequential = self.model.__class__.__name__ == "Sequential"
            if self.model._is_graph_network or is_sequential:
                keras_model_summary("keras", self.model, step=0)
def _configure_embeddings(self):
    """Configure the Projector for embeddings."""
    # TODO(omalleyt): Add integration tests.
    from tf_keras.layers import core
    from tf_keras.protobuf import projector_config_pb2

    # isort: off
    from google.protobuf import text_format

    config = projector_config_pb2.ProjectorConfig()
    for layer in self.model.layers:
        if isinstance(layer, core.Embedding):
            embedding = config.embeddings.add()
            # Embeddings are always the first layer, so this naming should
            # be consistent in any keras models checkpoints.
            name = (
                "layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
            )
            embedding.tensor_name = name
            if self.embeddings_metadata is not None:
                if isinstance(self.embeddings_metadata, str):
                    # A single path applies to every embedding layer.
                    embedding.metadata_path = self.embeddings_metadata
                else:
                    # Per-layer metadata: entries are popped as they are
                    # consumed, so leftovers below indicate unknown names.
                    if layer.name in self.embeddings_metadata.keys():
                        embedding.metadata_path = (
                            self.embeddings_metadata.pop(layer.name)
                        )
    if self.embeddings_metadata and not isinstance(
        self.embeddings_metadata, str
    ):
        # Any remaining dict keys did not match an `Embedding` layer name.
        raise ValueError(
            "Unrecognized `Embedding` layer names passed to "
            "`keras.callbacks.TensorBoard` `embeddings_metadata` "
            f"argument: {self.embeddings_metadata.keys()}"
        )
    config_pbtxt = text_format.MessageToString(config)
    path = os.path.join(self._log_write_dir, "projector_config.pbtxt")
    with tf.io.gfile.GFile(path, "w") as f:
        f.write(config_pbtxt)
def _push_writer(self, writer, step):
    """Sets the default writer for custom batch-level summaries."""
    if self.update_freq == "epoch":
        # Batch-level writing is disabled; nothing to install.
        return
    # Record only on batches aligned with the `update_freq` interval.
    should_record = lambda: tf.equal(step % self.update_freq, 0)
    # TODO(b/151339474): Fix deadlock when not using .value() here.
    summary_context = (
        writer.as_default(step.value()),
        tf.summary.record_if(should_record),
    )
    self._prev_summary_state.append(summary_context)
    # Enter the writer context first, then the record_if context;
    # `_pop_writer` exits them in reverse order.
    summary_context[0].__enter__()
    summary_context[1].__enter__()
def _pop_writer(self):
    """Pops the current writer."""
    if self.update_freq == "epoch":
        # Nothing was pushed by `_push_writer` in this mode.
        return
    # See _push_writer for the content of the previous_context, which is
    # pair of context.
    previous_context = self._prev_summary_state.pop()
    # Exit in the reverse of the order they were entered.
    previous_context[1].__exit__(*sys.exc_info())
    previous_context[0].__exit__(*sys.exc_info())
def _close_writers(self):
    """Closes (and flushes) every lazily-created summary writer."""
    for writer in self._writers.values():
        writer.close()
def _init_profile_batch(self, profile_batch):
    """Validate profile_batch value and set the range of batches to profile.

    Sets values of _start_batch and _stop_batch attributes,
    specifying the start and stop batch to profile.
    Setting `profile_batch=0` disables profiling.

    Args:
      profile_batch: The range of batches to profile. Should be a
        non-negative integer or a comma separated string of pair of positive
        integers. A pair of positive integers signify a range of batches to
        profile.

    Raises:
      ValueError: If profile_batch is not an integer or a comma separated
        pair of positive integers.
    """
    profile_batch_error_message = (
        "profile_batch must be a non-negative integer or "
        "2-tuple of positive "
        "integers. A pair of positive integers "
        "signifies a range of batches "
        f"to profile. Found: {profile_batch}"
    )
    # Support legacy way of specifying "start,stop" or "start" as str.
    if isinstance(profile_batch, str):
        profile_batch = str(profile_batch).split(",")
        profile_batch = tf.nest.map_structure(int, profile_batch)
    if isinstance(profile_batch, int):
        # A single integer means: profile exactly that one batch.
        self._start_batch = profile_batch
        self._stop_batch = profile_batch
    elif (
        isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2
    ):
        self._start_batch, self._stop_batch = profile_batch
    else:
        raise ValueError(profile_batch_error_message)
    if self._start_batch < 0 or self._stop_batch < self._start_batch:
        raise ValueError(profile_batch_error_message)
    # True when the profiler was successfully started by this callback.
    # We track the status here to make sure callbacks do not interfere with
    # each other. The callback will only stop the profiler it started.
    self._profiler_started = False
    if self._start_batch > 0:
        # Warm up and improve the profiling accuracy.
        # (Start/stop once without saving so the first real trace does not
        # pay the profiler's startup cost.)
        self._start_profiler(logdir="")
        self._stop_profiler(save=False)
    # True when a trace is running.
    self._is_tracing = False
    # Setting `profile_batch=0` disables profiling.
    self._should_trace = not (
        self._start_batch == 0 and self._stop_batch == 0
    )
def on_train_begin(self, logs=None):
    """Resets batch bookkeeping and installs the train summary writer."""
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._push_writer(self._train_writer, self._train_step)
def on_train_end(self, logs=None):
    """Removes the train writer and finalizes any in-flight trace."""
    self._pop_writer()
    if self._is_tracing:
        # Training ended before `_stop_batch` was reached; export the
        # trace now so it is not lost.
        self._stop_trace()
    self._close_writers()
    self._delete_tmp_write_dir()
def on_test_begin(self, logs=None):
    """Installs the validation writer as the default summary writer."""
    self._push_writer(self._val_writer, self._val_step)
def on_test_end(self, logs=None):
    """Writes evaluation metrics vs optimizer iterations, pops the writer."""
    optimizer = self.model.optimizer
    if optimizer and hasattr(optimizer, "iterations"):
        with tf.summary.record_if(True), self._val_writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(
                    "evaluation_" + name + "_vs_iterations",
                    value,
                    step=optimizer.iterations.read_value(),
                )
    self._pop_writer()
def _implements_train_batch_hooks(self):
    """Returns whether per-batch callbacks are needed at all."""
    # Only call batch hooks when tracing or write_steps_per_second are
    # enabled
    return self._should_trace or self.write_steps_per_second
def on_train_batch_begin(self, batch, logs=None):
    """Counts batches, stamps the batch start time, maybe starts a trace."""
    self._global_train_batch += 1
    if self.write_steps_per_second:
        self._batch_start_time = time.time()
    # Start the profiler trace exactly at the configured start batch.
    if (
        self._should_trace
        and self._global_train_batch == self._start_batch
    ):
        self._start_trace()
def on_train_batch_end(self, batch, logs=None):
    """Writes batch-level summaries and maybe stops an active trace."""
    if self._should_write_train_graph:
        # Written once, after the first batch, when the train function
        # graph is guaranteed to exist.
        self._write_keras_model_train_graph()
        self._should_write_train_graph = False
    if self.write_steps_per_second:
        batch_run_time = time.time() - self._batch_start_time
        tf.summary.scalar(
            "batch_steps_per_second",
            1.0 / batch_run_time,
            step=self._train_step,
        )
    # `logs` isn't necessarily always a dict. For example, when using
    # `tf.distribute.experimental.ParameterServerStrategy`, a
    # `tf.distribute.experimental.coordinator.RemoteValue` will be passed.
    # For now, we just disable `update_freq` in those cases.
    if isinstance(logs, dict):
        for name, value in logs.items():
            tf.summary.scalar("batch_" + name, value, step=self._train_step)
    if self._should_trace and self._is_tracing:
        if self._global_train_batch >= self._stop_batch:
            self._stop_trace()
def on_epoch_begin(self, epoch, logs=None):
    """Snapshots iteration count and wall time for steps/sec logging."""
    # Keeps track of epoch for profiling.
    if self.write_steps_per_second:
        self._previous_epoch_iterations = (
            self.model.optimizer.iterations.numpy()
        )
        self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    self._log_epoch_metrics(epoch, logs)
    # Histogram and embedding summaries are gated by their own epoch
    # frequencies (0 disables them).
    if self.histogram_freq and epoch % self.histogram_freq == 0:
        self._log_weights(epoch)
    if self.embeddings_freq and epoch % self.embeddings_freq == 0:
        self._log_embeddings(epoch)
def _start_trace(self):
    """Starts graph tracing and the profiler for the current batch range."""
    # Graph tracing goes through tf.summary; the profiler is started
    # separately so its output lands in `self.log_dir`.
    tf.summary.trace_on(graph=True, profiler=False)
    self._start_profiler(logdir=self.log_dir)
    self._is_tracing = True
def _stop_trace(self, batch=None):
    """Logs the trace graph to TensorBoard."""
    if batch is None:
        # Called without a batch (e.g. from `on_train_end`); attribute
        # the export to the configured stop batch.
        batch = self._stop_batch
    with self._train_writer.as_default():
        with tf.summary.record_if(True):
            # TODO(b/126388999): Remove step info in the summary name.
            tf.summary.trace_export(name="batch_%d" % batch, step=batch)
    self._stop_profiler()
    self._is_tracing = False
def _collect_learning_rate(self, logs):
    """Adds the current learning rate to `logs` when it is a schedule.

    Mutates and returns `logs`. The attribute holding the schedule differs
    between the new-style optimizer (`_learning_rate`) and the legacy one
    (`lr`).
    """
    if isinstance(self.model.optimizer, optimizer.Optimizer):
        lr_schedule = getattr(self.model.optimizer, "_learning_rate", None)
    else:
        lr_schedule = getattr(self.model.optimizer, "lr", None)
    # Only schedules are logged here; a plain float/variable LR is skipped.
    if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule):
        logs["learning_rate"] = lr_schedule(self.model.optimizer.iterations)
    return logs
def _compute_steps_per_second(self):
    """Returns optimizer steps per second since the start of the epoch."""
    current_iteration = self.model.optimizer.iterations.numpy()
    # Both reference points are set in `on_epoch_begin`.
    time_since_epoch_begin = time.time() - self._epoch_start_time
    steps_per_second = (
        current_iteration - self._previous_epoch_iterations
    ) / time_since_epoch_begin
    return steps_per_second
def _log_epoch_metrics(self, epoch, logs):
    """Writes epoch metrics out as scalar summaries.

    Args:
        epoch: Int. The global step to use for TensorBoard.
        logs: Dict. Keys are scalar summary names, values are scalars.
    """
    if not logs:
        return
    # Split metrics by the `val_` prefix so train and validation series go
    # to their separate summary writers.
    train_logs = dict()
    val_logs = dict()
    for k, v in logs.items():
        if k.startswith("val_"):
            val_logs[k] = v
        else:
            train_logs[k] = v
    train_logs = self._collect_learning_rate(train_logs)
    if self.write_steps_per_second:
        train_logs["steps_per_second"] = self._compute_steps_per_second()
    with tf.summary.record_if(True):
        if train_logs:
            with self._train_writer.as_default():
                for name, value in train_logs.items():
                    tf.summary.scalar("epoch_" + name, value, step=epoch)
        if val_logs:
            with self._val_writer.as_default():
                for name, value in val_logs.items():
                    name = name[4:]  # Remove 'val_' prefix.
                    tf.summary.scalar("epoch_" + name, value, step=epoch)
def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with self._train_writer.as_default():
        with tf.summary.record_if(True):
            for layer in self.model.layers:
                for weight in layer.weights:
                    # ':' is not a valid summary-tag character.
                    weight_name = weight.name.replace(":", "_")
                    # Add a suffix to prevent summary tag name collision.
                    histogram_weight_name = weight_name + "/histogram"
                    tf.summary.histogram(
                        histogram_weight_name, weight, step=epoch
                    )
                    if self.write_images:
                        # Add a suffix to prevent summary tag name
                        # collision.
                        image_weight_name = weight_name + "/image"
                        self._log_weight_as_image(
                            weight, image_weight_name, epoch
                        )
            self._train_writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image.

    Args:
        weight: The weight tensor; reshaped to a 4-D image batch when its
            rank allows it (rank > 3 weights, e.g. 3D convnets, are skipped).
        weight_name: Summary tag to write under.
        epoch: Int step for the summary.
    """
    w_img = tf.squeeze(weight)
    shape = backend.int_shape(w_img)
    if len(shape) == 1:  # Bias case
        w_img = tf.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
        # Orient so the image is wider than tall.
        if shape[0] > shape[1]:
            w_img = tf.transpose(w_img)
            shape = backend.int_shape(w_img)
        w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
        if backend.image_data_format() == "channels_last":
            # Switch to channels_first to display every kernel as a separate
            # image.
            w_img = tf.transpose(w_img, perm=[2, 0, 1])
            shape = backend.int_shape(w_img)
        w_img = tf.reshape(w_img, [shape[0], shape[1], shape[2], 1])
    shape = backend.int_shape(w_img)
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
        tf.summary.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
    """Saves model weights as an embeddings checkpoint under the train log dir."""
    # NOTE(review): the `keras_embedding.ckpt-<epoch>` naming appears to
    # match what the TensorBoard projector plugin expects -- confirm.
    embeddings_ckpt = os.path.join(
        self._log_write_dir,
        "train",
        f"keras_embedding.ckpt-{epoch}",
    )
    self.model.save_weights(embeddings_ckpt)
def _start_profiler(self, logdir):
    """Starts the profiler if currently inactive.

    Args:
        logdir: Directory where profiler results will be saved.
    """
    if self._profiler_started:
        return
    try:
        tf.profiler.experimental.start(logdir=logdir)
        self._profiler_started = True
    except tf.errors.AlreadyExistsError as e:
        # Profiler errors should not be fatal.
        logging.error("Failed to start profiler: %s", e.message)
def _stop_profiler(self, save=True):
    """Stops the profiler if currently active.

    Args:
        save: Whether to save the profiler results to TensorBoard.
    """
    if not self._profiler_started:
        return
    try:
        tf.profiler.experimental.stop(save=save)
    except tf.errors.UnavailableError as e:
        # Profiler errors should not be fatal.
        logging.error("Failed to stop profiler: %s", e.message)
    finally:
        # Always clear the flag so a later `_start_profiler` can retry.
        self._profiler_started = False
@keras_export("keras.callbacks.ReduceLROnPlateau")
class ReduceLROnPlateau(Callback):
    """Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This callback monitors a
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Example:

    ```python
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5, min_lr=0.001)
    model.fit(X_train, Y_train, callbacks=[reduce_lr])
    ```

    Args:
        monitor: quantity to be monitored.
        factor: factor by which the learning rate will be reduced.
            `new_lr = lr * factor`.
        patience: number of epochs with no improvement after which learning
            rate will be reduced.
        verbose: int. 0: quiet, 1: update messages.
        mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
            the learning rate will be reduced when the
            quantity monitored has stopped decreasing; in `'max'` mode it
            will be reduced when the quantity monitored has stopped
            increasing; in `'auto'` mode, the direction is automatically
            inferred from the name of the monitored quantity.
        min_delta: threshold for measuring the new optimum, to only focus on
            significant changes.
        cooldown: number of epochs to wait before resuming normal operation
            after lr has been reduced.
        min_lr: lower bound on the learning rate.

    Raises:
        ValueError: if `factor >= 1.0` (the learning rate would not shrink).
    """

    def __init__(
        self,
        monitor="val_loss",
        factor=0.1,
        patience=10,
        verbose=0,
        mode="auto",
        min_delta=1e-4,
        cooldown=0,
        min_lr=0,
        **kwargs,
    ):
        super().__init__()
        self.monitor = monitor
        if factor >= 1.0:
            raise ValueError(
                "ReduceLROnPlateau does not support "
                f"a factor >= 1.0. Got {factor}"
            )
        # `epsilon` was the historical name of `min_delta`; keep accepting
        # it for backward compatibility, with a deprecation warning.
        if "epsilon" in kwargs:
            min_delta = kwargs.pop("epsilon")
            logging.warning(
                "`epsilon` argument is deprecated and "
                "will be removed, use `min_delta` instead."
            )
        self.factor = factor
        self.min_lr = min_lr
        self.min_delta = min_delta
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.wait = 0
        self.best = 0
        self.mode = mode
        self.monitor_op = None
        self._reset()

    def _reset(self):
        """Resets wait counter and cooldown counter."""
        if self.mode not in ["auto", "min", "max"]:
            logging.warning(
                "Learning rate reduction mode %s is unknown, "
                "fallback to auto mode.",
                self.mode,
            )
            self.mode = "auto"
        if self.mode == "min" or (
            self.mode == "auto" and "acc" not in self.monitor
        ):
            self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
            # `np.Inf` was removed in NumPy 2.0; `np.inf` is the portable
            # spelling with identical value.
            self.best = np.inf
        else:
            self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
            self.best = -np.inf
        self.cooldown_counter = 0
        self.wait = 0

    def on_train_begin(self, logs=None):
        self._reset()

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Expose the current learning rate so downstream callbacks (e.g.
        # CSVLogger) can record it.
        logs["lr"] = backend.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning(
                "Learning rate reduction is conditioned on metric `%s` "
                "which is not available. Available metrics are: %s",
                self.monitor,
                ",".join(list(logs.keys())),
            )
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0
            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                self.wait += 1
                if self.wait >= self.patience:
                    old_lr = backend.get_value(self.model.optimizer.lr)
                    # Only reduce when strictly above the floor.
                    if old_lr > np.float32(self.min_lr):
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        backend.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            io_utils.print_msg(
                                f"\nEpoch {epoch +1}: "
                                "ReduceLROnPlateau reducing "
                                f"learning rate to {new_lr}."
                            )
                        self.cooldown_counter = self.cooldown
                        self.wait = 0

    def in_cooldown(self):
        """Whether the callback is still in its post-reduction cooldown."""
        return self.cooldown_counter > 0
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
    """Callback that streams epoch results to a CSV file.

    Supports all values that can be represented as a string,
    including 1D iterables such as `np.ndarray`.

    Example:

    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```

    Args:
        filename: Filename of the CSV file, e.g. `'run/log.csv'`.
        separator: String used to separate elements in the CSV file.
            Separator string ("delimiter") must be a 1-character string.
        append: Boolean. True: append if file exists (useful for continuing
            training). False: overwrite existing file.
    """

    def __init__(self, filename, separator=",", append=False):
        self.sep = separator
        self.filename = io_utils.path_to_string(filename)
        self.append = append
        # Writer and fieldnames are created lazily on the first epoch end,
        # once the set of logged metrics is known.
        self.writer = None
        self.keys = None
        self.append_header = True
        super().__init__()

    def on_train_begin(self, logs=None):
        # Only write a header row when starting a fresh file or appending
        # to an empty one.
        if self.append:
            if tf.io.gfile.exists(self.filename):
                with tf.io.gfile.GFile(self.filename, "r") as f:
                    self.append_header = not bool(len(f.readline()))
            mode = "a"
        else:
            mode = "w"
        self.csv_file = tf.io.gfile.GFile(self.filename, mode)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        def handle_value(k):
            # Render 1D iterables as a quoted "[a, b, ...]" cell; strings
            # and scalars (incl. 0-d ndarrays) pass through unchanged.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, str):
                return k
            elif (
                isinstance(k, collections.abc.Iterable)
                and not is_zero_dim_ndarray
            ):
                return f"\"[{', '.join(map(str, k))}]\""
            else:
                return k

        if self.keys is None:
            self.keys = sorted(logs.keys())
            # When validation_freq > 1, `val_` keys are not in first epoch logs
            # Add the `val_` keys so that its part of the fieldnames of writer.
            val_keys_found = False
            for key in self.keys:
                if key.startswith("val_"):
                    val_keys_found = True
                    break
            if not val_keys_found:
                self.keys.extend(["val_" + k for k in self.keys])

        if not self.writer:

            class CustomDialect(csv.excel):
                # Use the user-configured separator instead of ','.
                delimiter = self.sep

            fieldnames = ["epoch"] + self.keys

            self.writer = csv.DictWriter(
                self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
            )
            if self.append_header:
                self.writer.writeheader()

        row_dict = collections.OrderedDict({"epoch": epoch})
        # Metrics absent this epoch (e.g. skipped validation) become "NA".
        row_dict.update(
            (key, handle_value(logs.get(key, "NA"))) for key in self.keys
        )
        self.writer.writerow(row_dict)
        # Flush so the log survives a crash mid-training.
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
@keras_export("keras.callbacks.LambdaCallback")
class LambdaCallback(Callback):
    r"""Callback for creating simple, custom callbacks on-the-fly.

    This callback is constructed with anonymous functions that will be called
    at the appropriate time (during `Model.{fit | evaluate | predict}`).
    Note that the callbacks expects positional arguments, as:

    - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
      `epoch`, `logs`
    - `on_batch_begin` and `on_batch_end` expect two positional arguments:
      `batch`, `logs`
    - `on_train_begin` and `on_train_end` expect one positional argument:
      `logs`

    Args:
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_batch_begin: called at the beginning of every batch.
        on_batch_end: called at the end of every batch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.

    Example:

    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch,logs: print(batch))

    # Stream the epoch loss to a file in JSON format. The file content
    # is not well-formed JSON but rather has a JSON object per line.
    import json
    json_log = open('loss_log.json', mode='wt', buffering=1)
    json_logging_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: json_log.write(
            json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
        on_train_end=lambda logs: json_log.close()
    )

    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])

    model.fit(...,
              callbacks=[batch_print_callback,
                         json_logging_callback,
                         cleanup_callback])
    ```
    """

    def __init__(
        self,
        on_epoch_begin=None,
        on_epoch_end=None,
        on_batch_begin=None,
        on_batch_end=None,
        on_train_begin=None,
        on_train_end=None,
        **kwargs,
    ):
        super().__init__()
        # Extra keyword arguments become instance attributes verbatim,
        # preserving historical behavior.
        self.__dict__.update(kwargs)
        # Install each supplied hook on the instance, shadowing the
        # corresponding no-op method inherited from `Callback`.
        hook_overrides = {
            "on_epoch_begin": on_epoch_begin,
            "on_epoch_end": on_epoch_end,
            "on_batch_begin": on_batch_begin,
            "on_batch_end": on_batch_end,
            "on_train_begin": on_train_begin,
            "on_train_end": on_train_end,
        }
        for hook_name, hook_fn in hook_overrides.items():
            if hook_fn is not None:
                setattr(self, hook_name, hook_fn)
@keras_export("keras.callbacks.experimental.UpdateEmbeddingCallback")
class UpdateEmbeddingCallback(TimedThread, Callback):
    """A callback to update the DynamicEmbedding layer at specific time
    interval.

    Updating the embedding matrix would mean that the optimizer variables will
    be reset in this callback and this could have potential side effects. This
    means that any existing slot variables associated with the optimizer will
    likely be discarded when the optimizer is rebuilt. This affects optimizers
    that rely on states of optimizer slot variables.

    Example:

    ```
    # Generate dummy data
    train_data = np.array([
        ['a', 'j', 'c', 'd', 'e'],
        ['a', 'h', 'i', 'j', 'b'],
        ['i', 'h', 'c', 'j', 'e'],
    ])
    train_labels = np.array([0, 1, 2])
    vocab = tf.constant(['a', 'b', 'c', 'd', 'e'])
    eviction_policy = 'LFU'
    # Define the model
    model = tf.keras.models.Sequential([
        DynamicEmbedding(
            input_dim=5,
            output_dim=2,
            input_length=5,
            eviction_policy=eviction_policy,
            initial_vocabulary=vocab,
        ),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(3, activation='softmax'),
    ])

    # Compile the model
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    # update the vocabulary every 1 second
    update_embedding_callback = UpdateEmbeddingCallback(
        model.layers[0], interval=1
    )
    with update_embedding_callback:
        result = model.fit(
            train_data,
            train_labels,
            epochs=100,
            batch_size=1,
            callbacks=[update_embedding_callback],
        )
    ```
    """

    def __init__(self, dynamic_embedding_layer, interval):
        """Initialize Timed Callback object.

        Args:
            dynamic_embedding_layer: The dynamic embedding
                layer to be updated.
            interval: the interval, in seconds, to wait between calls to the
                thread function. The thread function here updates the
                embeddings matrix and resets the optimizer states.
        """
        self._epoch = 0
        # Both bases need explicit initialization (multiple inheritance).
        TimedThread.__init__(self, interval)
        Callback.__init__(self)
        self._dynamic_embedding_layer = dynamic_embedding_layer
        # Capture the strategy active at construction time so updates run
        # under the same distribution scope as training.
        self.strategy = tf.distribute.get_strategy()

    def on_interval(self):
        """Periodic thread body: updates embeddings, then resets optimizer state."""
        try:
            critical_section = tf.CriticalSection()

            # Using `tf.CriticalSection` when updating embeddings using timed
            # thread can help ensure thread safety and prevent race conditions
            # in the shared variables.
            def execute_critical_section():
                critical_section.execute(
                    lambda: self._dynamic_embedding_layer.update_embeddings(
                        self.strategy  # pylint: disable=g-long-lambda
                    )
                )

            # update embeddings across all devices if distributed training is
            # used
            self.strategy.run(execute_critical_section)
            # update optimizer variables across all devices if distributed
            # training is used.
            self.strategy.run(
                lambda: self._reset_optimizer()
            )  # pylint: disable=unnecessary-lambda
        except AttributeError:
            # NOTE(review): this presumably fires when the model/optimizer is
            # not yet attached to the callback -- confirm the exact trigger.
            logging.info(
                "Time interval specified to the UpdateEmbeddingCallback may be"
                " too small, please try increasing the value of `interval`."
            )

    def _reset_optimizer(self):
        """Resetting the optimizer variables.

        Resetting the optimizer variables is necessary after updating the
        variable in the layer. This ensures that the optimizer is working with
        a consistent internal state. This helps to prevent unexpected behavior
        and can lead to more stable and faster training of the model.
        """
        # Only slot variables belonging to the dynamic embedding are zeroed.
        for var in self.model.optimizer.variables():
            if "dynamic_embedding" in var.name:
                backend.set_value(var, backend.zeros_like(var))

    def on_epoch_begin(self, epoch, logs=None):
        # Track the current epoch for use by the timed thread.
        self._epoch = epoch
|
tf-keras/tf_keras/callbacks.py/0
|
{
"file_path": "tf-keras/tf_keras/callbacks.py",
"repo_id": "tf-keras",
"token_count": 59353
}
| 222 |
# Description:
# keras/distribute package is intended to serve as the centralized place for things
# related to dist-strat used by TF-Keras..
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test")
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") # buildifier: disable=same-origin-load
load("@org_keras//tf_keras:tf_keras.bzl", "distribute_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
# TODO(scottzhu): Remove this deps when distribute test are converted to integration test.
default_visibility = [
"//tf_keras:friends",
"//third_party/tensorflow/python/distribute:__pkg__",
"//third_party/tensorflow/tools/pip_package:__pkg__",
],
licenses = ["notice"],
)
# Core distribution-strategy utilities shared by TF-Keras training code.
py_library(
    name = "distribute",
    srcs = [
        "__init__.py",
        "distributed_training_utils.py",
        "distributed_training_utils_v1.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":distribute_coordinator_utils",
        "//:expect_tensorflow_installed",
        "//tf_keras:backend",
        "//tf_keras:callbacks",
        "//tf_keras:callbacks_v1",
        "//tf_keras:constraints",
        "//tf_keras:losses",
        "//tf_keras:regularizers",
        "//tf_keras/initializers",
        "//tf_keras/mixed_precision:policy",
        "//tf_keras/optimizers",
        "//tf_keras/utils:engine_utils",
        "//tf_keras/utils:mode_keys",
    ],
)

# Aggregate of all distribute test helpers, exported for the pip package.
py_library(
    name = "distribute_test_lib_pip",
    srcs_version = "PY3",
    deps = [
        ":dataset_creator_model_fit_test_base",
        ":distribute_strategy_test_lib",
        ":keras_correctness_test_lib",
        ":keras_test_lib",
        ":model_combinations",
        ":multi_worker_testing_utils",
        ":saved_model_test_base",
        ":test_example",
    ],
)

# Optimizer parameterizations used by combinatorial distribute tests.
py_library(
    name = "optimizer_combinations",
    srcs = ["optimizer_combinations.py"],
    srcs_version = "PY3",
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras/optimizers",
        "//tf_keras/optimizers/legacy:optimizers",
    ],
)

# Checkpoint-based training-state persistence for multi-worker fault tolerance.
py_library(
    name = "worker_training_state",
    srcs = [
        "worker_training_state.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":distributed_file_utils",
        "//:expect_tensorflow_installed",
        "//tf_keras:backend",
        "//tf_keras/utils:mode_keys",
    ],
)

py_library(
    name = "model_collection_base",
    srcs = ["model_collection_base.py"],
    srcs_version = "PY3",
)

py_library(
    name = "model_combinations",
    srcs = ["model_combinations.py"],
    srcs_version = "PY3",
    deps = [
        ":simple_models",
        "//:expect_tensorflow_installed",
    ],
)

# Small reference models used as fixtures in distribute tests.
py_library(
    name = "simple_models",
    srcs = ["simple_models.py"],
    srcs_version = "PY3",
    deps = [
        ":model_collection_base",
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)

# Shared base class for SavedModel save/load distribute tests.
py_library(
    name = "saved_model_test_base",
    srcs = ["saved_model_test_base.py"],
    srcs_version = "PY3",
    deps = [
        ":model_combinations",
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras/testing_infra:test_utils",
    ],
)
cuda_py_test(
    name = "model_checkpoint_test",
    srcs = ["model_checkpoint_test.py"],
    python_version = "PY3",
    shard_count = 4,
    deps = [
        ":multi_worker_testing_utils",
        ":worker_training_state",
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)

cuda_py_test(
    name = "worker_training_state_test",
    srcs = ["worker_training_state_test.py"],
    python_version = "PY3",
    shard_count = 4,
    deps = [
        ":multi_worker_testing_utils",
        ":worker_training_state",
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)

distribute_py_test(
    name = "checkpointing_test",
    srcs = ["checkpointing_test.py"],
    main = "checkpointing_test.py",
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras/optimizers/legacy:optimizers",
    ],
)

cuda_py_test(
    name = "collective_all_reduce_strategy_test",
    srcs = ["collective_all_reduce_strategy_test.py"],
    python_version = "PY3",
    tags = [
        "multi_and_single_gpu",
        "nomsan",  # TODO(b/162894966)
        "notsan",  # TODO(b/171040408): data race
    ],
    # b/155301154 broken with XLA:GPU
    xla_enable_strict_auto_jit = True,
    deps = [
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras/engine",
        "//tf_keras/layers",
        "//tf_keras/mixed_precision:policy",
        "//tf_keras/mixed_precision:test_util",
        "//tf_keras/testing_infra:test_utils",
    ],
)
# Numerical-correctness test for custom training loops (CTL) under strategies.
distribute_py_test(
    name = "ctl_correctness_test",
    srcs = ["ctl_correctness_test.py"],
    env = {
        "CUDA_MODULE_LOADING": "LAZY",
    },
    main = "ctl_correctness_test.py",
    shard_count = 10,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # times out
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":optimizer_combinations",
        ":strategy_combinations",
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras/testing_infra:test_utils",
    ],
)

distribute_py_test(
    name = "custom_training_loop_metrics_test",
    srcs = ["custom_training_loop_metrics_test.py"],
    disable_mlir_bridge = False,
    main = "custom_training_loop_metrics_test.py",
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras/metrics",
    ],
)

distribute_py_test(
    name = "custom_training_loop_models_test",
    srcs = ["custom_training_loop_models_test.py"],
    main = "custom_training_loop_models_test.py",
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # times out
        "nomultivm",  # TODO(b/170502145)
        "notsan",  # TODO(b/170954243)
    ],
    tpu_tags = [
        "no_oss",  # b/153615544.
        "notsan",  # TODO(b/170869466)
    ],
    deps = [
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)

distribute_py_test(
    name = "custom_training_loop_optimizer_test",
    srcs = ["custom_training_loop_optimizer_test.py"],
    disable_mlir_bridge = False,
    main = "custom_training_loop_optimizer_test.py",
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_tensorflow_installed",
        "//tf_keras/optimizers/legacy:optimizers",
    ],
)

# Test sources packaged as a library so other tests can reuse them.
py_library(
    name = "distribute_strategy_test_lib",
    srcs = [
        "distribute_strategy_test.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":multi_worker_testing_utils",
        ":optimizer_combinations",
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras/testing_infra:test_combinations",
    ],
)
distribute_py_test(
    name = "keras_premade_models_test",
    size = "medium",
    srcs = ["keras_premade_models_test.py"],
    disable_mlir_bridge = False,
    env = {
        "CUDA_MODULE_LOADING": "LAZY",
    },
    full_precision = True,
    main = "keras_premade_models_test.py",
    shard_count = 8,
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
        "requires-mem:28g",  # spawns multiple processes.
    ],
    deps = [
        ":distribute_strategy_test_lib",
        ":keras_correctness_test_lib",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "distribute_strategy_test",
    size = "medium",
    srcs = ["distribute_strategy_test.py"],
    disable_mlir_bridge = True,  # TODO(b/170352626)
    full_precision = True,
    main = "distribute_strategy_test.py",
    python_version = "PY3",
    shard_count = 20,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # TODO(b/182391774)
        "no_oss",  # TODO(b/191770103)
        "no_rocm",  # times out on ROCm
        "no_windows_gpu",
        "noguitar",  # TODO(b/172354344)
        "nomultivm",  # TODO(b/170502145)
        "notpu",  # b/188061768
        "notsan",
    ],
    tpu_tags = [
        "no_oss",  # b/155502591
    ],
    deps = [
        ":distribute_strategy_test_lib",
        ":optimizer_combinations",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "distributed_training_utils_test",
    srcs = ["distributed_training_utils_test.py"],
    disable_mlir_bridge = False,
    full_precision = True,
    main = "distributed_training_utils_test.py",
    tags = [
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":distribute",
        ":distribute_strategy_test_lib",
        "//:expect_tensorflow_installed",
        "//tf_keras:callbacks",
    ],
)

# Shared sources for the per-model-family correctness tests below.
py_library(
    name = "keras_correctness_test_lib",
    srcs = [
        "keras_correctness_test_base.py",
        "keras_dnn_correctness_test.py",
        "keras_embedding_model_correctness_test.py",
        "keras_image_model_correctness_test.py",
        "keras_rnn_model_correctness_test.py",
        "keras_stateful_lstm_model_correctness_test.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras:backend",
        "//tf_keras/testing_infra:test_combinations",
    ],
)
distribute_py_test(
    name = "keras_dnn_correctness_test",
    size = "medium",
    srcs = ["keras_dnn_correctness_test.py"],
    disable_mlir_bridge = True,  # TODO(b/170352626)
    full_precision = True,
    main = "keras_dnn_correctness_test.py",
    # Shard count is set to an odd number to distribute tasks across
    # shards more evenly.
    shard_count = 19,
    tags = [
        "multi_and_single_gpu",
        "no_oss",  # TODO(b/173021094)
        "no_rocm",  # times out on ROCm
        "no_windows_gpu",
        "nogpu",  # TODO(b/170905292)
        "nomultivm",  # TODO(b/170502145)
        "notap",  # TODO(b/178803051): flaky
        "notsan",
    ],
    deps = [
        ":keras_correctness_test_lib",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "keras_embedding_model_correctness_test",
    size = "medium",
    srcs = ["keras_embedding_model_correctness_test.py"],
    disable_mlir_bridge = True,  # TODO(b/170352626)
    full_precision = True,
    main = "keras_embedding_model_correctness_test.py",
    shard_count = 8,
    tags = [
        "broken",  # b/170975619
        "multi_and_single_gpu",
        "no_cuda_asan",  # times out
        "no_rocm",
        "no_windows_gpu",
        "nomultivm",  # TODO(b/170502145)
        "notsan",
    ],
    deps = [
        ":keras_correctness_test_lib",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "keras_image_model_correctness_test",
    size = "medium",
    srcs = ["keras_image_model_correctness_test.py"],
    disable_mlir_bridge = True,  # TODO(b/170352626)
    full_precision = True,
    main = "keras_image_model_correctness_test.py",
    shard_count = 16,
    tags = [
        "multi_and_single_gpu",
        "no_rocm",  # times out on ROCm
        "no_windows_gpu",
        "noasan",  # TODO(b/337374867) fails with -fsanitize=null
        "nomultivm",  # TODO(b/170502145)
        "notpu",  # TODO(b/210148661)
        "notsan",
    ],
    xla_enable_strict_auto_jit = False,  # Tensorflow also fails.
    deps = [
        ":keras_correctness_test_lib",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "keras_metrics_test",
    srcs = ["keras_metrics_test.py"],
    disable_mlir_bridge = False,
    env = {
        "CUDA_MODULE_LOADING": "LAZY",
    },
    main = "keras_metrics_test.py",
    shard_count = 8,
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_tensorflow_installed",
        "//tf_keras/metrics",
    ],
)

distribute_py_test(
    name = "keras_models_test",
    srcs = ["keras_models_test.py"],
    main = "keras_models_test.py",
    tags = [
        "multi_and_single_gpu",
        "no_oss",  # TODO(b/202850066)
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":strategy_combinations",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)
distribute_py_test(
    name = "keras_rnn_model_correctness_test",
    size = "medium",
    srcs = ["keras_rnn_model_correctness_test.py"],
    full_precision = True,
    main = "keras_rnn_model_correctness_test.py",
    # Shard count is set to an odd number to distribute tasks across
    # shards more evenly.
    shard_count = 31,
    tags = [
        "multi_and_single_gpu",
        "no_oss",  # TODO(b/277925387)
        "no_rocm",  # Would require size large, but that effectively disables the test for presubmits.
        "no_windows_gpu",
        "noasan",  # TODO(b/337374867) fails with -fsanitize=null
        "nomultivm",  # TODO(b/170502145)
        "notpu",  # TODO(b/153672562)
        "notsan",
    ],
    deps = [
        ":keras_correctness_test_lib",
        "//:expect_portpicker_installed",
    ],
)

distribute_py_test(
    name = "keras_save_load_test",
    size = "medium",
    srcs = ["keras_save_load_test.py"],
    full_precision = True,
    main = "keras_save_load_test.py",
    shard_count = 7,
    tags = [
        "multi_and_single_gpu",
        "no_rocm",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":saved_model_test_base",
        "//tf_keras/saving",
    ],
)

distribute_py_test(
    name = "keras_stateful_lstm_model_correctness_test",
    size = "medium",
    srcs = ["keras_stateful_lstm_model_correctness_test.py"],
    disable_mlir_bridge = False,
    full_precision = True,
    main = "keras_stateful_lstm_model_correctness_test.py",
    shard_count = 4,
    tags = [
        "multi_and_single_gpu",
        "no_pip",
        "no_windows_gpu",
        "nomultivm",  # TODO(b/170502145)
        "notsan",
    ],
    deps = [
        ":keras_correctness_test_lib",
    ],
)

distribute_py_test(
    name = "keras_utils_test",
    srcs = ["keras_utils_test.py"],
    disable_mlir_bridge = True,  # TODO(b/170352626)
    full_precision = True,
    main = "keras_utils_test.py",
    shard_count = 4,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # times out
        "no_pip",  # The test imports distribute_strategy_test which is not in the pip package.
        "no_windows_gpu",
        "nomultivm",  # TODO(b/170502145)
        "notsan",
    ],
    deps = [
        ":distribute_strategy_test_lib",
        ":keras_test_lib",
        ":optimizer_combinations",
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
    ],
)
py_library(
    name = "keras_test_lib",
    srcs = [
        "keras_utils_test.py",
    ],
    srcs_version = "PY3",
    deps = [
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
    ],
)

cuda_py_test(
    name = "keras_optimizer_v2_test",
    srcs = ["keras_optimizer_v2_test.py"],
    python_version = "PY3",
    shard_count = 4,
    tags = [
        "multi_and_single_gpu",
        "tf_integration_test",
    ],
    deps = [
        ":keras_test_lib",
    ],
)

distribute_py_test(
    name = "minimize_loss_test",
    srcs = ["minimize_loss_test.py"],
    main = "minimize_loss_test.py",
    tags = [
        "multi_and_single_gpu",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":optimizer_combinations",
        ":test_example",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras/layers",
    ],
)

cuda_py_test(
    name = "mirrored_strategy_test",
    srcs = ["mirrored_strategy_test.py"],
    python_version = "PY3",
    tags = [
        "multi_and_single_gpu",
        "no_windows_gpu",  # TODO(b/130551176)
    ],
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras/engine",
        "//tf_keras/layers/core",
        "//tf_keras/utils:kpl_test_utils",
    ],
)

cuda_py_test(
    name = "mirrored_variable_test",
    srcs = ["mirrored_variable_test.py"],
    python_version = "PY3",
    tags = [
        "guitar",
        "multi_and_single_gpu",
    ],
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras/layers/core",
        "//tf_keras/metrics",
    ],
)

cuda_py_test(
    name = "multi_worker_test",
    srcs = ["multi_worker_test.py"],
    python_version = "PY3",
    shard_count = 2,
    tags = [
        "multi_and_single_gpu",
        "no_oss",  # TODO(b/130369494): Investigate why it times out on OSS.
    ],
    deps = [
        ":multi_worker_testing_utils",
        "//:expect_absl_installed",  # absl/testing:parameterized
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras:backend",
        "//tf_keras:callbacks",
        "//tf_keras:engine",
        "//tf_keras/optimizers",
        "//tf_keras/optimizers/legacy:optimizers",
        "//tf_keras/utils:kpl_test_utils",
    ],
)

tf_py_test(
    name = "multi_worker_callback_tf2_test",
    srcs = ["multi_worker_callback_tf2_test.py"],
    python_version = "PY3",
    shard_count = 5,
    tags = [
        "no_windows",  # TODO(b/184424727): Re-enable this.
    ],
    deps = [
        ":distributed_file_utils",
        ":multi_worker_testing_utils",
        "//:expect_portpicker_installed",
        "//:expect_tensorflow_installed",
    ],
)

# Helpers for spinning up in-process multi-worker clusters in tests.
py_library(
    name = "multi_worker_testing_utils",
    srcs = [
        "multi_worker_testing_utils.py",
    ],
    srcs_version = "PY3",
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras/optimizers/legacy:optimizers",
    ],
)

py_library(
    name = "tpu_strategy_test_utils",
    srcs = ["tpu_strategy_test_utils.py"],
    srcs_version = "PY3",
    deps = [
        "//:expect_tensorflow_installed",
    ],
)
distribute_py_test(
    name = "saved_model_save_load_test",
    size = "medium",
    srcs = ["saved_model_save_load_test.py"],
    full_precision = True,
    main = "saved_model_save_load_test.py",
    shard_count = 7,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # times out
        "no_rocm",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":saved_model_test_base",
        "//:expect_tensorflow_installed",
    ],
)

distribute_py_test(
    name = "saved_model_mixed_api_test",
    size = "medium",
    srcs = ["saved_model_mixed_api_test.py"],
    full_precision = True,
    main = "saved_model_mixed_api_test.py",
    shard_count = 7,
    tags = [
        "multi_and_single_gpu",
        "no_rocm",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":saved_model_test_base",
        "//:expect_tensorflow_installed",
        "//tf_keras/saving",
    ],
)

distribute_py_test(
    name = "sharded_variable_test",
    srcs = ["sharded_variable_test.py"],
    python_version = "PY3",
    shard_count = 2,
    tags = [
        "multi_and_single_gpu",
        "no_rocm",
        "nomultivm",  # TODO(b/170502145)
    ],
    deps = [
        ":multi_worker_testing_utils",
        ":strategy_combinations",
        "//:expect_numpy_installed",
        "//:expect_tensorflow_installed",
        "//tf_keras/engine:base_layer",
    ],
)

# Parameter-server-strategy evaluation tests (not supported on TPU/TFRT).
distribute_py_test(
    name = "parameter_server_evaluation_test",
    srcs = ["parameter_server_evaluation_test.py"],
    python_version = "PY3",
    shard_count = 1,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # TODO(b/186361027)
        "no_oss",  # TODO(b/186248973)
        "no_tfrt",
        "nomultivm",  # TODO(b/170502145)
        "notpu",
    ],
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras/testing_infra:test_utils",
        "//tf_keras/utils:dataset_creator",
    ],
)

distribute_py_test(
    name = "parameter_server_exact_evaluation_test",
    srcs = ["parameter_server_exact_evaluation_test.py"],
    python_version = "PY3",
    shard_count = 29,
    tags = [
        "multi_and_single_gpu",
        "no_cuda_asan",  # TODO(b/186361027)
        "no_oss",  # TODO(b/186248973)
        "no_tfrt",
        "nomultivm",  # TODO(b/170502145)
        "notpu",
    ],
    deps = [
        "//:expect_tensorflow_installed",
        "//tf_keras",
        "//tf_keras/testing_infra:test_utils",
        "//tf_keras/utils:dataset_creator",
    ],
)
distribute_py_test(
name = "dataset_creator_model_fit_test",
srcs = ["dataset_creator_model_fit_test.py"],
disable_mlir_bridge = True, # TODO(b/170352626)
disable_tpu_use_tfrt = True, # TODO(b/195081590)
full_precision = True,
main = "dataset_creator_model_fit_test.py",
shard_count = 50,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"nomultivm", # TODO(b/170502145)
"notpu", # TODO(b/210168103)
"notsan", # TODO(b/184542721)
],
deps = [
":dataset_creator_model_fit_test_base",
":strategy_combinations",
"//:expect_portpicker_installed",
"//:expect_tensorflow_installed",
"//tf_keras:callbacks",
"//tf_keras/testing_infra:test_utils",
],
)
distribute_py_test(
name = "dataset_creator_model_fit_ps_only_test",
size = "medium",
srcs = ["dataset_creator_model_fit_ps_only_test.py"],
disable_mlir_bridge = True, # TODO(b/170352626)
full_precision = True,
main = "dataset_creator_model_fit_ps_only_test.py",
shard_count = 21,
tags = [
"multi_gpu",
"no_oss", # TODO(b/183640564): Re-enable
"no_rocm",
"nomultivm", # TODO(b/170502145)
"notsan", # TODO(b/184542721)
],
deps = [
":dataset_creator_model_fit_test_base",
":strategy_combinations",
"//:expect_tensorflow_installed",
"//tf_keras:callbacks",
"//tf_keras/testing_infra:test_utils",
],
)
py_library(
name = "distributed_file_utils",
srcs = [
"distributed_file_utils.py",
],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "distributed_file_utils_test",
srcs = ["distributed_file_utils_test.py"],
python_version = "PY3",
srcs_version = "PY3",
deps = [
":distributed_file_utils",
"//:expect_tensorflow_installed",
],
)
py_library(
name = "strategy_combinations",
srcs = ["strategy_combinations.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
],
)
py_library(
name = "test_example",
srcs = ["test_example.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/legacy_tf_layers:layers",
],
)
py_library(
name = "distribute_coordinator_utils",
srcs = [
"distribute_coordinator_utils.py",
],
srcs_version = "PY3",
deps = ["//:expect_tensorflow_installed"],
)
py_library(
name = "dataset_creator_model_fit_test_base",
srcs = [
"dataset_creator_model_fit_test_base.py",
],
srcs_version = "PY3",
deps = [
":multi_worker_testing_utils",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:callbacks",
"//tf_keras/engine",
"//tf_keras/layers/core",
"//tf_keras/layers/preprocessing:string_lookup",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/utils:dataset_creator",
],
)
|
tf-keras/tf_keras/distribute/BUILD/0
|
{
"file_path": "tf-keras/tf_keras/distribute/BUILD",
"repo_id": "tf-keras",
"token_count": 12479
}
| 223 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
import contextlib
import tensorflow.compat.v2 as tf
from absl import flags
from tf_keras import backend
FLAGS = flags.FLAGS
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
    """Returns whether `distribution_strategy` supports global batch sizes.

    Reads the private `_global_batch_size` attribute of the strategy's
    extended object; truthy for strategies that interpret batch sizes as
    global (per-step across all replicas) rather than per-replica.
    """
    return distribution_strategy.extended._global_batch_size
def call_replica_local_fn(fn, *args, **kwargs):
    """Call a function that uses replica-local variables.

    This function correctly handles calling `fn` in a cross-replica
    context.

    Args:
      fn: The function to call.
      *args: Positional arguments to the `fn`.
      **kwargs: Keyword argument to `fn`. May include a special `strategy`
        key naming the `tf.distribute.Strategy` to use; it is popped before
        `fn` is invoked, so `fn` never sees it.

    Returns:
      The result of calling `fn`.
    """
    # TODO(b/132666209): Remove this function when we support assign_*
    # for replica-local variables.
    # An explicitly passed `strategy` kwarg takes precedence over (and
    # suppresses lookup of) the ambient strategy from the current scope.
    strategy = None
    if "strategy" in kwargs:
        strategy = kwargs.pop("strategy")
    else:
        if tf.distribute.has_strategy():
            strategy = tf.distribute.get_strategy()

    # TODO(b/120571621): TPUStrategy does not implement replica-local
    # variables.
    is_tpu = backend.is_tpu_strategy(strategy)
    if (not is_tpu) and strategy and tf.distribute.in_cross_replica_context():
        # Re-enter the strategy scope and fan the call out to each replica.
        with strategy.scope():
            return strategy.extended.call_for_each_replica(fn, args, kwargs)
    # No strategy, TPU strategy, or already in a replica context: call
    # directly.
    return fn(*args, **kwargs)
def is_distributed_variable(v):
    """Returns whether `v` is a distributed variable.

    A distributed variable is both a `tf.distribute.DistributedValues`
    (one value per replica) and a `tf.Variable`; both checks are required
    because neither class implies the other.
    """
    return isinstance(v, tf.distribute.DistributedValues) and isinstance(
        v, tf.Variable
    )
def get_strategy():
    """Creates a `tf.distribute.Strategy` object from flags.

    The strategy class is selected by the `keras_distribute_strategy_class`
    flag; TPU strategies additionally require the
    `keras_distribute_strategy_tpu_addr` flag.

    Example usage:

    ```python
    strategy = utils.get_strategy()
    with strategy.scope():
      model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
      model.compile(...)
      train_ds, test_ds = ...
      model.fit(train_ds, validation_data=test_ds, epochs=10)
    ```

    Returns:
      `tf.distribute.Strategy` instance.

    Raises:
      ValueError: if the flag names an unknown strategy, or a TPU strategy
        is requested without a TPU address.
    """
    cls = FLAGS.keras_distribute_strategy_class
    # Kept only for the error message below; each known value is handled by
    # its own early-return branch.
    accepted_strats = {
        "tpu",
        "multi_worker_mirrored",
        "mirrored",
        "parameter_server",
        "one_device",
    }

    if cls == "tpu":
        tpu_addr = FLAGS.keras_distribute_strategy_tpu_addr
        if not tpu_addr:
            raise ValueError(
                "When using a TPU strategy, you must set the flag "
                "`keras_distribute_strategy_tpu_addr` (TPU address)."
            )
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=tpu_addr
        )
        # The TPU system must be connected and initialized before a
        # TPUStrategy can be constructed.
        tf.config.experimental_connect_to_cluster(cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        return tf.distribute.experimental.TPUStrategy(cluster_resolver)

    if cls == "multi_worker_mirrored":
        return tf.distribute.experimental.MultiWorkerMirroredStrategy()

    if cls == "mirrored":
        return tf.distribute.MirroredStrategy()

    if cls == "parameter_server":
        # Cluster layout is read from the TF_CONFIG environment variable.
        cluster_resolver = (
            tf.distribute.cluster_resolver.TFConfigClusterResolver()
        )
        return tf.distribute.experimental.ParameterServerStrategy(
            cluster_resolver
        )

    if cls == "one_device":
        return tf.distribute.OneDeviceStrategy("/gpu:0")

    raise ValueError(
        "Unknown distribution strategy flag. Received: "
        f"keras_distribute_strategy_class={cls}. "
        f"It should be one of {accepted_strats}"
    )
def maybe_preemption_handler_scope(model):
    """Returns a context manager guarding against preemption, if available.

    When `model` carries a truthy `_preemption_handler`, the handler's
    `watch_preemption_scope()` is returned; otherwise a no-op
    `contextlib.nullcontext()` is returned so callers can always use the
    result in a `with` statement.
    """
    handler = getattr(model, "_preemption_handler", None)
    if handler:
        return handler.watch_preemption_scope()
    return contextlib.nullcontext()
|
tf-keras/tf_keras/distribute/distributed_training_utils.py/0
|
{
"file_path": "tf-keras/tf_keras/distribute/distributed_training_utils.py",
"repo_id": "tf-keras",
"token_count": 1813
}
| 224 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MirroredStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import training as keras_training
from tf_keras.layers import core as keras_core
from tf_keras.optimizers.legacy import rmsprop
from tf_keras.utils import kpl_test_utils
# isort: off
from tensorflow.python.eager import backprop
from tensorflow.python.training import (
optimizer as optimizer_lib,
)
class MiniModel(keras_training.Model):
    """Minimal model for mnist.

    Useful for testing and debugging on slow TPU simulators.
    """

    def __init__(self):
        super().__init__(name="")
        # Single dense unit with all-ones kernel/bias initialization so the
        # tests below can predict exact post-update variable values.
        self.fc = keras_core.Dense(
            1, name="fc", kernel_initializer="ones", bias_initializer="ones"
        )

    def call(self, inputs, training=True):
        # `inputs` is discarded: the forward pass always feeds a constant
        # all-ones [1, 10] batch to the dense layer, making the output (and
        # its gradients) fully deterministic.
        inputs = tf.ones([1, 10])
        return self.fc(inputs)
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        distribution=[
            tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,  # noqa: E501
        ],
        mode=["eager"],
    )
)
class MirroredStrategyDefunTest(tf.test.TestCase, parameterized.TestCase):
    def testTrain(self, distribution):
        """Trains MiniModel one step under MirroredStrategy via tf.function.

        Verifies that a gradient-descent update with lr=0.25 applied across
        both replicas produces the exact expected variable values.
        """
        with distribution.scope():
            mock_model = MiniModel()
            mock_model.call = tf.function(mock_model.call)

            def loss_fn(ctx):
                del ctx
                return mock_model(tf.ones([1, 10]))

            gradients_fn = backprop.implicit_grad(loss_fn)
            gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)
            grads_and_vars = distribution.extended.call_for_each_replica(
                gradients_fn, args=(None,)
            )
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.25)
            # NOTE: uses the optimizer's private distributed-apply hook.
            update_ops = optimizer._distributed_apply(
                distribution, grads_and_vars
            )
            if not tf.executing_eagerly():
                self.evaluate(tf.compat.v1.global_variables_initializer())
                self.evaluate(update_ops)
            updated_var_values = self.evaluate(mock_model.variables)
            # All variables start at 1.0 and get two updates of 0.25.
            self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
            self.assertAllEqual([0.5], updated_var_values[1])

    def testTrainAndServeWithKPL(self, distribution):
        """Trains a model with Keras preprocessing layers, then saves/serves.

        The KPL test utilities provide feature/label lookup layers, a model,
        and a dataset; after training, accuracy must exceed 0.5 and the
        optimizer must have taken exactly num_epochs * num_steps steps.
        """
        use_adapt = False
        test_utils_obj = kpl_test_utils.DistributeKplTestUtils()
        with distribution.scope():
            (
                feature_mapper,
                label_mapper,
            ) = test_utils_obj.define_kpls_for_training(use_adapt)
            model = test_utils_obj.define_model()
            optimizer = rmsprop.RMSprop(learning_rate=0.1)
            accuracy = keras.metrics.Accuracy()

            def dataset_fn(_):
                return test_utils_obj.dataset_fn(feature_mapper, label_mapper)

            @tf.function
            def train_step(iterator):
                """The step function for one training step."""

                def step_fn(inputs):
                    """The computation to run on each replica(GPU)."""
                    features, labels = inputs
                    with tf.GradientTape() as tape:
                        pred = model(features, training=True)
                        loss = keras.losses.binary_crossentropy(labels, pred)
                        loss = tf.nn.compute_average_loss(loss)
                    grads = tape.gradient(loss, model.trainable_variables)
                    optimizer.apply_gradients(
                        list(zip(grads, model.trainable_variables))
                    )
                    actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
                    accuracy.update_state(labels, actual_pred)

                distribution.run(step_fn, args=(next(iterator),))

            distributed_dataset = (
                distribution.distribute_datasets_from_function(dataset_fn)
            )
            distributed_iterator = iter(distributed_dataset)
            num_epochs = 4
            num_steps = 7
            for _ in range(num_epochs):
                accuracy.reset_state()
                for _ in range(num_steps):
                    train_step(distributed_iterator)

            self.assertGreater(accuracy.result().numpy(), 0.5)
            self.assertEqual(
                optimizer.iterations.numpy(), num_epochs * num_steps
            )

        # Test save/load/serving the trained model.
        test_utils_obj.test_save_load_serving_model(
            model, feature_mapper, test_utils_obj.define_reverse_lookup_layer()
        )
if __name__ == "__main__":
tf.test.main()
|
tf-keras/tf_keras/distribute/mirrored_strategy_test.py/0
|
{
"file_path": "tf-keras/tf_keras/distribute/mirrored_strategy_test.py",
"repo_id": "tf-keras",
"token_count": 2485
}
| 225 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras model save/load."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import layers
from tf_keras import models
from tf_keras.dtensor import dtensor_api as dtensor
from tf_keras.dtensor import layout_map as layout_map_lib
from tf_keras.dtensor import test_util
from tf_keras.utils import tf_utils
def _create_test_model():
    """Builds a small two-conv-layer Sequential model for save/load tests."""
    conv_layers = [
        layers.Conv2D(
            32,
            name="conv2d_1",
            kernel_size=(3, 3),
            activation="relu",
            input_shape=(28, 28, 1),  # channel last gray scale input
        ),
        layers.Conv2D(
            64,
            name="conv2d_2",
            kernel_size=(3, 3),
            activation="relu",
        ),
    ]
    model = models.Sequential()
    for conv_layer in conv_layers:
        model.add(conv_layer)
    return model
class SaveLoadTest(test_util.DTensorBaseTest):
    """Round-trips H5 weights between DTensor and normal Keras models."""

    def setUp(self):
        super().setUp()
        backend.enable_tf_random_generator()
        tf_utils.set_random_seed(1337)
        # Build a 2x2 CPU device mesh named ("X", "Y") for layout tests.
        global_ids = test_util.create_device_ids_array((2, 2))
        local_device_ids = np.ravel(global_ids).tolist()
        mesh_dict = {
            "CPU": dtensor.Mesh(
                ["X", "Y"],
                global_ids,
                local_device_ids,
                test_util.create_device_list((2, 2), "CPU"),
            )
        }
        self.mesh = self.configTestMesh(mesh_dict)

    def test_save_h5_weights_for_dtensor_model(self):
        """Weights saved from a DTensor model load into a normal model."""
        layout_map = layout_map_lib.LayoutMap(mesh=self.mesh)
        with layout_map_lib.layout_map_scope(layout_map):
            dtensor_model = _create_test_model()
        self.assertNotEmpty(dtensor_model.weights)
        for w in dtensor_model.weights:
            # Make sure the weights are DVariable
            self.assertIsNotNone(w.layout)

        save_file = self.create_tempfile("dtensor_model.h5")
        dtensor_model.save_weights(save_file)

        # Make sure the weights can be load back to a normal keras model.
        normal_model = _create_test_model()
        normal_model.load_weights(save_file)
        for (
            w1,
            w2,
        ) in zip(normal_model.weights, dtensor_model.weights):
            self.assertAllClose(w1.numpy(), w2.numpy())
            # Loaded weights must be plain variables without a DTensor layout.
            self.assertIsNone(getattr(w1, "layout", None))

    def test_load_h5_weights_for_dtensor_model(self):
        """Weights saved from a normal model load into a DTensor model."""
        normal_model = _create_test_model()
        save_file = self.create_tempfile("normal_model.h5")
        normal_model.save_weights(save_file)

        layout_map = layout_map_lib.LayoutMap(mesh=self.mesh)
        with layout_map_lib.layout_map_scope(layout_map):
            dtensor_model = _create_test_model()
        self.assertNotEmpty(dtensor_model.weights)
        for w in dtensor_model.weights:
            self.assertIsNotNone(w.layout)

        dtensor_model.load_weights(save_file)
        for (
            w1,
            w2,
        ) in zip(normal_model.weights, dtensor_model.weights):
            self.assertAllClose(w1.numpy(), w2.numpy())
if __name__ == "__main__":
tf.test.main()
|
tf-keras/tf_keras/dtensor/save_load_test.py/0
|
{
"file_path": "tf-keras/tf_keras/dtensor/save_load_test.py",
"repo_id": "tf-keras",
"token_count": 1656
}
| 226 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dynamic control flow behavior with TF-Keras."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import base_layer
from tf_keras.optimizers.legacy import rmsprop
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class ControlFlowLayer1(base_layer.Layer):
    """Layer with an `if` condition in call."""

    def call(self, inputs):
        # The branch condition is a tensor, so this `if` is dynamic control
        # flow that must be converted (not evaluated at trace time).
        if tf.reduce_sum(inputs) > 0:
            return tf.sqrt(inputs)
        else:
            return tf.square(inputs)
class ControlFlowLayer2(base_layer.Layer):
    """Layer with a `for` loop in call."""

    def call(self, inputs):
        # Dynamic-length loop: iterates over the (tensor-valued) first
        # dimension of `inputs`, squaring each row into a TensorArray.
        samples = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
        i = 0
        for sample in inputs:
            samples = samples.write(i, tf.square(sample))
            i += 1
        return samples.stack()
class NestedControlFlowLayer(base_layer.Layer):
    """Layer nested with a control flow layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Delegates to a sublayer whose `call` contains dynamic control flow.
        self.layer = ControlFlowLayer1()

    def call(self, inputs):
        return self.layer(inputs)
class ControlFlowModel(keras.Model):
    """Model with an `if` condition in call."""

    def call(self, inputs):
        # Tensor-dependent branch; exercises dynamic control flow at the
        # model (rather than layer) level.
        if tf.reduce_sum(inputs) > 0:
            return tf.sqrt(inputs)
        else:
            return tf.square(inputs)
class NestedControlFlowModel(keras.Model):
    """Model with an `if` condition in call using a control flow layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.layer = NestedControlFlowLayer()

    def call(self, inputs):
        # Two levels of dynamic control flow: inside the nested layer and
        # again in the model's own tensor-dependent branch below.
        inputs = self.layer(inputs)
        if tf.reduce_sum(inputs) > 0:
            return tf.sqrt(inputs)
        else:
            return tf.square(inputs)
class FunctionControlFlowModel(keras.Model):
    """Model with control flow where `call` is wrapped in function already."""

    # `call` is explicitly decorated, so the framework must cope with a
    # user-supplied tf.function rather than wrapping it itself.
    @tf.function
    def call(self, inputs):
        if tf.reduce_sum(inputs) > 0:
            return tf.sqrt(inputs)
        else:
            return tf.square(inputs)
@test_combinations.run_all_keras_modes
class AutographWrapperTest(test_combinations.TestCase):
    """Checks that layers/models with dynamic control flow can train."""

    @test_combinations.run_with_all_model_types
    @parameterized.named_parameters(
        ("with_if", ControlFlowLayer1),
        ("with_for", ControlFlowLayer2),
        ("nested", NestedControlFlowLayer),
    )
    def test_control_flow_layer(self, layer_class):
        # One train step is enough: the goal is that compilation/tracing of
        # the control flow succeeds, not the numeric result.
        model = test_utils.get_model_from_layers(
            [layer_class()], input_shape=(3,)
        )
        model.compile(rmsprop.RMSprop(0.001), loss="mse")
        model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

    @parameterized.named_parameters(
        ("with_if", ControlFlowModel),
        ("nested", NestedControlFlowModel),
        ("wrapped_in_function", FunctionControlFlowModel),
    )
    def test_control_flow_model(self, model_class):
        model = model_class()
        model.compile(rmsprop.RMSprop(0.001), loss="mse")
        model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

    def test_control_flow_in_deferred_sequential_model(self):
        # Deferred-build Sequential: shapes are unknown until the first
        # batch, combined with control-flow layers before and after Dense.
        model = keras.Sequential(
            [ControlFlowLayer1(), keras.layers.Dense(3), ControlFlowLayer2()]
        )
        model.compile(rmsprop.RMSprop(0.001), loss="mse")
        model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
if __name__ == "__main__":
tf.test.main()
|
tf-keras/tf_keras/engine/control_flow_test.py/0
|
{
"file_path": "tf-keras/tf_keras/engine/control_flow_test.py",
"repo_id": "tf-keras",
"token_count": 1685
}
| 227 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the `Node` class."""
import collections
import copy
import json
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.utils import tf_utils
_CONSTANT_VALUE = "_CONSTANT_VALUE"
# Using dict to avoid conflict with constant string tensor.
_COMPOSITE_TYPE = {"_TYPE": "COMPOSITE"}
class Node:
    """A `Node` describes a layer `__call__()` event.

    A Functional model is a DAG with `Node` instances as nodes, and
    `KerasTensor` instances as edges. Nodes aren't `Layer` instances, because a
    single layer could be called multiple times, which would result in graph
    cycles.

    A `__call__()` event involves input tensors (and other input arguments),
    the layer that was called, and the resulting output tensors.
    A `Node` will include all this information.

    Since a single `Layer` could be called multiple times, the `Node` instances
    are stored on layers as a list. Each time a layer is called a node is added
    to `layer._inbound_nodes`. Each time the output of a layer is used by
    another layer, a node is added to `layer._outbound_nodes`.

    Every `KerasTensor` instance has a `KerasHistory` object attached,
    which tracks the `Node` that records the `__call__()` event that created
    the tensor. By recursively walking through `Node` instances
    via the `KerasHistory` metadata of `KerasTensor` instances, once can
    retrieve the entire DAG of a Functional model.

    Args:
        layer: The layer that was called in the `Layer.__call__()`
        event that this node represents.
        call_args: The positional arguments the layer was called with.
        call_kwargs: The keyword arguments the layer was called with.
        outputs: The output tensors of the `Layer.__call__()`
    """

    def __init__(self, layer, call_args=None, call_kwargs=None, outputs=None):
        call_args = [] if call_args is None else call_args
        call_kwargs = {} if call_kwargs is None else call_kwargs
        outputs = [] if outputs is None else outputs

        self.layer = layer
        # A call with no arguments at all corresponds to an `Input` tensor.
        self.is_input = not call_args and not call_kwargs

        # These arguments are user-provided. Copy the structures here so that
        # future user modifications do not affect the node's metadata.
        # We copy using map_structure rather than python's shallow or deep copy,
        # because the args can be data structures (so shallow copy is
        # insufficient), but individual values might not support copy.copy
        # or be too expensive to deep copy.
        call_args = tf.nest.map_structure(lambda t: t, call_args)
        call_kwargs = tf.nest.map_structure(lambda t: t, call_kwargs)
        self.outputs = tf.nest.map_structure(lambda t: t, outputs)
        self.call_args = call_args
        self.call_kwargs = call_kwargs

        # Cached for performance.
        self._flat_arguments = tf.nest.flatten(
            (self.call_args, self.call_kwargs)
        )
        # Used to avoid expensive `nest` operations in the most common case.
        self._single_positional_tensor_passed = (
            not self.call_kwargs
            and len(self.call_args) == 1
            and tf.is_tensor(self.call_args[0])
        )

        if not tf.compat.v1.executing_eagerly_outside_functions():
            # Create TensorFlowOpLayers if needed (in TF1)
            for obj in self._flat_arguments:
                if isinstance(
                    obj, tf.Tensor
                ) and base_layer_utils.needs_keras_history(
                    obj, ignore_call_context=True
                ):
                    base_layer_utils.create_keras_history(obj)

        # Collect the KerasTensors among the flattened call arguments, along
        # with (id, flat-index) pairs used later by `map_arguments`.
        self._keras_inputs = []
        self._keras_inputs_ids_and_indices = []
        for i, ele in enumerate(self._flat_arguments):
            if is_keras_tensor(ele):
                self._keras_inputs.append(ele)
                kt_id = str(id(ele))
                kt_index = i
                self._keras_inputs_ids_and_indices.append((kt_id, kt_index))

        # Wire up Node to Layers.
        self.layer._inbound_nodes.append(self)
        for kt in self.keras_inputs:
            inbound_layer = kt._keras_history.layer
            if inbound_layer is not None:  # `None` for `Input` tensors.
                inbound_layer._outbound_nodes.append(self)

        # Set metadata on outputs.
        node_index = len(self.layer._inbound_nodes) - 1
        for i, tensor in enumerate(tf.nest.flatten(outputs)):
            tensor._keras_history = KerasHistory(
                layer=layer, node_index=node_index, tensor_index=i
            )

        # Cached for performance.
        self.flat_input_ids = [str(id(t)) for t in self._keras_inputs]
        self.flat_output_ids = [
            str(id(t)) for t in tf.nest.flatten(self.outputs)
        ]

    @property
    def keras_inputs(self):
        """Tensors input to this node that can be traced back to a
        `keras.Input`."""
        return self._keras_inputs

    @property
    def parent_nodes(self):
        """Returns all the `Node`s whose output this node immediately depends
        on."""
        node_deps = []
        for kt in self.keras_inputs:
            layer = kt._keras_history.layer
            node_index = kt._keras_history.node_index
            if layer is not None:  # `None` for `Input` tensors.
                node_deps.append(layer._inbound_nodes[node_index])
        return node_deps

    def iterate_inbound(self):
        """Yields tuples representing the data inbound from other nodes.

        Yields:
            tuples like: (inbound_layer, node_index, tensor_index, tensor).
        """
        for kt in self.keras_inputs:
            keras_history = kt._keras_history
            layer = keras_history.layer
            node_index = keras_history.node_index
            tensor_index = keras_history.tensor_index
            yield layer, node_index, tensor_index, kt

    def map_arguments(self, tensor_dict):
        """Maps TF-Keras Tensors to computed Tensors using `tensor_dict`.

        `tensor_dict` maps KerasTensor ids to stacks of computed tensors;
        each lookup pops one computed value off the corresponding stack.
        """
        if self._single_positional_tensor_passed:
            # Performance optimization for most common case.
            kt_id, _ = self._keras_inputs_ids_and_indices[0]
            return (tensor_dict[kt_id].pop(),), {}
        else:
            # Substitute computed tensors into a copy of the flattened args,
            # then repack to the original (args, kwargs) structure.
            flat_arguments = copy.copy(self._flat_arguments)
            for kt_id, kt_index in self._keras_inputs_ids_and_indices:
                flat_arguments[kt_index] = tensor_dict[kt_id].pop()
            args, kwargs = tf.nest.pack_sequence_as(
                (self.call_args, self.call_kwargs), flat_arguments
            )
            return args, kwargs

    def serialize(self, make_node_key, node_conversion_map):
        """Serializes `Node` for Functional API's `get_config`."""
        # Serialization still special-cases first argument.
        args, kwargs = self.call_args, self.call_kwargs
        inputs, args, kwargs = self.layer._call_spec.split_out_first_arg(
            args, kwargs
        )

        # Treat everything other than first argument as a kwarg.
        arguments = dict(zip(self.layer._call_spec.arg_names[1:], args))
        arguments.update(kwargs)
        kwargs = arguments

        def _serialize_keras_tensor(t):
            """Serializes a single Tensor passed to `call`."""
            if hasattr(t, "_keras_history"):
                kh = t._keras_history
                node_index = kh.node_index
                node_key = make_node_key(kh.layer.name, node_index)
                new_node_index = node_conversion_map.get(node_key, 0)
                return [kh.layer.name, new_node_index, kh.tensor_index]

            if isinstance(t, np.ndarray):
                return t.tolist()

            if isinstance(t, tf.Tensor):
                return backend.get_value(t).tolist()

            # Not using json_utils to serialize both constant Tensor and
            # constant CompositeTensor for saving format backward compatibility.
            if isinstance(t, tf.__internal__.CompositeTensor):
                return (_COMPOSITE_TYPE, json_utils.Encoder().encode(t))

            return t

        kwargs = tf.nest.map_structure(_serialize_keras_tensor, kwargs)
        # Verify early that the serialized kwargs are JSON-compatible, so a
        # clear error names the offending types instead of failing later at
        # model-save time.
        try:
            json.dumps(kwargs, default=json_utils.get_json_type)
        except TypeError:
            kwarg_types = tf.nest.map_structure(type, kwargs)
            raise TypeError(
                "Layer "
                + self.layer.name
                + " was passed non-JSON-serializable arguments. "
                + "Arguments had types: "
                + str(kwarg_types)
                + ". They cannot be serialized out when saving the model."
            )

        # `kwargs` is added to each Tensor in the first arg. This should be
        # changed in a future version of the serialization format.
        def serialize_first_arg_tensor(t):
            if is_keras_tensor(t):
                kh = t._keras_history
                node_index = kh.node_index
                node_key = make_node_key(kh.layer.name, node_index)
                new_node_index = node_conversion_map.get(node_key, 0)
                data = [kh.layer.name, new_node_index, kh.tensor_index, kwargs]
            else:
                # If an element in the first call argument did not originate as
                # a keras tensor and is a constant value, we save it using the
                # format ['_CONSTANT_VALUE', -1,
                # serialized_tensor_or_python_constant] (potentially including
                # serialized kwargs in an optional 4th argument).
                data = [_CONSTANT_VALUE, -1, _serialize_keras_tensor(t), kwargs]
            return tf_utils.ListWrapper(data)

        data = tf.nest.map_structure(serialize_first_arg_tensor, inputs)
        if (
            not tf.nest.is_nested(data)
            and not self.layer._preserve_input_structure_in_config
        ):
            data = [data]
        data = tf_utils.convert_inner_node_data(data)
        return data

    #############################################################
    # Properties for Backwards compatibility.
    # These only check the first input argument
    # As nodes are internal, they may be removed in the future.
    #############################################################

    @property
    def input_tensors(self):
        """First positional call argument (legacy accessor)."""
        if self.is_input:
            return [self.outputs]  # Used in `Layer.input`.
        return self.call_args[0]

    @property
    def output_tensors(self):
        """Output tensors of this node (legacy accessor)."""
        if self.is_input:
            return [self.outputs]  # Used in `Layer.output`.
        return self.outputs

    @property
    def input_shapes(self):
        """Shapes of `input_tensors` (legacy accessor)."""
        input_shapes = tf.nest.map_structure(
            backend.int_shape, self.input_tensors
        )
        if len(input_shapes) == 1 and not self.is_input:
            return input_shapes[0]
        return input_shapes

    @property
    def output_shapes(self):
        """Shapes of `output_tensors` (legacy accessor)."""
        return tf.nest.map_structure(backend.int_shape, self.output_tensors)

    @property
    def outbound_layer(self):
        """The layer this node represents a call of (legacy accessor)."""
        return self.layer

    @property
    def inbound_layers(self):
        """Return all layers that feed into the current node."""
        if self.is_input:
            return []
        tensor_call_args = [
            x
            for x in self._flat_arguments
            if tf.is_tensor(x) and hasattr(x, "_keras_history")
        ]
        inbound_layers = tf.nest.map_structure(
            lambda t: t._keras_history.layer, tensor_call_args
        )
        if len(inbound_layers) == 1:
            return inbound_layers[0]
        return inbound_layers
class KerasHistory(
    collections.namedtuple(
        "KerasHistory", ["layer", "node_index", "tensor_index"]
    )
):
    """Tracks the Layer call that created a Tensor, for TF-Keras Graph Networks.

    During construction of TF-Keras Graph Networks, this metadata is added to
    each Tensor produced as the output of a Layer, starting with an
    `InputLayer`. This allows TF-Keras to track how each Tensor was produced,
    and this information is later retraced by the `keras.engine.Network` class
    to reconstruct the TF-Keras Graph Network.

    Attributes:
        layer: The Layer that produced the Tensor.
        node_index: The specific call to the Layer that produced this Tensor.
        Layers can be called multiple times in order to share weights. A new
        node is created every time a Layer is called. The corresponding node
        that represents the call event that produced the Tensor can be found at
        `layer._inbound_nodes[node_index]`.
        tensor_index: The output index for this Tensor. Always zero if the Layer
        that produced this Tensor only has one output. Nested structures of
        Tensors are deterministically assigned an index via `nest.flatten`.
    """

    # Added to maintain memory and performance characteristics of `namedtuple`
    # while subclassing.
    __slots__ = ()
def is_keras_tensor(obj):
    """Returns True if `obj` carries `_keras_history` metadata.

    Any object tagged with a `_keras_history` attribute (set by `Node` on
    layer outputs) is treated as a KerasTensor for graph-tracing purposes.
    """
    try:
        obj._keras_history
    except AttributeError:
        return False
    return True
|
tf-keras/tf_keras/engine/node.py/0
|
{
"file_path": "tf-keras/tf_keras/engine/node.py",
"repo_id": "tf-keras",
"token_count": 5855
}
| 228 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import backend
from tf_keras.engine import input_layer
from tf_keras.engine import training
from tf_keras.layers.convolutional import Conv2D
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class TrainingGPUTest(tf.test.TestCase, parameterized.TestCase):
    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_model_with_crossentropy_losses_channels_first(self):
        """Tests use of all crossentropy losses with `channels_first`.
        Tests `sparse_categorical_crossentropy`, `categorical_crossentropy`,
        and `binary_crossentropy`.
        Verifies that evaluate gives the same result with either
        `channels_first` or `channels_last` image_data_format.
        """
        def prepare_simple_model(input_tensor, loss_name, target):
            # Channel axis is 1 for `channels_first`, last axis otherwise;
            # the axis is forwarded to the loss so it reduces over channels.
            axis = 1 if backend.image_data_format() == "channels_first" else -1
            loss = None
            num_channels = None
            activation = None
            if loss_name == "sparse_categorical_crossentropy":
                loss = lambda y_true, y_pred: backend.sparse_categorical_crossentropy(  # noqa: E501
                    y_true, y_pred, axis=axis
                )
                # Labels are integer class ids; one output channel per class.
                num_channels = int(np.amax(target) + 1)
                activation = "softmax"
            elif loss_name == "categorical_crossentropy":
                loss = lambda y_true, y_pred: backend.categorical_crossentropy(
                    y_true, y_pred, axis=axis
                )
                num_channels = target.shape[axis]
                activation = "softmax"
            elif loss_name == "binary_crossentropy":
                loss = lambda y_true, y_pred: backend.binary_crossentropy(
                    y_true, y_pred
                )
                num_channels = target.shape[axis]
                activation = "sigmoid"
            # Single 1x1 conv with deterministic all-ones initialization so
            # both data formats produce identical predictions.
            predictions = Conv2D(
                num_channels,
                1,
                activation=activation,
                kernel_initializer="ones",
                bias_initializer="ones",
            )(input_tensor)
            simple_model = training.Model(
                inputs=input_tensor, outputs=predictions
            )
            simple_model.compile(optimizer="rmsprop", loss=loss)
            return simple_model
        # Only runs with a CUDA GPU available; otherwise the test body is
        # skipped entirely.
        if tf.test.is_gpu_available(cuda_only=True):
            with test_utils.use_gpu():
                losses_to_test = [
                    "sparse_categorical_crossentropy",
                    "categorical_crossentropy",
                    "binary_crossentropy",
                ]
                # One 1x3x3 single-channel image in NCHW layout.
                data_channels_first = np.array(
                    [[[[8.0, 7.1, 0.0], [4.5, 2.6, 0.55], [0.9, 4.2, 11.2]]]],
                    dtype=np.float32,
                )
                # Labels for testing 4-class sparse_categorical_crossentropy,
                # 4-class categorical_crossentropy, and 2-class
                # binary_crossentropy:
                labels_channels_first = [
                    np.array(
                        [[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32
                    ),
                    np.array(
                        [
                            [
                                [[0, 1, 0], [0, 1, 0], [0, 0, 0]],
                                [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
                                [[0, 0, 0], [1, 0, 0], [0, 0, 1]],
                                [[0, 0, 1], [0, 0, 0], [1, 0, 0]],
                            ]
                        ],
                        dtype=np.float32,
                    ),
                    np.array(
                        [
                            [
                                [[0, 1, 0], [0, 1, 0], [0, 0, 1]],
                                [[1, 0, 1], [1, 0, 1], [1, 1, 0]],
                            ]
                        ],
                        dtype=np.float32,
                    ),
                ]
                # Compute one loss for each loss function in the list
                # `losses_to_test`:
                loss_channels_last = [0.0, 0.0, 0.0]
                loss_channels_first = [0.0, 0.0, 0.0]
                # Remember the global setting so it can be restored below.
                old_data_format = backend.image_data_format()
                # Evaluate a simple network with channels last, with all three
                # loss functions:
                backend.set_image_data_format("channels_last")
                # NCHW -> NHWC: move the channel axis to the end.
                data = np.moveaxis(data_channels_first, 1, -1)
                for index, loss_function in enumerate(losses_to_test):
                    labels = np.moveaxis(labels_channels_first[index], 1, -1)
                    inputs = input_layer.Input(shape=(3, 3, 1))
                    model = prepare_simple_model(inputs, loss_function, labels)
                    loss_channels_last[index] = model.evaluate(
                        x=data, y=labels, batch_size=1, verbose=0
                    )
                # Evaluate the same network with channels first, with all three
                # loss functions:
                backend.set_image_data_format("channels_first")
                data = data_channels_first
                for index, loss_function in enumerate(losses_to_test):
                    labels = labels_channels_first[index]
                    inputs = input_layer.Input(shape=(1, 3, 3))
                    model = prepare_simple_model(inputs, loss_function, labels)
                    loss_channels_first[index] = model.evaluate(
                        x=data, y=labels, batch_size=1, verbose=0
                    )
                # Restore the global data format for subsequent tests.
                backend.set_image_data_format(old_data_format)
                np.testing.assert_allclose(
                    loss_channels_first,
                    loss_channels_last,
                    rtol=1e-06,
                    err_msg="{}{}".format(
                        "Computed different losses for ",
                        "channels_first and channels_last",
                    ),
                )
# Standard TF test entry point.
if __name__ == "__main__":
    tf.test.main()
|
tf-keras/tf_keras/engine/training_gpu_test.py/0
|
{
"file_path": "tf-keras/tf_keras/engine/training_gpu_test.py",
"repo_id": "tf-keras",
"token_count": 3726
}
| 229 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.feature_column import base_feature_layer as kfc
from tf_keras.saving.legacy.saved_model import json_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.layers.DenseFeatures"])
class DenseFeatures(kfc._BaseFeaturesLayer):
    """A layer that produces a dense `Tensor` based on given `feature_columns`.
    Generally a single example in training data is described with
    FeatureColumns. At the first layer of the model, this column-oriented data
    should be converted to a single `Tensor`.
    This layer can be called multiple times with different features.
    This is the V1 version of this layer that uses variable_scope's or
    partitioner to create variables which works well with PartitionedVariables.
    Variable scopes are deprecated in V2, so the V2 version uses name_scopes
    instead. But currently that lacks support for partitioned variables. Use
    this if you need partitioned variables. Use the partitioner argument if you
    have a TF-Keras model and uses
    `tf.compat.v1.keras.estimator.model_to_estimator` for training.
    Example:
    ```python
    price = tf.feature_column.numeric_column('price')
    keywords_embedded = tf.feature_column.embedding_column(
        tf.feature_column.categorical_column_with_hash_bucket("keywords", 10K),
        dimension=16)
    columns = [price, keywords_embedded, ...]
    partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=4)
    feature_layer = tf.compat.v1.keras.layers.DenseFeatures(
        feature_columns=columns, partitioner=partitioner)
    features = tf.io.parse_example(
        ..., features=tf.feature_column.make_parse_example_spec(columns))
    dense_tensor = feature_layer(features)
    for units in [128, 64, 32]:
        dense_tensor = tf.compat.v1.keras.layers.Dense(
            units, activation='relu')(dense_tensor)
    prediction = tf.compat.v1.keras.layers.Dense(1)(dense_tensor)
    ```
    """
    def __init__(
        self,
        feature_columns,
        trainable=True,
        name=None,
        partitioner=None,
        **kwargs
    ):
        """Constructs a DenseFeatures layer.
        Args:
          feature_columns: An iterable containing the FeatureColumns to use as
            inputs to your model. All items should be instances of classes
            derived from `DenseColumn` such as `numeric_column`,
            `embedding_column`, `bucketized_column`, `indicator_column`. If you
            have categorical features, you can wrap them with an
            `embedding_column` or `indicator_column`.
          trainable: Boolean, whether the layer's variables will be updated via
            gradient descent during training.
          name: Name to give to the DenseFeatures.
          partitioner: Partitioner for input layer. Defaults to `None`.
          **kwargs: Keyword arguments to construct a layer.
        Raises:
          ValueError: if an item in `feature_columns` is not a `DenseColumn`.
        """
        # The base class validates that every column is a `DenseColumn` via
        # `expected_column_type`.
        super().__init__(
            feature_columns=feature_columns,
            trainable=trainable,
            name=name,
            partitioner=partitioner,
            expected_column_type=tf.__internal__.feature_column.DenseColumn,
            **kwargs
        )
    @property
    def _is_feature_layer(self):
        # Marker flag consumed elsewhere to identify feature layers.
        return True
    @property
    def _tracking_metadata(self):
        """String stored in metadata field in the SavedModel proto.
        Returns:
          A serialized JSON storing information necessary for recreating this
          layer.
        """
        # Extend the base layer's metadata with the feature-layer marker so
        # it round-trips through SavedModel.
        metadata = json.loads(super()._tracking_metadata)
        metadata["_is_feature_layer"] = True
        return json.dumps(metadata, default=json_utils.get_json_type)
    def _target_shape(self, input_shape, total_elements):
        # Output is always 2-D: (batch_size, total feature elements).
        return (input_shape[0], total_elements)
    def call(self, features, cols_to_output_tensors=None, training=None):
        """Returns a dense tensor corresponding to the `feature_columns`.
        Example usage:
        >>> t1 = tf.feature_column.embedding_column(
        ...    tf.feature_column.categorical_column_with_hash_bucket("t1", 2),
        ...    dimension=8)
        >>> t2 = tf.feature_column.numeric_column('t2')
        >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2])
        >>> features = {"t1": tf.constant(["a", "b"]),
        ...             "t2": tf.constant([1, 2])}
        >>> dense_tensor = feature_layer(features, training=True)
        Args:
          features: A mapping from key to tensors. `FeatureColumn`s look up via
            these keys. For example `numeric_column('price')` will look at
            'price' key in this dict. Values can be a `SparseTensor` or a
            `Tensor` depends on corresponding `FeatureColumn`.
          cols_to_output_tensors: If not `None`, this will be filled with a dict
            mapping feature columns to output tensors created.
          training: Python boolean or None, indicating whether to the layer is
            being run in training mode. This argument is passed to the call
            method of any `FeatureColumn` that takes a `training` argument. For
            example, if a `FeatureColumn` performed dropout, the column could
            expose a `training` argument to control whether the dropout should
            be applied. If `None`, becomes `tf.keras.backend.learning_phase()`.
            Defaults to `None`.
        Returns:
          A `Tensor` which represents input layer of a model. Its shape
          is (batch_size, first_layer_dimension) and its dtype is `float32`.
          first_layer_dimension is determined based on given `feature_columns`.
        Raises:
          ValueError: If features are not a dictionary.
        """
        if training is None:
            # Default to the global Keras learning phase.
            training = backend.learning_phase()
        if not isinstance(features, dict):
            raise ValueError(
                "We expected a dictionary here. Instead we got: ", features
            )
        # Cache transformed features so columns sharing inputs do not
        # recompute them.
        transformation_cache = (
            tf.__internal__.feature_column.FeatureTransformationCache(features)
        )
        output_tensors = []
        for column in self._feature_columns:
            with backend.name_scope(column.name):
                try:
                    # Pass `training` through to columns that accept it
                    # (e.g. columns applying dropout).
                    tensor = column.get_dense_tensor(
                        transformation_cache,
                        self._state_manager,
                        training=training,
                    )
                except TypeError:
                    # Fallback for columns whose `get_dense_tensor` does not
                    # take a `training` argument. NOTE(review): this also
                    # retries on TypeErrors raised inside the column itself.
                    tensor = column.get_dense_tensor(
                        transformation_cache, self._state_manager
                    )
                processed_tensors = self._process_dense_tensor(column, tensor)
                if cols_to_output_tensors is not None:
                    cols_to_output_tensors[column] = processed_tensors
                output_tensors.append(processed_tensors)
        # Concatenate per-column outputs into one (batch, total_dim) tensor.
        return self._verify_and_concat_tensors(output_tensors)
|
tf-keras/tf_keras/feature_column/dense_features.py/0
|
{
"file_path": "tf-keras/tf_keras/feature_column/dense_features.py",
"repo_id": "tf-keras",
"token_count": 3175
}
| 230 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for custom TF-Keras object saving with `register_keras_serializable`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_utils
from tf_keras.utils import get_custom_objects
# `tf.print` message is only available in stderr in TF2, which this test checks.
@test_utils.run_v2_only
class CustomObjectSavingTest(tf.test.TestCase, parameterized.TestCase):
    """Test for custom TF-Keras object saving with
    `register_keras_serializable`."""
    def setUp(self):
        super().setUp()
        # Start each test with a clean custom-object registry so earlier
        # registrations cannot leak across tests.
        get_custom_objects().clear()
    def test_register_keras_serializable_correct_class(self):
        train_step_message = "This is my training step"
        temp_dir = os.path.join(self.get_temp_dir(), "my_model")
        # Each custom class/function below carries an extra method (one()
        # .. six()) or a distinct implementation so that, after reload, we
        # can prove the *registered* class was restored, not a base class.
        @keras.utils.register_keras_serializable("CustomModelX")
        class CustomModelX(keras.Model):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.dense1 = MyDense(
                    1,
                    kernel_regularizer=MyRegularizer(0.01),
                    activity_regularizer=MyRegularizer(0.01),
                )
            def call(self, inputs):
                return self.dense1(inputs)
            def train_step(self, data):
                # `tf.print` goes to stderr; the test later asserts this
                # message appears after reload, proving the custom
                # train_step survived serialization.
                tf.print(train_step_message)
                x, y = data
                with tf.GradientTape() as tape:
                    y_pred = self(x)
                    loss = self.compiled_loss(y, y_pred)
                gradients = tape.gradient(loss, self.trainable_variables)
                self.optimizer.apply_gradients(
                    zip(gradients, self.trainable_variables)
                )
                return {}
            def one(self):
                return 1
        @keras.utils.register_keras_serializable("MyDense")
        class MyDense(keras.layers.Dense):
            def two(self):
                return 2
        @keras.utils.register_keras_serializable("MyAdam")
        class MyAdam(keras.optimizers.Adam):
            def three(self):
                return 3
        @keras.utils.register_keras_serializable("MyLoss")
        class MyLoss(keras.losses.MeanSquaredError):
            def four(self):
                return 4
        @keras.utils.register_keras_serializable("MyMetric")
        class MyMetric(keras.metrics.MeanAbsoluteError):
            def five(self):
                return 5
        @keras.utils.register_keras_serializable("MyRegularizer")
        class MyRegularizer(keras.regularizers.L2):
            def six(self):
                return 6
        @keras.utils.register_keras_serializable("my_sq_diff")
        def my_sq_diff(y_true, y_pred):
            y_pred = tf.convert_to_tensor(y_pred)
            y_true = tf.cast(y_true, y_pred.dtype)
            sq_diff_plus_x = tf.math.squared_difference(y_pred, y_true)
            return tf.reduce_mean(sq_diff_plus_x, axis=-1)
        subclassed_model = CustomModelX()
        subclassed_model.compile(
            optimizer=MyAdam(), loss=MyLoss(), metrics=[MyMetric(), my_sq_diff]
        )
        x = np.random.random((100, 32))
        y = np.random.random((100, 1))
        subclassed_model.fit(x, y, epochs=1)
        # Round-trip through the SavedModel format.
        subclassed_model.save(temp_dir, save_format="tf")
        loaded_model = keras.models.load_model(temp_dir)
        # `tf.print` writes to stderr.
        with self.captureWritesToStream(sys.stderr) as printed:
            loaded_model.fit(x, y, epochs=1)
            self.assertRegex(printed.contents(), train_step_message)
        # Check that the custom classes do get used.
        self.assertIs(loaded_model.__class__, CustomModelX)
        self.assertIs(loaded_model.optimizer.__class__, MyAdam)
        self.assertIs(loaded_model.compiled_loss._losses[0].__class__, MyLoss)
        self.assertIs(
            loaded_model.compiled_metrics._metrics[0].__class__, MyMetric
        )
        self.assertIs(loaded_model.compiled_metrics._metrics[1], my_sq_diff)
        self.assertIs(loaded_model.layers[0].__class__, MyDense)
        self.assertIs(
            loaded_model.layers[0].activity_regularizer.__class__, MyRegularizer
        )
        self.assertIs(
            loaded_model.layers[0].kernel_regularizer.__class__, MyRegularizer
        )
        # Check that the custom methods are available.
        self.assertEqual(loaded_model.one(), 1)
        self.assertEqual(loaded_model.layers[0].two(), 2)
        self.assertEqual(loaded_model.optimizer.three(), 3)
        self.assertEqual(loaded_model.compiled_loss._losses[0].four(), 4)
        self.assertEqual(loaded_model.compiled_metrics._metrics[0].five(), 5)
        self.assertEqual(loaded_model.layers[0].activity_regularizer.six(), 6)
        self.assertEqual(loaded_model.layers[0].kernel_regularizer.six(), 6)
        self.assertEqual(loaded_model.compiled_metrics._metrics[1]([1], [3]), 4)
# Standard TF test entry point.
if __name__ == "__main__":
    tf.test.main()
|
tf-keras/tf_keras/integration_test/custom_object_saving_test.py/0
|
{
"file_path": "tf-keras/tf_keras/integration_test/custom_object_saving_test.py",
"repo_id": "tf-keras",
"token_count": 2589
}
| 231 |
"""Image classification with EfficientNetV2 architecture.
Adapted from the EfficientNetV2 TF-Keras Application.
"""
import math
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
IMG_SIZE = (96, 96)
NUM_CLASSES = 5
def get_data_spec(batch_size):
    """Return (image_spec, label_spec) for batches of `batch_size` samples."""
    image_shape = (batch_size, *IMG_SIZE, 3)
    label_shape = (batch_size, NUM_CLASSES)
    return InputSpec(image_shape), InputSpec(label_shape)
def get_input_preprocessor():
    """Build the rescaling layer mapping raw pixel values into [-1, 1]."""
    rescaling_layer = keras.layers.Rescaling(scale=1.0 / 128.0, offset=-1)
    return rescaling_layer
def round_filters(filters, width_coefficient, min_depth, depth_divisor):
    """Scale `filters` by `width_coefficient` and round to the divisor.

    The scaled count is rounded to the nearest multiple of `depth_divisor`
    (via the +divisor/2 trick) and clamped from below by `min_depth`,
    falling back to `depth_divisor` when `min_depth` is falsy.
    """
    scaled = filters * width_coefficient
    floor = min_depth if min_depth else depth_divisor
    rounded = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    if rounded < floor:
        rounded = floor
    return int(rounded)
def MBConvBlock(
    input_filters: int,
    output_filters: int,
    expand_ratio=1,
    kernel_size=3,
    strides=1,
    se_ratio=0.0,
    activation="swish",
    survival_probability: float = 0.8,
):
    """Return a closure applying an MBConv block (expand -> depthwise ->
    squeeze-and-excite -> project, with an optional residual connection)."""
    def apply(inputs):
        filters = input_filters * expand_ratio
        # Expansion phase: 1x1 conv widening the channels, skipped when
        # expand_ratio == 1.
        if expand_ratio != 1:
            x = keras.layers.Conv2D(
                filters=filters,
                kernel_size=1,
                strides=1,
                padding="same",
                data_format="channels_last",
                use_bias=False,
            )(inputs)
            x = keras.layers.BatchNormalization()(x)
            x = keras.layers.Activation(activation)(x)
        else:
            x = inputs
        # Depthwise convolution.
        x = keras.layers.DepthwiseConv2D(
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            data_format="channels_last",
            use_bias=False,
        )(x)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation(activation)(x)
        # Squeeze-and-excitation: global pool -> bottleneck conv ->
        # sigmoid gate multiplied back onto the feature map.
        if 0 < se_ratio <= 1:
            filters_se = max(1, int(input_filters * se_ratio))
            se = keras.layers.GlobalAveragePooling2D()(x)
            se = keras.layers.Reshape((1, 1, filters))(se)
            se = keras.layers.Conv2D(
                filters_se,
                1,
                padding="same",
                activation=activation,
            )(se)
            se = keras.layers.Conv2D(
                filters,
                1,
                padding="same",
                activation="sigmoid",
            )(se)
            x = keras.layers.multiply([x, se])
        # Projection phase: 1x1 conv back down to `output_filters` (no
        # activation after the projection BN).
        x = keras.layers.Conv2D(
            filters=output_filters,
            kernel_size=1,
            strides=1,
            padding="same",
            data_format="channels_last",
            use_bias=False,
        )(x)
        x = keras.layers.BatchNormalization()(x)
        # Residual connection only when shapes match; Dropout with a
        # (None, 1, 1, 1) noise shape implements stochastic depth.
        if strides == 1 and input_filters == output_filters:
            if survival_probability:
                x = keras.layers.Dropout(
                    survival_probability,
                    noise_shape=(None, 1, 1, 1),
                )(x)
            x = keras.layers.add([x, inputs])
        return x
    return apply
def FusedMBConvBlock(
    input_filters: int,
    output_filters: int,
    expand_ratio=1,
    kernel_size=3,
    strides=1,
    se_ratio=0.0,
    activation="swish",
    survival_probability: float = 0.8,
):
    """Return a closure applying a Fused-MBConv block, where the expansion
    1x1 conv and depthwise conv are fused into a single regular conv."""
    def apply(inputs):
        filters = input_filters * expand_ratio
        # Fused expansion: one full kxk conv does both widening and spatial
        # mixing; skipped when expand_ratio == 1.
        if expand_ratio != 1:
            x = keras.layers.Conv2D(
                filters,
                kernel_size=kernel_size,
                strides=strides,
                data_format="channels_last",
                padding="same",
                use_bias=False,
            )(inputs)
            x = keras.layers.BatchNormalization()(x)
            x = keras.layers.Activation(activation)(x)
        else:
            x = inputs
        # Squeeze-and-excitation gate (same structure as MBConvBlock).
        if 0 < se_ratio <= 1:
            filters_se = max(1, int(input_filters * se_ratio))
            se = keras.layers.GlobalAveragePooling2D()(x)
            se = keras.layers.Reshape((1, 1, filters))(se)
            se = keras.layers.Conv2D(
                filters_se,
                1,
                padding="same",
                activation=activation,
            )(se)
            se = keras.layers.Conv2D(
                filters,
                1,
                padding="same",
                activation="sigmoid",
            )(se)
            x = keras.layers.multiply([x, se])
        # Output phase: when no expansion happened, this conv carries the
        # kernel size / stride (and gets an activation); otherwise it is a
        # plain 1x1 projection.
        x = keras.layers.Conv2D(
            output_filters,
            kernel_size=1 if expand_ratio != 1 else kernel_size,
            strides=1 if expand_ratio != 1 else strides,
            padding="same",
            use_bias=False,
        )(x)
        x = keras.layers.BatchNormalization()(x)
        if expand_ratio == 1:
            x = keras.layers.Activation(activation)(x)
        # Residual + stochastic depth, only when shapes match.
        if strides == 1 and input_filters == output_filters:
            if survival_probability:
                x = keras.layers.Dropout(
                    survival_probability,
                    noise_shape=(None, 1, 1, 1),
                )(x)
            x = keras.layers.add([x, inputs])
        return x
    return apply
def get_model(
    build=False, compile=False, jit_compile=False, include_preprocessing=True
):
    """Build a small EfficientNetV2-style classifier over IMG_SIZE inputs.

    Assembles stem conv -> four stages of (Fused)MBConv blocks -> top conv
    -> global pool -> dropout -> softmax head with NUM_CLASSES outputs.
    Note: `build` is accepted for interface parity but unused here.
    """
    width_coefficient = 1.0
    depth_coefficient = 1.0
    dropout_rate = 0.2
    drop_connect_rate = 0.2
    depth_divisor = 8
    min_depth = 8
    activation = "swish"
    # Stage configuration: conv_type selects FusedMBConv (1) or MBConv (0).
    blocks_args = [
        {
            "kernel_size": 3,
            "num_repeat": 2,
            "input_filters": 24,
            "output_filters": 24,
            "expand_ratio": 1,
            "se_ratio": 0.0,
            "strides": 1,
            "conv_type": 1,
        },
        {
            "kernel_size": 3,
            "num_repeat": 4,
            "input_filters": 24,
            "output_filters": 48,
            "expand_ratio": 4,
            "se_ratio": 0.0,
            "strides": 2,
            "conv_type": 1,
        },
        {
            "conv_type": 1,
            "expand_ratio": 4,
            "input_filters": 48,
            "kernel_size": 3,
            "num_repeat": 4,
            "output_filters": 64,
            "se_ratio": 0,
            "strides": 2,
        },
        {
            "conv_type": 0,
            "expand_ratio": 4,
            "input_filters": 64,
            "kernel_size": 3,
            "num_repeat": 6,
            "output_filters": 128,
            "se_ratio": 0.25,
            "strides": 2,
        },
    ]
    inputs = keras.layers.Input(shape=IMG_SIZE + (3,))
    if include_preprocessing:
        x = get_input_preprocessor()(inputs)
    else:
        x = inputs
    # Stem: strided 3x3 conv down to the first stage's input width.
    stem_filters = round_filters(
        filters=blocks_args[0]["input_filters"],
        width_coefficient=width_coefficient,
        min_depth=min_depth,
        depth_divisor=depth_divisor,
    )
    x = keras.layers.Conv2D(
        filters=stem_filters,
        kernel_size=3,
        strides=2,
        padding="same",
        use_bias=False,
    )(x)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.Activation(activation, name="stem_activation")(x)
    # b counts blocks built so far; drop-connect rate ramps linearly with it.
    b = 0
    blocks = float(sum(args["num_repeat"] for args in blocks_args))
    for _, args in enumerate(blocks_args):
        args["input_filters"] = round_filters(
            filters=args["input_filters"],
            width_coefficient=width_coefficient,
            min_depth=min_depth,
            depth_divisor=depth_divisor,
        )
        args["output_filters"] = round_filters(
            filters=args["output_filters"],
            width_coefficient=width_coefficient,
            min_depth=min_depth,
            depth_divisor=depth_divisor,
        )
        block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop("conv_type")]
        repeats = int(math.ceil(depth_coefficient * args.pop("num_repeat")))
        for j in range(repeats):
            # Only the first block of a stage downsamples / changes width.
            if j > 0:
                args["strides"] = 1
                args["input_filters"] = args["output_filters"]
            x = block(
                activation=activation,
                survival_probability=drop_connect_rate * b / blocks,
                **args,
            )(x)
            b += 1
    # Top: 1x1 conv to the final feature width, pool, dropout, classify.
    top_filters = round_filters(
        filters=1280,
        width_coefficient=width_coefficient,
        min_depth=min_depth,
        depth_divisor=depth_divisor,
    )
    x = keras.layers.Conv2D(
        filters=top_filters,
        kernel_size=1,
        strides=1,
        padding="same",
        data_format="channels_last",
        use_bias=False,
    )(x)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.Activation(activation=activation, name="top_activation")(x)
    x = keras.layers.GlobalAveragePooling2D(name="avg_pool")(x)
    x = keras.layers.Dropout(dropout_rate, name="top_dropout")(x)
    x = keras.layers.Dense(
        NUM_CLASSES,
        activation="softmax",
    )(x)
    model = keras.Model(inputs, x)
    if compile:
        model.compile(
            "adam", loss="categorical_crossentropy", jit_compile=jit_compile
        )
    return model
def get_custom_objects():
    """This model uses no custom objects; return an empty mapping."""
    return dict()
|
tf-keras/tf_keras/integration_test/models/efficientnet_v2.py/0
|
{
"file_path": "tf-keras/tf_keras/integration_test/models/efficientnet_v2.py",
"repo_id": "tf-keras",
"token_count": 5011
}
| 232 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests training metrics with PSS distribution strategy."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import layers as layers_module
from tf_keras import metrics as metrics_module
from tf_keras.engine import training as training_module
from tf_keras.testing_infra import test_combinations
# isort: off
from tensorflow.python.distribute import (
multi_process_runner,
multi_worker_test_base,
)
class ParameterServerTrainingMetricTest(test_combinations.TestCase):
    """Test Parameter Server Distribution strategy with TF-Keras Model
    Training.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One shared in-process cluster (2 workers, 3 parameter servers)
        # reused by every test in the class; torn down in tearDownClass.
        cls.cluster = multi_worker_test_base.create_multi_process_cluster(
            num_workers=2, num_ps=3, rpc_layer="grpc"
        )
        cls.cluster_resolver = cls.cluster.cluster_resolver
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        cls.cluster.stop()
    @test_combinations.run_all_keras_modes(always_skip_v1=True)
    def test_pss_fit_metric_batch_counter(self):
        """Verify that metric data is complete during fit when using
        ParameterServerStrategy
        """
        strategy = tf.distribute.ParameterServerStrategy(
            self.cluster_resolver,
            variable_partitioner=None,
        )
        # Metric adding 1 per update_state call, i.e. counting batches seen;
        # if the strategy dropped batches the sum would come up short.
        class BatchCount(metrics_module.Sum):
            def __init__(self, name="batch_count", dtype=tf.int64):
                super().__init__(name=name, dtype=dtype)
            def update_state(self, y_true, y_pred, sample_weight=None):
                return super().update_state(1, sample_weight)
        # Build and compile model within strategy scope.
        with strategy.scope():
            inputs = layers_module.Input((1,))
            outputs = layers_module.Dense(1)(inputs)
            model = training_module.Model(inputs, outputs)
            model.compile(
                loss="mse", metrics=[BatchCount()], steps_per_execution=2
            )
        BATCH_SIZE = 10
        x, y = np.ones((400, 1)), np.ones((400, 1))
        val_x, val_y = np.ones((100, 1)), np.ones((100, 1))
        train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
        train_dataset = train_dataset.batch(BATCH_SIZE)
        val_dataset = tf.data.Dataset.from_tensor_slices((val_x, val_y))
        val_dataset = val_dataset.batch(BATCH_SIZE)
        train_batch_count = x.shape[0] // BATCH_SIZE
        val_batch_count = val_x.shape[0] // BATCH_SIZE
        # Verify that Model fit doesn't drop any batches
        hist = model.fit(
            train_dataset,
            steps_per_epoch=train_batch_count,
            validation_data=val_dataset,
            validation_steps=val_batch_count,
            epochs=5,
        )
        # Verify that min and max value of batch count metric is accurate
        self.assertEqual(max(hist.history["batch_count"]), train_batch_count)
        self.assertEqual(min(hist.history["batch_count"]), train_batch_count)
        self.assertEqual(max(hist.history["val_batch_count"]), val_batch_count)
        self.assertEqual(min(hist.history["val_batch_count"]), val_batch_count)
    @test_combinations.run_all_keras_modes(always_skip_v1=True)
    def test_pss_evaluate_metric_batch_counter(self):
        """Verify that metric data is complete during evaluate when using
        ParameterServerStrategy
        """
        strategy = tf.distribute.ParameterServerStrategy(
            self.cluster_resolver,
            variable_partitioner=None,
        )
        # Same batch-counting metric as in the fit test above.
        class BatchCount(metrics_module.Sum):
            def __init__(self, name="batch_count", dtype=tf.int64):
                super().__init__(name=name, dtype=dtype)
            def update_state(self, y_true, y_pred, sample_weight=None):
                return super().update_state(1, sample_weight)
        # Build and compile model within strategy scope.
        with strategy.scope():
            inputs = layers_module.Input((1,))
            outputs = layers_module.Dense(1)(inputs)
            model = training_module.Model(inputs, outputs)
            model.compile(
                loss="mse", metrics=[BatchCount()], steps_per_execution=2
            )
        BATCH_SIZE = 10
        x, y = np.ones((400, 1)), np.ones((400, 1))
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        batch_count = x.shape[0] // BATCH_SIZE
        # Verify that Model Eval batch counter metric is accurate.
        eval_results = model.evaluate(dataset, steps=batch_count)
        self.assertEqual(eval_results[-1], batch_count)
# Multi-process test entry point: enable TF2 behavior before spawning.
if __name__ == "__main__":
    tf.enable_v2_behavior()
    multi_process_runner.test_main()
|
tf-keras/tf_keras/integration_test/parameter_server_training_metric_test.py/0
|
{
"file_path": "tf-keras/tf_keras/integration_test/parameter_server_training_metric_test.py",
"repo_id": "tf-keras",
"token_count": 2232
}
| 233 |
build_file: "tf-keras/tf_keras/kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
|
tf-keras/tf_keras/kokoro/github/ubuntu/gpu/presubmit.cfg/0
|
{
"file_path": "tf-keras/tf_keras/kokoro/github/ubuntu/gpu/presubmit.cfg",
"repo_id": "tf-keras",
"token_count": 76
}
| 234 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 1D convolution layer."""
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.layers.convolutional.base_conv import Conv
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Conv1D", "keras.layers.Convolution1D")
class Conv1D(Conv):
    """1D convolution layer (e.g. temporal convolution).
    This layer creates a convolution kernel that is convolved
    with the layer input over a single spatial (or temporal) dimension
    to produce a tensor of outputs.
    If `use_bias` is True, a bias vector is created and added to the outputs.
    Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model,
    provide an `input_shape` argument
    (tuple of integers or `None`, e.g.
    `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
    or `(None, 128)` for variable-length sequences of 128-dimensional vectors.
    Examples:
    >>> # The inputs are 128-length vectors with 10 timesteps, and the
    >>> # batch size is 4.
    >>> input_shape = (4, 10, 128)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv1D(
    ... 32, 3, activation='relu',input_shape=input_shape[1:])(x)
    >>> print(y.shape)
    (4, 8, 32)
    >>> # With extended batch shape [4, 7] (e.g. weather data where batch
    >>> # dimensions correspond to spatial location and the third dimension
    >>> # corresponds to time.)
    >>> input_shape = (4, 7, 10, 128)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv1D(
    ... 32, 3, activation='relu', input_shape=input_shape[2:])(x)
    >>> print(y.shape)
    (4, 7, 8, 32)
    Args:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of a single integer,
        specifying the length of the 1D convolution window.
      strides: An integer or tuple/list of a single integer,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"same"` or `"causal"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding with zeros
        evenly to the left/right or up/down of the input such that output has
        the same height/width dimension as the input.
        `"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
        does not depend on `input[t+1:]`. Useful when modeling temporal data
        where the model should not violate the temporal order.
        See [WaveNet: A Generative Model for Raw Audio, section
        2.1](https://arxiv.org/abs/1609.03499).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape `(batch_size, width,
        channels)` while `channels_first` corresponds to inputs with shape
        `(batch_size, channels, width)`. Note that the `channels_first` format
        is currently not supported by TensorFlow on CPU.
      dilation_rate: an integer or tuple/list of a single integer, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      groups: A positive integer specifying the number of groups in which the
        input is split along the channel axis. Each group is convolved
        separately with `filters / groups` filters. The output is the
        concatenation of all the `groups` results along the channel axis.
        Input channels and `filters` must both be divisible by `groups`.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied
        (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix
        (see `keras.initializers`). Defaults to 'glorot_uniform'.
      bias_initializer: Initializer for the bias vector
        (see `keras.initializers`). Defaults to 'zeros'.
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector
        (see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation")
        (see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix
        (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector
        (see `keras.constraints`).
    Input shape:
      3+D tensor with shape: `batch_shape + (steps, input_dim)`
    Output shape:
      3+D tensor with shape: `batch_shape + (new_steps, filters)`
      `steps` value might have changed due to padding or strides.
    Returns:
      A tensor of rank 3 representing
      `activation(conv1d(inputs, kernel) + bias)`.
    Raises:
      ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """
    @utils.allow_initializer_layout
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format="channels_last",
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        # All convolution logic lives in the shared `Conv` base class; this
        # subclass only fixes `rank=1` and resolves string/config arguments
        # (activations, initializers, regularizers, constraints) into
        # concrete objects via the respective `get` lookups.
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )
# Alias: `Convolution1D` is a backwards-compatible synonym for `Conv1D`.
Convolution1D = Conv1D
|
tf-keras/tf_keras/layers/convolutional/conv1d.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/convolutional/conv1d.py",
"repo_id": "tf-keras",
"token_count": 2798
}
| 235 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Activation layer."""
from tf_keras import activations
from tf_keras.engine.base_layer import Layer
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function, such as `tf.nn.relu`, or string name of
built-in activation function, such as "relu".
Usage:
>>> layer = tf.keras.layers.Activation('relu')
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
>>> layer = tf.keras.layers.Activation(tf.nn.relu)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
|
tf-keras/tf_keras/layers/core/activation.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/core/activation.py",
"repo_id": "tf-keras",
"token_count": 774
}
| 236 |
"""Builds a vocabulary from inputs to the layer."""
import random
import string
import tensorflow as tf
from tensorflow.python.util.tf_export import keras_export
from tf_keras.layers import Layer
@keras_export("keras.layers.experimental.DynamicLookup")
class DynamicLookup(Layer):
"""A layer that builds a vocabulary from inputs.
This layer maintains a vocabulary that is continuously updated based on the
inputs passed in every forward pass. The frequency of the input is tracked
and used to maintain a vocabulary. The very last index will be treated as
the index. If `vocabulary_size=10`, OOV index will be 9.
Args:
vocabulary_size: Integer value representing size of the vocabulary to
build.
initial_vocabulary: The vocabulary to initialize the layer with. If a 1D
tensor is provided, the vocabulary will be initialized with that tensor.
If a `tf.DType` object is provided, a random tensor of that dtype and of
length `vocabulary_size` will be generated as the initial vocabulary.
Supported `tf.DType` values include `tf.int32`, `tf.int64` and
`tf.string`.
eviction_policy: The eviction policy for the vocabulary. Available options
are string values like "LFU" (Least Frequently Used) and *more to come*.
If not specified, the default eviction policy is "LFU". Expects a
string.
**kwargs: Arguments for super class.
Attributes: get_vocabulary(): Returns a tensor representing the current
vocabulary of the layer. If you want to look up the vocabulary keys given
a set of indices, you can simply use `tf.gather(vocabulary, indices)`.
Example:
Here is an example to demonstrate how to use the DynamicLookup layer
```
vocabulary_size = 3
eviction_policy = "LFU"
vocab = tf.constant(["apple", "banana", "cherry"])
layer = DynamicLookup(
vocabulary_size,
vocab,
eviction_policy=eviction_policy,
)
inputs = tf.constant([
["apple", "banana"],
])
outputs = layer(inputs)
tf.print(outputs)
# you get the following output
[[0 1]]
# get top k vocab
top_k_vocab = layer.get_top_vocabulary(2)
tf.print(top_k_vocab)
# you get the following output
["apple", "banana"]
```
If you want to checkpoint the vocabulary or vocabulary frequency, see
the following example
```
checkpoint =
tf.train.Checkpoint(vocabulary=self.vocabulary)
checkpoint.write(filepath)
```
"""
def __init__(
self,
vocabulary_size,
initial_vocabulary,
eviction_policy="LFU",
**kwargs,
):
"""Initializes the DynamicLookup layer."""
super().__init__(**kwargs)
self.vocabulary_size = vocabulary_size
self.eviction_policy = eviction_policy
if tf.is_tensor(initial_vocabulary):
self.initial_vocabulary = initial_vocabulary
elif initial_vocabulary in (
tf.string,
tf.int32,
tf.int64,
):
self.initial_vocabulary = (
DynamicLookup._generate_random_initial_vocab(
vocabulary_size, initial_vocabulary
)
)
else:
raise ValueError(
"Either specify the initial vocabulary or provide a "
"valid dtype. The dtype argument must be one of the "
"following: tf.string, tf.int32, tf.int64."
)
# maintain a 20% bigger hash table
self.internal_table_size = tf.cast(
tf.floor(
tf.multiply(
tf.cast(self.vocabulary_size, dtype=tf.float32), 1.2
)
),
dtype=tf.int32,
)
self.vocabulary_dtype = self.initial_vocabulary.dtype
if self.eviction_policy == "LFU":
self.vocabulary_table_keys = tf.Variable(
initial_value=pad_tensor(
self.initial_vocabulary, self.internal_table_size
),
shape=tf.TensorShape(self.internal_table_size),
dtype=self.vocabulary_dtype,
trainable=False,
name="vocabulary_table_keys",
per_worker_variable=True,
)
self.vocabulary_table_values = tf.Variable(
initial_value=pad_tensor(
tf.zeros_like(self.initial_vocabulary, dtype=tf.int32),
self.internal_table_size,
),
shape=tf.TensorShape(self.internal_table_size),
dtype=tf.int32,
trainable=False,
name="vocabulary_table_values",
per_worker_variable=True,
)
else:
raise ValueError(
"{} eviction policy is currently unsupported by DynamicLookup"
" layer."
" It currently only supports `LFU`".format(self.eviction_policy)
)
# TODO(b/268243335): add more eviction policy
# TODO(b/268243996): provide multiple OOV
def build(self, input_shape=None):
self.vocabulary = self.add_weight(
shape=self.initial_vocabulary.shape,
dtype=self.vocabulary_dtype,
initializer=tf.constant_initializer(
self.initial_vocabulary.numpy()
),
trainable=False,
name="vocabulary",
)
super().build(input_shape)
def call(self, inputs, learn_vocab=True):
"""Learn vocabulary from inputs and perform vocabulary lookup.
Args:
inputs: Input tensor, or dict/list/tuple of input tensors.
learn_vocab: A boolean value that specifies whether the vocabulary
should be learned from the layer inputs or not. Defaults to True.
Returns:
A tensor or list/tuple of tensors.
"""
flattened_inputs = tf.reshape(inputs, [-1])
# get unique values from inputs
unique, _ = tf.unique(flattened_inputs)
unique = tf.cast(unique, dtype=self.vocabulary_dtype)
# learn vocab form inputs
if learn_vocab and self.eviction_policy == "LFU":
self.update_internal_vocabulary(unique)
# lookup for inputs in self.vocabulary
top_k_vocab = self.vocabulary
lookup_values = tf.expand_dims(flattened_inputs, axis=-1)
condition = tf.reduce_any(
tf.equal(top_k_vocab, tf.expand_dims(lookup_values, -1)), axis=-1
)
# the very last index will be the OOV index
indices = tf.where(
condition,
tf.argmax(
tf.equal(top_k_vocab, tf.expand_dims(lookup_values, -1)),
axis=-1,
),
self.vocabulary_size,
)
# reshape output to the same shape as input
out = tf.reshape(tf.squeeze(indices), tf.shape(inputs))
return out
def update_internal_vocabulary(self, unique):
# get new keys
unpadded_keys = remove_padding(self.vocabulary_table_keys)
unpadded_values = remove_padding(self.vocabulary_table_values)
table_expanded = tf.expand_dims(unpadded_keys, axis=0)
unique_expanded = tf.expand_dims(unique, axis=0)
new_keys = tf.sets.difference(
unique_expanded, table_expanded, aminusb=True
)
number_of_new_keys = tf.shape(new_keys.values)[0]
# get number of keys to be removed from vocab_frequency
number_of_keys_to_remove = (
tf.shape(unpadded_keys)[0]
- self.internal_table_size
+ number_of_new_keys
)
number_of_keys_to_remove = tf.cast(number_of_keys_to_remove, tf.int32)
number_of_keys_to_remove = tf.maximum(number_of_keys_to_remove, 0)
# remove old keys
updated_keys, updated_values = self._remove_old_keys(
unpadded_keys,
unpadded_values,
number_of_keys_to_remove,
)
# add new keys
self._add_new_keys(
updated_keys,
updated_values,
unique,
new_keys,
)
return unique
def _remove_old_keys(self, unpadded_keys, unpadded_values, n):
"""remove old keys."""
updated_keys, updated_values = None, None
if self.eviction_policy == "LFU":
# LFU eviction
# negate the values of counts to find the lower n keys to remove
negative_count = tf.math.negative(unpadded_values)
# get index of lower n counts
_, lower_n_index = tf.nn.top_k(negative_count, k=n)
# gather keys that needs to be removed
keys_to_remove = tf.gather(unpadded_keys, lower_n_index)
# get masks for keys not present in inputs
mask = tf.reduce_all(
unpadded_keys[:, tf.newaxis] != keys_to_remove, axis=1
)
# updated keys and values with least frequent keys removed
updated_keys = tf.boolean_mask(
unpadded_keys,
mask,
)
updated_values = tf.boolean_mask(
unpadded_values,
mask,
)
return updated_keys, updated_values
def _add_new_keys(self, updated_keys, updated_values, unique, new_keys):
"""Add new keys and update internal vocabulary table."""
if self.eviction_policy == "LFU":
# increment values of old keys when present in current inputs
matches = tf.where(
tf.equal(tf.expand_dims(updated_keys, axis=1), unique)
)[:, 0]
updates = tf.ones_like(matches, dtype=tf.int32)
matches = tf.expand_dims(matches, axis=-1)
values_2 = tf.tensor_scatter_nd_add(
updated_values, matches, updates
)
# add new keys and corresponding values = 1
values_difference = tf.ones_like(new_keys.values, dtype=tf.int32)
# concatenate old keys and new keys and pad
updated_keys = pad_tensor(
tf.concat([updated_keys, new_keys.values], axis=0),
self.internal_table_size,
)
self.vocabulary_table_keys.assign(updated_keys)
# concatenate updated old values and new values and pad
updated_values = pad_tensor(
tf.concat([values_2, values_difference], axis=0),
self.internal_table_size,
)
self.vocabulary_table_values.assign(updated_values)
return unique
def get_top_vocabulary(self, k):
"""Get top k vocabulary keys."""
top_k_vocab = None
if self.eviction_policy == "LFU":
values_len = tf.shape(self.vocabulary_table_keys)[0]
if values_len > k:
_, indices = tf.nn.top_k(self.vocabulary_table_values, k=k)
else:
_, indices = tf.nn.top_k(
self.vocabulary_table_values, k=values_len
)
top_k_vocab = tf.gather(self.vocabulary_table_keys, indices)
return top_k_vocab
def get_vocabulary(self):
return self.vocabulary
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"initial_vocabulary": self.initial_vocabulary.numpy().tolist(),
"eviction_policy": self.eviction_policy,
}
)
return config
def save_assets(self, dir_path):
vocabulary = self.vocabulary.numpy().tolist()
vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
with open(vocabulary_filepath, "w") as f:
f.write("\n".join([str(w) for w in vocabulary]))
def _generate_random_initial_vocab(
vocabulary_size, dtype
): # pylint: disable=no-self-argument
if dtype == tf.string:
chars = string.ascii_letters
random_strings = [
"".join([random.choice(chars) for _ in range(10)])
for _ in range(vocabulary_size)
]
random_vocab = tf.constant(random_strings, dtype=tf.string)
elif dtype == tf.int32:
random_vocab = tf.random.uniform(
shape=[vocabulary_size],
minval=0,
maxval=vocabulary_size,
dtype=tf.int32,
)
elif dtype == tf.int64:
random_vocab = tf.random.uniform(
shape=[vocabulary_size],
minval=0,
maxval=vocabulary_size,
dtype=tf.int64,
)
else:
raise ValueError(
"Supported dtype for initial vocabulary include `tf.int32`,"
" `tf.int64`, or `tf.string`. But got dtype = {}".format(dtype)
)
return random_vocab
def pad_tensor(tensor, n):
    """Pad (or truncate) a 1-D tensor so its length is exactly `n`.

    String tensors are padded with the "unk" sentinel, numeric tensors
    with -1 — the same sentinels `remove_padding` strips back out.
    """
    fill_value = "unk" if tensor.dtype == tf.string else -1
    deficit = tf.maximum(n - tf.shape(tensor)[0], 0)
    padded = tf.pad(tensor, [[0, deficit]], constant_values=fill_value)
    # Slice in case the input was already longer than `n`.
    return padded[:n]
def remove_padding(tensor):
    """Drop the pad sentinels ("unk" for strings, -1 for ints) from a tensor.

    Inverse of the padding applied by `pad_tensor`.
    """
    sentinel = "unk" if tensor.dtype == tf.string else -1
    keep = tf.reshape(tensor != sentinel, shape=[-1])
    return tf.boolean_mask(tensor, keep)
|
tf-keras/tf_keras/layers/experimental/dynamic_lookup.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/experimental/dynamic_lookup.py",
"repo_id": "tf-keras",
"token_count": 6515
}
| 237 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for layers that can merge several inputs into one."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.utils import tf_utils
class _Merge(Layer):
    """Generic merge layer for elementwise merge functions.

    Used to implement `Sum`, `Average`, etc.
    """

    def __init__(self, **kwargs):
        """Initializes a Merge layer.

        Args:
          **kwargs: standard layer keyword arguments.
        """
        super().__init__(**kwargs)
        self.supports_masking = True

    def _merge_function(self, inputs):
        # Subclasses implement the actual elementwise combination here.
        raise NotImplementedError

    def _compute_elemwise_op_output_shape(self, shape1, shape2):
        """Computes the shape of the resultant of an elementwise operation.

        Args:
            shape1: tuple or None. Shape of the first tensor
            shape2: tuple or None. Shape of the second tensor

        Returns:
            expected output shape when an element-wise operation is
            carried out on 2 tensors with shapes shape1 and shape2.
            tuple or None.

        Raises:
            ValueError: if shape1 and shape2 are not compatible for
            element-wise operations.
        """
        if None in [shape1, shape2]:
            # Unknown rank on either side: output rank is unknown too.
            return None
        elif len(shape1) < len(shape2):
            # Normalize so that shape1 is always the higher-rank shape.
            return self._compute_elemwise_op_output_shape(shape2, shape1)
        elif not shape2:
            return shape1
        # Leading dims of the longer shape pass through; trailing dims are
        # broadcast pairwise against shape2.
        output_shape = list(shape1[: -len(shape2)])
        for i, j in zip(shape1[-len(shape2) :], shape2):
            if i is None or j is None:
                output_shape.append(None)
            elif i == 1:
                # Size-1 dim broadcasts to the other dim's size.
                output_shape.append(j)
            elif j == 1:
                output_shape.append(i)
            else:
                if i != j:
                    raise ValueError(
                        "Inputs have incompatible shapes. "
                        f"Received shapes {shape1} and {shape2}"
                    )
                output_shape.append(i)
        return tuple(output_shape)

    @tf_utils.shape_type_conversion
    def build(self, input_shape):
        # Used purely for shape validation.
        if not isinstance(input_shape[0], tuple):
            raise ValueError(
                "A merge layer should be called on a list of inputs. "
                f"Received: input_shape={input_shape} (not a list of shapes)"
            )
        if len(input_shape) < 1:
            raise ValueError(
                "A merge layer should be called "
                "on a list of at least 1 input. "
                f"Got {len(input_shape)} inputs. "
                f"Full input_shape received: {input_shape}"
            )
        # All known batch sizes must agree.
        batch_sizes = {s[0] for s in input_shape if s} - {None}
        if len(batch_sizes) > 1:
            raise ValueError(
                "Cannot merge tensors with different batch sizes. "
                f"Got tensors with shapes {input_shape}"
            )
        if input_shape[0] is None:
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        # Fold every input's (batch-less) shape into a broadcast result to
        # validate elementwise compatibility.
        for i in range(1, len(input_shape)):
            if input_shape[i] is None:
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(
                output_shape, shape
            )
        # If the inputs have different ranks, we have to reshape them
        # to make them broadcastable.
        if None not in input_shape and len(set(map(len, input_shape))) == 1:
            self._reshape_required = False
        else:
            self._reshape_required = True

    def call(self, inputs):
        # Merge the list of input tensors elementwise, broadcasting ranks
        # when `build` determined reshaping is required.
        if not isinstance(inputs, (list, tuple)):
            raise ValueError(
                "A merge layer should be called on a list of inputs. "
                f"Received: inputs={inputs} (not a list of tensors)"
            )
        if self._reshape_required:
            reshaped_inputs = []
            input_ndims = list(map(backend.ndim, inputs))
            if None not in input_ndims:
                # If ranks of all inputs are available,
                # we simply expand each of them at axis=1
                # until all of them have the same rank.
                max_ndim = max(input_ndims)
                for x in inputs:
                    x_ndim = backend.ndim(x)
                    for _ in range(max_ndim - x_ndim):
                        x = tf.expand_dims(x, axis=1)
                    reshaped_inputs.append(x)
                return self._merge_function(reshaped_inputs)
            else:
                # Transpose all inputs so that batch size is the last dimension.
                # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... ,
                # batch_size)
                transposed = False
                for x in inputs:
                    x_ndim = backend.ndim(x)
                    if x_ndim is None:
                        # Rank unknown at graph time: flatten to 2-D, move the
                        # batch axis last, then restore the remaining dims.
                        x_shape = tf.shape(x)
                        batch_size = x_shape[0]
                        new_shape = backend.concatenate(
                            [x_shape[1:], tf.expand_dims(batch_size, axis=-1)]
                        )
                        x_transposed = tf.reshape(
                            x,
                            tf.stack(
                                [batch_size, tf.reduce_prod(x_shape[1:])],
                                axis=0,
                            ),
                        )
                        x_transposed = tf.transpose(x_transposed, perm=(1, 0))
                        x_transposed = tf.reshape(x_transposed, new_shape)
                        reshaped_inputs.append(x_transposed)
                        transposed = True
                    elif x_ndim > 1:
                        dims = list(range(1, x_ndim)) + [0]
                        reshaped_inputs.append(tf.transpose(x, perm=dims))
                        transposed = True
                    else:
                        # We don't transpose inputs if they are 1D vectors or
                        # scalars.
                        reshaped_inputs.append(x)
                y = self._merge_function(reshaped_inputs)
                y_ndim = backend.ndim(y)
                if transposed:
                    # If inputs have been transposed, we have to transpose the
                    # output too.
                    if y_ndim is None:
                        y_shape = tf.shape(y)
                        y_ndim = tf.shape(y_shape)[0]
                        batch_size = y_shape[y_ndim - 1]
                        new_shape = backend.concatenate(
                            [
                                tf.expand_dims(batch_size, axis=-1),
                                y_shape[: y_ndim - 1],
                            ]
                        )
                        y = tf.reshape(y, (-1, batch_size))
                        y = tf.transpose(y, perm=(1, 0))
                        y = tf.reshape(y, new_shape)
                    elif y_ndim > 1:
                        dims = [y_ndim - 1] + list(range(y_ndim - 1))
                        y = tf.transpose(y, perm=dims)
                return y
        else:
            # All inputs share the same known rank: merge directly.
            return self._merge_function(inputs)

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Broadcast all (batch-less) input shapes together, then re-attach
        # the common batch dimension if one is known.
        if input_shape[0] is None:
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        for i in range(1, len(input_shape)):
            if input_shape[i] is None:
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(
                output_shape, shape
            )
        batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
        if len(batch_sizes) == 1:
            output_shape = (list(batch_sizes)[0],) + output_shape
        else:
            output_shape = (None,) + output_shape
        return output_shape

    def compute_mask(self, inputs, mask=None):
        # The merged mask is the elementwise AND of all non-None input masks.
        if mask is None:
            return None
        if not isinstance(mask, (tuple, list)):
            raise ValueError(f"`mask` should be a list. Received: mask={mask}")
        if not isinstance(inputs, (tuple, list)):
            raise ValueError(
                f"`inputs` should be a list. Received: inputs={inputs}"
            )
        if len(mask) != len(inputs):
            raise ValueError(
                "The lists `inputs` and `mask` should have the same length. "
                f"Received: inputs={inputs} of length {len(inputs)}, and "
                f"mask={mask} of length {len(mask)}"
            )
        if all(m is None for m in mask):
            return None
        masks = [tf.expand_dims(m, axis=0) for m in mask if m is not None]
        return backend.all(
            backend.concatenate(masks, axis=0), axis=0, keepdims=False
        )

    def get_config(self):
        # No configurable state beyond the base layer's.
        return super().get_config()
|
tf-keras/tf_keras/layers/merging/base_merge.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/merging/base_merge.py",
"repo_id": "tf-keras",
"token_count": 5144
}
| 238 |
# Copyright 2022 The TF-Keras Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.initializers import Constant
from tf_keras.layers import GroupNormalization
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
def _build_group_normalization_model(norm):
    """Wrap `norm` in a compiled single-layer Sequential model for testing."""
    model = keras.models.Sequential([norm])
    model.compile(
        optimizer="rmsprop",
        loss="mse",
        run_eagerly=test_utils.should_run_eagerly(),
    )
    return model
@test_utils.run_v2_only
class GroupNormalizationTest(test_combinations.TestCase):
    """Unit tests for the `GroupNormalization` layer."""

    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_trainable_weights(self):
        # With scale/center disabled, no weights should be created.
        layer = GroupNormalization(groups=1, scale=False, center=False)
        layer.build((None, 3, 4))
        self.assertEqual(len(layer.trainable_weights), 0)
        self.assertEqual(len(layer.weights), 0)
        # With scale/center enabled, gamma and beta should be created.
        layer = GroupNormalization(groups=1, scale=True, center=True)
        layer.build((None, 3, 4))
        self.assertEqual(len(layer.trainable_weights), 2)
        self.assertEqual(len(layer.weights), 2)

    @test_combinations.run_all_keras_modes
    def test_groupnorm(self):
        # Smoke-test the layer through the generic layer_test harness with
        # regularizers and constraints attached.
        test_utils.layer_test(
            GroupNormalization,
            kwargs={
                "gamma_regularizer": keras.regularizers.l2(0.01),
                "beta_regularizer": keras.regularizers.l2(0.01),
            },
            input_shape=(3, 4, 32),
        )
        test_utils.layer_test(
            GroupNormalization,
            kwargs={
                "groups": 4,
                "gamma_constraint": keras.constraints.UnitNorm(),
                "beta_constraint": keras.constraints.UnitNorm(),
            },
            input_shape=(3, 4, 4),
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_1d(self):
        # Golden-value check on a 1-D feature vector for 1 and 2 groups.
        layer_with_1_group = GroupNormalization(
            groups=1, axis=-1, input_shape=(8,), scale=False, center=False
        )
        layer_with_2_groups = GroupNormalization(
            groups=2, axis=1, input_shape=(8,), scale=False, center=False
        )
        inputs = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0], shape=(1, 8)
        )
        expected_output_1_group = tf.constant(
            [-0.898, -0.898, 0.539, 0.539, 1.257, 1.257, -0.180, -1.616],
            shape=(1, 8),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_1_group)(inputs),
            expected_output_1_group,
            atol=1e-3,
        )
        expected_output_2_groups = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 0.904, 0.904, -0.301, -1.507], shape=(1, 8)
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_2_groups)(inputs),
            expected_output_2_groups,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_1d_with_mask(self):
        # Masked positions must be excluded from the normalization statistics.
        layer_with_1_group = GroupNormalization(
            groups=1, axis=-1, input_shape=(8,), scale=False, center=False
        )
        layer_with_2_groups = GroupNormalization(
            groups=2, axis=1, input_shape=(8,), scale=False, center=False
        )
        inputs = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0], shape=(1, 8)
        )
        mask1 = tf.constant(
            [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(1, 8)
        )
        mask2 = tf.constant(
            [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0], shape=(1, 8)
        )
        expected_output_1_group = tf.constant(
            [-0.706, -0.706, 1.413, 1.413, 2.473, 2.473, 0.353, -1.766],
            shape=(1, 8),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_1_group)(
                inputs, mask=mask1
            ),
            expected_output_1_group,
            atol=1e-3,
        )
        expected_output_2_groups = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 0.999, 0.999, 0.0, -0.999], shape=(1, 8)
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_2_groups)(
                inputs, mask=mask2
            ),
            expected_output_2_groups,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_1d_with_non_binary_mask(self):
        # Non-binary mask values act as per-position weights on the stats.
        norm = GroupNormalization(
            groups=1, axis=-1, input_shape=(8,), scale=False, center=False
        )
        inputs = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0], shape=(1, 8)
        )
        mask = tf.constant(
            [0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(1, 8)
        )
        expected_output = tf.constant(
            [-0.999, -0.999, 0.999, 0.999, 1.999, 1.999, 0.0, -1.999],
            shape=(1, 8),
        )
        self.assertAllClose(
            _build_group_normalization_model(norm)(inputs, mask=mask),
            expected_output,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_2d(self):
        # Golden-value check on rank-3 input (batch, steps, features).
        layer_with_1_group = GroupNormalization(
            groups=1, axis=-1, input_shape=(2, 4), scale=False, center=False
        )
        layer_with_2_groups = GroupNormalization(
            groups=2, axis=2, input_shape=(2, 4), scale=False, center=False
        )
        inputs = tf.constant(
            [[-1.0, -1.0, 2.0, 2.0], [1.0, 1.0, 0, -2.0]], shape=(1, 2, 4)
        )
        expected_output_1_group = tf.constant(
            [[-0.898, -0.898, 1.257, 1.257], [0.539, 0.539, -0.180, -1.616]],
            shape=(1, 2, 4),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_1_group)(inputs),
            expected_output_1_group,
            atol=1e-3,
        )
        expected_output_2_groups = tf.constant(
            [[-1.0, -1.0, 0.904, 0.904], [1.0, 1.0, -0.301, -1.507]],
            shape=(1, 2, 4),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_2_groups)(inputs),
            expected_output_2_groups,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_2d_with_mask(self):
        # Masked variant of the rank-3 golden-value check.
        layer_with_1_group = GroupNormalization(
            groups=1, axis=-1, input_shape=(2, 4), scale=False, center=False
        )
        layer_with_2_groups = GroupNormalization(
            groups=2, axis=2, input_shape=(2, 4), scale=False, center=False
        )
        inputs = tf.constant(
            [[-1.0, -1.0, 2.0, 2.0], [1.0, 1.0, 0, -2.0]], shape=(1, 2, 4)
        )
        mask1 = tf.constant(
            [
                [
                    1.0,
                    1.0,
                    0.0,
                    0.0,
                ],
                [1.0, 0.0, 0.0, 0.0],
            ],
            shape=(1, 2, 4),
        )
        mask2 = tf.constant(
            [
                [
                    1.0,
                    1.0,
                    0.0,
                    1.0,
                ],
                [1.0, 1.0, 0.0, 1.0],
            ],
            shape=(1, 2, 4),
        )
        expected_output_1_group = tf.constant(
            [[-0.706, -0.706, 2.473, 2.473], [1.413, 1.413, 0.353, -1.766]],
            shape=(1, 2, 4),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_1_group)(
                inputs, mask=mask1
            ),
            expected_output_1_group,
            atol=1e-3,
        )
        expected_output_2_groups = tf.constant(
            [[-1.0, -1.0, 0.999, 0.999], [1.0, 1.0, 0.0, -0.999]],
            shape=(1, 2, 4),
        )
        self.assertAllClose(
            _build_group_normalization_model(layer_with_2_groups)(
                inputs, mask=mask2
            ),
            expected_output_2_groups,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_mask_broadcasting(self):
        # A mask with a size-1 channel dim must broadcast across channels.
        images = tf.ones((1, 2, 4, 3))  # NHWC
        mask = tf.random.uniform((1, 2, 4, 1)) < 0.5  # NHWC
        norm = GroupNormalization(
            groups=3, axis=-1, input_shape=(2, 4, 9), scale=False, center=False
        )
        output = norm(images, mask=mask)
        self.assertEqual(output.shape, (1, 2, 4, 3))

    @test_combinations.run_all_keras_modes
    def test_correctness_instance_norm(self):
        # groups == channels degenerates to instance normalization.
        instance_norm_layer = GroupNormalization(
            groups=4, axis=-1, input_shape=(2, 4), scale=False, center=False
        )
        inputs = tf.constant(
            [[-1.0, 1.0, 0, 2.0], [1.0, 3.0, -4, -2.0]], shape=(1, 2, 4)
        )
        expected_instance_norm_output = tf.constant(
            [[-1.0, -1.0, 1.0, 1.0], [1.0, 1.0, -1.0, -1.0]], shape=(1, 2, 4)
        )
        self.assertAllClose(
            _build_group_normalization_model(instance_norm_layer)(inputs),
            expected_instance_norm_output,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_with_centering(self):
        # beta initialized to 10 should shift the normalized output by 10.
        normalization_layer = GroupNormalization(
            groups=2,
            axis=-1,
            input_shape=(8,),
            scale=False,
            center=True,
            beta_initializer=Constant(10),
        )
        inputs = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0], shape=(1, 8)
        )
        expected_output = tf.constant(
            [9.0, 9.0, 11.0, 11.0, 10.904, 10.904, 9.699, 8.493], shape=(1, 8)
        )
        self.assertAllClose(
            _build_group_normalization_model(normalization_layer)(inputs),
            expected_output,
            atol=1e-3,
        )

    @test_combinations.run_all_keras_modes
    def test_correctness_with_scaling(self):
        # gamma initialized to 2 should scale the normalized output by 2.
        normalization_layer = GroupNormalization(
            groups=2,
            axis=-1,
            input_shape=(8,),
            scale=True,
            center=False,
            gamma_initializer=Constant(2),
        )
        inputs = tf.constant(
            [-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0], shape=(1, 8)
        )
        expected_output = tf.constant(
            [-2.0, -2.0, 2.0, 2.0, 1.809, 1.808, -0.602, -3.014], shape=(1, 8)
        )
        self.assertAllClose(
            _build_group_normalization_model(normalization_layer)(inputs),
            expected_output,
            atol=1e-3,
        )

    def test_validates_groups_against_channels(self):
        # `groups` must evenly divide, and not exceed, the channel count.
        with self.assertRaisesRegex(
            ValueError, r"must be a multiple of the number of channels"
        ):
            norm = GroupNormalization(groups=3, axis=-1)
            norm.build(input_shape=(2, 10))
        with self.assertRaisesRegex(
            ValueError, r"cannot be more than the number of channels"
        ):
            norm = GroupNormalization(groups=32, axis=-1)
            norm.build(input_shape=(2, 8))

    def test_validates_known_number_of_channels(self):
        # The normalization axis must have a statically known dimension.
        with self.assertRaisesRegex(
            ValueError, r"tensor should have a defined dimension"
        ):
            norm = GroupNormalization(axis=-1)
            norm.build(input_shape=(1, 32, None))

    def test_rejects_invalid_axis(self):
        # Axis values outside the input rank must be rejected at build time.
        with self.assertRaisesRegex(
            ValueError, r"Invalid value for `axis` argument"
        ):
            norm = GroupNormalization(axis=-4)
            norm.build(input_shape=(64, 32, 32))
        with self.assertRaisesRegex(
            ValueError, r"Invalid value for `axis` argument"
        ):
            norm = GroupNormalization(axis=3)
            norm.build(input_shape=(64, 32, 32))
if __name__ == "__main__":
tf.test.main()
|
tf-keras/tf_keras/layers/normalization/group_normalization_test.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/normalization/group_normalization_test.py",
"repo_id": "tf-keras",
"token_count": 6667
}
| 239 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for pooling 1D layers."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
class Pooling1D(Layer):
    """Pooling layer for arbitrary pooling functions, for 1D inputs.

    This class only exists for code reuse. It will never be an exposed API.
    It adapts a 2D pooling function to 1D input by inserting a dummy
    spatial dimension before pooling and removing it afterwards.

    Args:
        pool_function: The pooling function to apply, e.g.
            `tf.nn.max_pool2d`.
        pool_size: An integer or tuple/list of a single integer,
            representing the size of the pooling window.
        strides: An integer or tuple/list of a single integer, specifying
            the strides of the pooling operation. Falls back to
            `pool_size` when `None`.
        padding: A string. The padding method, either 'valid' or 'same'.
            Case-insensitive.
        data_format: A string, one of `channels_last` (default) or
            `channels_first`. `channels_last` corresponds to inputs with
            shape `(batch, steps, features)`; `channels_first` corresponds
            to `(batch, features, steps)`.
        name: A string, the name of the layer.
    """

    def __init__(
        self,
        pool_function,
        pool_size,
        strides,
        padding="valid",
        data_format="channels_last",
        name=None,
        **kwargs
    ):
        super().__init__(name=name, **kwargs)
        # Resolve unspecified options: global image format for data_format,
        # and the window size itself for strides.
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:
            strides = pool_size
        self.input_spec = InputSpec(ndim=3)
        self.pool_function = pool_function
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.pool_size = conv_utils.normalize_tuple(pool_size, 1, "pool_size")
        self.strides = conv_utils.normalize_tuple(
            strides, 1, "strides", allow_zero=True
        )

    def call(self, inputs):
        # Insert a dummy spatial axis so the 2D pooling op can be reused,
        # then squeeze it back out of the result.
        dummy_axis = 2 if self.data_format == "channels_last" else 3
        expanded = tf.expand_dims(inputs, dummy_axis)
        pooled = self.pool_function(
            expanded,
            self.pool_size + (1,),
            strides=self.strides + (1,),
            padding=self.padding,
            data_format=self.data_format,
        )
        return tf.squeeze(pooled, dummy_axis)

    def compute_output_shape(self, input_shape):
        shape = tf.TensorShape(input_shape).as_list()
        channels_first = self.data_format == "channels_first"
        steps = shape[2] if channels_first else shape[1]
        features = shape[1] if channels_first else shape[2]
        length = conv_utils.conv_output_length(
            steps, self.pool_size[0], self.padding, self.strides[0]
        )
        if channels_first:
            return tf.TensorShape([shape[0], features, length])
        return tf.TensorShape([shape[0], length, features])

    def get_config(self):
        # Merge this layer's options into the base-layer config.
        config = super().get_config()
        config.update(
            {
                "strides": self.strides,
                "pool_size": self.pool_size,
                "padding": self.padding,
                "data_format": self.data_format,
            }
        )
        return config
|
tf-keras/tf_keras/layers/pooling/base_pooling1d.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/pooling/base_pooling1d.py",
"repo_id": "tf-keras",
"token_count": 1648
}
| 240 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark suite for KPL and feature column implementations."""
import itertools
import math
import random
import string
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
class LayerBenchmark(tf.test.Benchmark):
    """Benchmark the layer forward pass."""

    def report(self, name, keras_time, fc_time, iters):
        """Calculate and report benchmark statistics.

        Args:
            name: Name to report the benchmark under.
            keras_time: Average per-step time of the TF-Keras layer.
            fc_time: Average per-step time of the feature column.
            iters: Number of iterations the timings were averaged over.
        """
        # Positive delta means the TF-Keras layer was faster.
        delta = fc_time - keras_time
        extras = {
            "fc_avg_time": fc_time,
            "fc_vs_keras_sec": delta,
            "fc_vs_keras_pct": (delta / fc_time) * 100,
            "keras_faster_ratio": fc_time / keras_time,
        }
        self.report_benchmark(
            name=name, iters=iters, wall_time=keras_time, extras=extras
        )
class StepTimingCallback(keras.callbacks.Callback):
    """A callback that times non-warmup steps of a TF-Keras predict call.

    The first two batches are treated as warmup and excluded from the
    timing; the clock starts at batch index 2.
    """

    def __init__(self):
        self.t0 = None  # Wall time at the start of the first timed batch.
        self.steps = 0  # Number of timed (non-warmup) batches.

    def on_predict_batch_begin(self, batch_index, _):
        # Batches 0 and 1 are warmup; start timing on batch 2 and count
        # every batch after it.
        if batch_index == 2:
            self.t0 = time.time()
        elif batch_index > 2:
            self.steps += 1

    def on_predict_end(self, _):
        self.tn = time.time()
        # Guard: with fewer than 4 predict batches, `t0` stays None or
        # `steps` stays 0, and the average below would fail with an opaque
        # TypeError / ZeroDivisionError. Fail loudly with a clear message.
        if self.t0 is None or self.steps == 0:
            raise RuntimeError(
                "StepTimingCallback requires at least 4 predict batches "
                "(2 warmup + 2 timed) to compute an average step time."
            )
        self.t_avg = (self.tn - self.t0) / self.steps
def create_data(length, num_entries, max_value, dtype):
    """Create a ragged tensor with random data entries."""
    # Draw a random length in [0, length) for each row, then fill the
    # flattened values with random numbers in [0, max_value).
    row_lengths = (np.random.random(size=num_entries) * length).astype(int)
    flat_values = (
        np.random.random(size=np.sum(row_lengths)) * max_value
    ).astype(dtype)
    return tf.RaggedTensor.from_row_lengths(flat_values, row_lengths)
def create_string_data(
    length, num_entries, vocabulary, pct_oov, oov_string="__OOV__"
):
    """Create a ragged tensor with random string entries.

    Args:
        length: Maximum row length; each row's length is drawn uniformly
            from [0, length).
        num_entries: Number of rows in the ragged tensor.
        vocabulary: Sequence of strings to sample values from.
        pct_oov: Fraction (0.0-1.0) of values to overwrite with
            `oov_string`, spread evenly through the data.
        oov_string: The out-of-vocabulary marker string.

    Returns:
        A `tf.RaggedTensor` of strings with `num_entries` rows.
    """
    lengths = (np.random.random(size=num_entries) * length).astype(int)
    total_length = np.sum(lengths)
    num_oovs = int(pct_oov * total_length)
    values = [random.choice(vocabulary) for _ in range(total_length)]
    # Bug fix: with a small pct_oov (or little data), num_oovs can round
    # down to 0 even though pct_oov > 0, and the cadence computation below
    # would raise ZeroDivisionError. Only spread OOVs when there is at
    # least one to place.
    if pct_oov > 0 and num_oovs > 0:
        oov_cadence = int(total_length / num_oovs)
        idx = 0
        for _ in range(num_oovs):
            if idx < total_length:
                values[idx] = oov_string
            idx += oov_cadence
    return tf.RaggedTensor.from_row_lengths(values, lengths)
def create_vocabulary(vocab_size):
    """Create `vocab_size` distinct ASCII tokens ("a", ..., "Z", "aa", ...).

    Tokens are generated in order of increasing length, and within each
    length in `string.ascii_letters` order (lowercase before uppercase).

    Args:
        vocab_size: Number of tokens to generate. Must be positive.

    Returns:
        A list of `vocab_size` unique strings.

    Raises:
        ValueError: If `vocab_size` is not positive.
    """
    if vocab_size <= 0:
        raise ValueError(f"vocab_size must be positive, got {vocab_size}")
    base = len(string.ascii_letters)
    # Number of character positions needed to cover vocab_size tokens.
    # max(..., 1) fixes the vocab_size == 1 case, where log(1, base) == 0
    # would otherwise yield an empty vocabulary.
    n = max(math.ceil(math.log(vocab_size, base)), 1)
    all_tokens = (
        "".join(item)
        for i in range(1, n + 1)
        for item in itertools.product(string.ascii_letters, repeat=i)
    )
    return list(itertools.islice(all_tokens, vocab_size))
def run_keras(data, model, batch_size, num_runs, steps_per_repeat=100):
    """Benchmark a TF-Keras model."""
    dataset = (
        tf.data.Dataset.from_tensor_slices(data)
        .repeat()
        .prefetch(tf.data.AUTOTUNE)
        .batch(batch_size)
        .cache()
    )
    per_run_times = []
    steps = 0
    for _ in range(num_runs):
        # Each run predicts steps_per_repeat more steps than the last.
        steps += steps_per_repeat
        timer = StepTimingCallback()
        # Benchmarked code begins here.
        model.predict(dataset, steps=steps, callbacks=[timer])
        # Benchmarked code ends here.
        per_run_times.append(timer.t_avg)
    return np.mean(per_run_times)
def run_fc(data, fc_fn, batch_size, num_runs, steps_per_repeat=100):
    """Benchmark a Feature Column."""
    dataset = (
        tf.data.Dataset.from_tensor_slices(data)
        .repeat()
        .prefetch(tf.data.AUTOTUNE)
        .batch(batch_size)
        .cache()
    )
    # Trace the fc_fn
    batches = dataset.__iter__()
    fc_fn(next(batches))
    starts = []
    ends = []
    for _ in range(num_runs):
        starts.append(time.time())
        # Benchmarked code begins here.
        for _ in range(steps_per_repeat):
            _ = fc_fn(next(batches))
        # Benchmarked code ends here.
        ends.append(time.time())
    # Average the per-step time over all runs.
    per_step_times = (np.array(ends) - np.array(starts)) / steps_per_repeat
    return np.mean(per_step_times)
|
tf-keras/tf_keras/layers/preprocessing/benchmarks/feature_column_benchmark.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/feature_column_benchmark.py",
"repo_id": "tf-keras",
"token_count": 2064
}
| 241 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras hashing preprocessing layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.layers.preprocessing import preprocessing_utils as utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
# Local aliases for the output-mode string constants shared across the
# preprocessing layers (defined in preprocessing_utils).
INT = utils.INT
MULTI_HOT = utils.MULTI_HOT
ONE_HOT = utils.ONE_HOT
COUNT = utils.COUNT
@keras_export(
    "keras.layers.Hashing", "keras.layers.experimental.preprocessing.Hashing"
)
class Hashing(base_layer.Layer):
    """A preprocessing layer which hashes and bins categorical features.

    This layer transforms categorical inputs to hashed output. It
    element-wise converts ints or strings to ints in a fixed range. The
    stable hash function uses `tensorflow::ops::Fingerprint` to produce the
    same output consistently across all platforms.

    This layer uses [FarmHash64](https://github.com/google/farmhash) by
    default, which provides a consistent hashed output across different
    platforms and is stable across invocations, regardless of device and
    context, by mixing the input bits thoroughly.

    If you want to obfuscate the hashed output, you can also pass a random
    `salt` argument in the constructor. In that case, the layer will use the
    [SipHash64](https://github.com/google/highwayhash) hash function, with
    the `salt` value serving as additional input to the hash function.

    For an overview and full list of preprocessing layers, see the
    preprocessing
    [guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).

    **Example (FarmHash64)**

    >>> layer = tf.keras.layers.Hashing(num_bins=3)
    >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
    >>> layer(inp)
    <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
      array([[1],
             [0],
             [1],
             [1],
             [2]])>

    **Example (FarmHash64) with a mask value**

    >>> layer = tf.keras.layers.Hashing(num_bins=3, mask_value='')
    >>> inp = [['A'], ['B'], [''], ['C'], ['D']]
    >>> layer(inp)
    <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
      array([[1],
             [1],
             [0],
             [2],
             [2]])>

    **Example (SipHash64)**

    >>> layer = tf.keras.layers.Hashing(num_bins=3, salt=[133, 137])
    >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
    >>> layer(inp)
    <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
      array([[1],
             [2],
             [1],
             [0],
             [2]])>

    **Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**

    >>> layer = tf.keras.layers.Hashing(num_bins=3, salt=133)
    >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
    >>> layer(inp)
    <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
      array([[0],
             [0],
             [2],
             [1],
             [0]])>

    Args:
      num_bins: Number of hash bins. Note that this includes the `mask_value`
        bin, so the effective number of bins is `(num_bins - 1)` if
        `mask_value` is set.
      mask_value: A value that represents masked inputs, which are mapped to
        index 0. `None` means no mask term will be added and the
        hashing will start at index 0. Defaults to `None`.
      salt: A single unsigned integer or None.
        If passed, the hash function used will be SipHash64, with these
        values used as an additional input (known as a "salt" in
        cryptography). These should be non-zero. If `None`, uses the
        FarmHash64 hash function. It also supports tuple/list of 2 unsigned
        integer numbers, see reference paper for details. Defaults to
        `None`.
      output_mode: Specification for the output of the layer. Values can be
        `"int"`, `"one_hot"`, `"multi_hot"`, or
        `"count"` configuring the layer as follows:
          - `"int"`: Return the integer bin indices directly.
          - `"one_hot"`: Encodes each individual element in the input into an
            array the same size as `num_bins`, containing a 1 at the input's
            bin index. If the last dimension is size 1, will encode on that
            dimension. If the last dimension is not size 1, will append a new
            dimension for the encoded output.
          - `"multi_hot"`: Encodes each sample in the input into a single
            array the same size as `num_bins`, containing a 1 for each bin
            index present in the sample. Treats the last dimension as the
            sample dimension, if input shape is `(..., sample_length)`,
            output shape will be `(..., num_tokens)`.
          - `"count"`: As `"multi_hot"`, but the int array contains a count
            of the number of times the bin index appeared in the sample.
        Defaults to `"int"`.
      sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
        and `"count"` output modes. If True, returns a `SparseTensor` instead
        of a dense `Tensor`. Defaults to `False`.
      **kwargs: Keyword arguments to construct a layer.

    Input shape:
      A single or list of string, int32 or int64 `Tensor`,
      `SparseTensor` or `RaggedTensor` of shape `(batch_size, ...,)`

    Output shape:
      An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape
      `(batch_size, ...)`. If any input is `RaggedTensor` then output is
      `RaggedTensor`, otherwise if any input is `SparseTensor` then output is
      `SparseTensor`, otherwise the output is `Tensor`.

    Reference:
      - [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
    """

    def __init__(
        self,
        num_bins,
        mask_value=None,
        salt=None,
        output_mode="int",
        sparse=False,
        **kwargs,
    ):
        if num_bins is None or num_bins <= 0:
            raise ValueError(
                "The `num_bins` for `Hashing` cannot be `None` or "
                f"non-positive values. Received: num_bins={num_bins}."
            )
        # By default, output int64 when output_mode='int' and floats otherwise.
        if "dtype" not in kwargs or kwargs["dtype"] is None:
            kwargs["dtype"] = (
                tf.int64 if output_mode == INT else backend.floatx()
            )
        elif (
            output_mode == "int" and not tf.as_dtype(kwargs["dtype"]).is_integer
        ):
            # Compat for when dtype was always floating and ignored by the
            # layer.
            kwargs["dtype"] = tf.int64
        super().__init__(**kwargs)
        # Check dtype only after base layer parses it; dtype parsing is complex.
        if (
            output_mode == INT
            and not tf.as_dtype(self.compute_dtype).is_integer
        ):
            input_dtype = kwargs["dtype"]
            raise ValueError(
                'When `output_mode="int"`, `dtype` should be an integer '
                f"type. Received: dtype={input_dtype}"
            )
        # 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT)
        layer_utils.validate_string_arg(
            output_mode,
            allowable_strings=(INT, ONE_HOT, MULTI_HOT, COUNT),
            layer_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        # Sparse output is only meaningful for the encoded (array) modes.
        if sparse and output_mode == INT:
            raise ValueError(
                "`sparse` may only be true if `output_mode` is "
                '`"one_hot"`, `"multi_hot"`, or `"count"`. '
                f"Received: sparse={sparse} and "
                f"output_mode={output_mode}"
            )
        self.num_bins = num_bins
        self.mask_value = mask_value
        # A provided salt switches from FarmHash64 to the keyed SipHash64.
        self.strong_hash = True if salt is not None else False
        self.output_mode = output_mode
        self.sparse = sparse
        # SipHash64 takes a 2-element key; a single int is duplicated.
        self.salt = None
        if salt is not None:
            if isinstance(salt, (tuple, list)) and len(salt) == 2:
                self.salt = salt
            elif isinstance(salt, int):
                self.salt = [salt, salt]
            else:
                raise ValueError(
                    "The `salt` argument for `Hashing` can only be a tuple of "
                    "size 2 integers, or a single integer. "
                    f"Received: salt={salt}."
                )

    def call(self, inputs):
        """Hashes the input to bin indices and encodes per `output_mode`."""
        inputs = utils.ensure_tensor(inputs)
        if isinstance(inputs, tf.SparseTensor):
            # Hash only the values of a sparse input, preserving its
            # indices and shape.
            indices = tf.SparseTensor(
                indices=inputs.indices,
                values=self._hash_values_to_bins(inputs.values),
                dense_shape=inputs.dense_shape,
            )
        else:
            indices = self._hash_values_to_bins(inputs)
        return utils.encode_categorical_inputs(
            indices,
            output_mode=self.output_mode,
            depth=self.num_bins,
            sparse=self.sparse,
            dtype=self.compute_dtype,
        )

    def _hash_values_to_bins(self, values):
        """Converts a non-sparse tensor of values to bin indices."""
        hash_bins = self.num_bins
        mask = None
        # If mask_value is set, the zeroth bin is reserved for it.
        if self.mask_value is not None and hash_bins > 1:
            hash_bins -= 1
            mask = tf.equal(values, self.mask_value)
        # Convert all values to strings before hashing.
        if values.dtype.is_integer:
            values = tf.as_string(values)
        # Hash the strings.
        if self.strong_hash:
            values = tf.strings.to_hash_bucket_strong(
                values, hash_bins, name="hash", key=self.salt
            )
        else:
            values = tf.strings.to_hash_bucket_fast(
                values, hash_bins, name="hash"
            )
        if mask is not None:
            # Shift every hashed bin up by one so bin 0 is free, then map
            # the masked positions to bin 0.
            values = tf.add(values, tf.ones_like(values))
            values = tf.where(mask, tf.zeros_like(values), values)
        return values

    def compute_output_shape(self, input_shape):
        # Hashing is element-wise; the shape is unchanged.
        return input_shape

    def compute_output_signature(self, input_spec):
        # Sparse inputs yield a sparse spec; everything else a dense one.
        output_shape = self.compute_output_shape(input_spec.shape)
        if isinstance(input_spec, tf.SparseTensorSpec):
            return tf.SparseTensorSpec(
                shape=output_shape, dtype=self.compute_dtype
            )
        else:
            return tf.TensorSpec(shape=output_shape, dtype=self.compute_dtype)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "num_bins": self.num_bins,
                "salt": self.salt,
                "mask_value": self.mask_value,
                "output_mode": self.output_mode,
                "sparse": self.sparse,
            }
        )
        return config
|
tf-keras/tf_keras/layers/preprocessing/hashing.py/0
|
{
"file_path": "tf-keras/tf_keras/layers/preprocessing/hashing.py",
"repo_id": "tf-keras",
"token_count": 5081
}
| 242 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.