content_type
stringclasses 8
values | main_lang
stringclasses 7
values | message
stringlengths 1
50
| sha
stringlengths 40
40
| patch
stringlengths 52
962k
| file_count
int64 1
300
|
---|---|---|---|---|---|
Python | Python | add tests for individual spacy layers | 6ccacff54e4c279c0b37652119bd507ee466a5df | <ide><path>spacy/tests/pipeline/test_models.py
<ide> from typing import List
<add>
<add>import numpy
<ide> import pytest
<del>from numpy.testing import assert_equal
<add>from numpy.testing import assert_almost_equal
<add>from spacy.vocab import Vocab
<ide> from thinc.api import get_current_ops, Model, data_validation
<del>from thinc.types import Array2d
<add>from thinc.types import Array2d, Ragged
<ide>
<ide> from spacy.lang.en import English
<add>from spacy.ml import FeatureExtractor, StaticVectors
<add>from spacy.ml._character_embed import CharacterEmbed
<ide> from spacy.tokens import Doc
<ide>
<ide> OPS = get_current_ops()
<ide>
<del>texts = ["These are 4 words", "These just three"]
<add>texts = ["These are 4 words", "Here just three"]
<ide> l0 = [[1, 2], [3, 4], [5, 6], [7, 8]]
<ide> l1 = [[9, 8], [7, 6], [5, 4]]
<del>out_list = [OPS.xp.asarray(l0, dtype="f"), OPS.xp.asarray(l1, dtype="f")]
<del>a1 = OPS.xp.asarray(l1, dtype="f")
<add>list_floats = [OPS.xp.asarray(l0, dtype="f"), OPS.xp.asarray(l1, dtype="f")]
<add>list_ints = [OPS.xp.asarray(l0, dtype="i"), OPS.xp.asarray(l1, dtype="i")]
<add>array = OPS.xp.asarray(l1, dtype="f")
<add>ragged = Ragged(array, OPS.xp.asarray([2, 1], dtype="i"))
<add>
<add>
<add>def get_docs():
<add> vocab = Vocab()
<add> for t in texts:
<add> for word in t.split():
<add> hash_id = vocab.strings.add(word)
<add> vector = numpy.random.uniform(-1, 1, (7,))
<add> vocab.set_vector(hash_id, vector)
<add> docs = [English(vocab)(t) for t in texts]
<add> return docs
<add>
<ide>
<ide> # Test components with a model of type Model[List[Doc], List[Floats2d]]
<ide> @pytest.mark.parametrize("name", ["tagger", "tok2vec", "morphologizer", "senter"])
<del>def test_layers_batching_all_list(name):
<add>def test_components_batching_list(name):
<ide> nlp = English()
<del> in_data = [nlp(text) for text in texts]
<ide> proc = nlp.create_pipe(name)
<del> util_batch_unbatch_List(proc.model, in_data, out_list)
<add> util_batch_unbatch_List(proc.model, get_docs(), list_floats)
<ide>
<del>def util_batch_unbatch_List(model: Model[List[Doc], List[Array2d]], in_data: List[Doc], out_data: List[Array2d]):
<del> with data_validation(True):
<del> model.initialize(in_data, out_data)
<del> Y_batched = model.predict(in_data)
<del> Y_not_batched = [model.predict([u])[0] for u in in_data]
<del> assert_equal(Y_batched, Y_not_batched)
<ide>
<ide> # Test components with a model of type Model[List[Doc], Floats2d]
<ide> @pytest.mark.parametrize("name", ["textcat"])
<del>def test_layers_batching_all_array(name):
<add>def test_components_batching_array(name):
<ide> nlp = English()
<ide> in_data = [nlp(text) for text in texts]
<ide> proc = nlp.create_pipe(name)
<del> util_batch_unbatch_Array(proc.model, in_data, a1)
<add> util_batch_unbatch_Array(proc.model, get_docs(), array)
<add>
<add>
<add>LAYERS = [
<add> (CharacterEmbed(nM=5, nC=3), get_docs(), list_floats),
<add> (FeatureExtractor([100, 200]), get_docs(), list_ints),
<add> (StaticVectors(), get_docs(), ragged),
<add>]
<add>
<add>
<add>@pytest.mark.parametrize("model,in_data,out_data", LAYERS)
<add>def test_layers_batching_all(model, in_data, out_data):
<add> # In = List[Doc]
<add> if isinstance(in_data, list) and isinstance(in_data[0], Doc):
<add> if isinstance(out_data, OPS.xp.ndarray) and out_data.ndim == 2:
<add> util_batch_unbatch_Array(model, in_data, out_data)
<add> elif (
<add> isinstance(out_data, list)
<add> and isinstance(out_data[0], OPS.xp.ndarray)
<add> and out_data[0].ndim == 2
<add> ):
<add> util_batch_unbatch_List(model, in_data, out_data)
<add> elif isinstance(out_data, Ragged):
<add> util_batch_unbatch_Ragged(model, in_data, out_data)
<ide>
<del>def util_batch_unbatch_Array(model: Model[List[Doc], Array2d], in_data: List[Doc], out_data: Array2d):
<add>
<add>
<add>def util_batch_unbatch_List(
<add> model: Model[List[Doc], List[Array2d]], in_data: List[Doc], out_data: List[Array2d]
<add>):
<ide> with data_validation(True):
<ide> model.initialize(in_data, out_data)
<ide> Y_batched = model.predict(in_data)
<ide> Y_not_batched = [model.predict([u])[0] for u in in_data]
<del> assert_equal(Y_batched, Y_not_batched)
<ide>\ No newline at end of file
<add> for i in range(len(Y_batched)):
<add> assert_almost_equal(Y_batched[i], Y_not_batched[i], decimal=4)
<add>
<add>
<add>def util_batch_unbatch_Array(
<add> model: Model[List[Doc], Array2d], in_data: List[Doc], out_data: Array2d
<add>):
<add> with data_validation(True):
<add> model.initialize(in_data, out_data)
<add> Y_batched = model.predict(in_data).tolist()
<add> Y_not_batched = [model.predict([u])[0] for u in in_data]
<add> assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
<add>
<add>
<add>def util_batch_unbatch_Ragged(
<add> model: Model[List[Doc], Ragged], in_data: List[Doc], out_data: Ragged
<add>):
<add> with data_validation(True):
<add> model.initialize(in_data, out_data)
<add> Y_batched = model.predict(in_data)
<add> Y_not_batched = []
<add> for u in in_data:
<add> Y_not_batched.extend(model.predict([u]).data.tolist())
<add> assert_almost_equal(Y_batched.data, Y_not_batched, decimal=4) | 1 |
Javascript | Javascript | cache the replaced source for performance | 8a7b508165ee05bb01f64a9c4e3bb7d63c471443 | <ide><path>lib/NormalModule.js
<ide> NormalModule.prototype.build = function build(options, compilation, resolver, fs
<ide> this.dependencies.length = 0;
<ide> this.variables.length = 0;
<ide> this.blocks.length = 0;
<add> this._cachedSource = null;
<ide> if(options.module && options.module.noParse) {
<ide> if(Array.isArray(options.module.noParse)) {
<ide> if(options.module.noParse.some(function(regExp) {
<ide> NormalModule.prototype.build = function build(options, compilation, resolver, fs
<ide> };
<ide>
<ide> NormalModule.prototype.source = function(dependencyTemplates, outputOptions, requestShortener) {
<add> if(this._cachedSource) return this._cachedSource;
<ide> var _source = this._source;
<ide> if(!_source) return new RawSource("throw new Error('No source availible');");
<del> var source = new ReplaceSource(_source);
<add> var source = this._cachedSource = new ReplaceSource(_source);
<ide> var topLevelBlock = this;
<ide> function doDep(dep) {
<ide> var template = dependencyTemplates.get(dep.Class); | 1 |
Javascript | Javascript | specify vendored packages in ember's brocfile | 00ad738432088ab02eba822b5d55137b9e138ffd | <ide><path>Brocfile.js
<ide> var EmberBuild = require('emberjs-build');
<ide> var packages = require('./lib/packages');
<ide>
<add>var vendoredPackage = require('emberjs-build/lib/vendored-package');
<add>var htmlbarsPackage = require('emberjs-build/lib/htmlbars-package');
<add>var vendoredES6Package = require('emberjs-build/lib/es6-vendored-package');
<add>
<ide> var emberBuild = new EmberBuild({
<ide> htmlbars: require('htmlbars'),
<del> packages: packages
<add> packages: packages,
<add> _vendoredPackages: {
<add> 'loader': vendoredPackage('loader'),
<add> 'rsvp': vendoredES6Package('rsvp'),
<add> 'backburner': vendoredES6Package('backburner'),
<add> 'router': vendoredES6Package('router.js'),
<add> 'dag-map': vendoredES6Package('dag-map'),
<add> 'route-recognizer': htmlbarsPackage('route-recognizer', { libPath: 'node_modules/route-recognizer/dist/es6/' }),
<add> 'dom-helper': htmlbarsPackage('dom-helper'),
<add> 'morph-range': htmlbarsPackage('morph-range'),
<add> 'morph-attr': htmlbarsPackage('morph-attr'),
<add> 'htmlbars-compiler': htmlbarsPackage('htmlbars-compiler'),
<add> 'htmlbars-syntax': htmlbarsPackage('htmlbars-syntax'),
<add> 'simple-html-tokenizer': htmlbarsPackage('simple-html-tokenizer'),
<add> 'htmlbars-test-helpers': htmlbarsPackage('htmlbars-test-helpers', { singleFile: true }),
<add> 'htmlbars-util': htmlbarsPackage('htmlbars-util')
<add> }
<ide> });
<ide>
<ide> module.exports = emberBuild.getDistTrees(); | 1 |
PHP | PHP | start removal of magin "test_suite" connection | 9d2e4b1b0449adfe6f74ea647f84833ce9c23df0 | <ide><path>cake/tests/lib/cake_fixture_manager.php
<ide> protected function _initDb() {
<ide> $testDbAvailable = $db->isConnected();
<ide> }
<ide>
<del> // Try for default DB
<ide> if (!$testDbAvailable) {
<del> $db = ConnectionManager::getDataSource('default');
<del> $_prefix = $db->config['prefix'];
<del> $db->config['prefix'] = 'test_suite_';
<add> throw new MissingConnectionException(__('You need to create a $test datasource connection to start using fixtures'));
<ide> }
<ide>
<del> ConnectionManager::create('test_suite', $db->config);
<del> $db->config['prefix'] = $_prefix;
<del>
<del> // Get db connection
<del> $this->_db = ConnectionManager::getDataSource('test_suite');
<del> $this->_db->cacheSources = false;
<del>
<del> ClassRegistry::config(array('ds' => 'test_suite'));
<add> $this->_db = $db;
<add> ClassRegistry::config(array('ds' => 'test'));
<ide> $this->_initialized = true;
<ide> }
<ide>
<ide><path>cake/tests/lib/cake_test_model.php
<ide> * @subpackage cake.cake.tests.lib
<ide> */
<ide> class CakeTestModel extends Model {
<del> public $useDbConfig = 'test_suite';
<add> public $useDbConfig = 'test';
<ide> public $cacheSources = false;
<ide> }
<ide><path>cake/tests/lib/cake_test_suite_dispatcher.php
<ide> function _parseParams() {
<ide> * @return void
<ide> */
<ide> function _runTestCase() {
<del> $Reporter = CakeTestSuiteDispatcher::getReporter();
<del> return $this->Manager->runTestCase($this->params['case'], $Reporter, $this->params['codeCoverage']);
<add> try {
<add> $Reporter = CakeTestSuiteDispatcher::getReporter();
<add> return $this->Manager->runTestCase($this->params['case'], $Reporter, $this->params['codeCoverage']);
<add> } catch (MissingConnectionException $exception) {
<add> ob_end_clean();
<add> $baseDir = $this->_baseDir;
<add> include CAKE_TESTS_LIB . 'templates' . DS . 'missing_conenction.php';
<add> exit();
<add> }
<ide> }
<ide> }
<ide><path>cake/tests/lib/templates/missing_conenction.php
<add><?php
<add>/**
<add> * Missing Connection error page
<add> *
<add> * PHP versions 4 and 5
<add> *
<add> * CakePHP(tm) Tests <http://book.cakephp.org/view/1196/Testing>
<add> * Copyright 2005-2009, Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The Open Group Test Suite License
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright 2005-2009, Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://book.cakephp.org/view/1196/Testing CakePHP(tm) Tests
<add> * @package cake
<add> * @subpackage cake.cake.tests.libs
<add> * @since CakePHP(tm) v 1.2.0.4433
<add> * @license http://www.opensource.org/licenses/opengroup.php The Open Group Test Suite License
<add> */
<add>?>
<add><?php include dirname(__FILE__) . DS . 'header.php'; ?>
<add><div id="content">
<add> <h2>Missing Test Database Connection</h2>
<add> <h3><?php echo $exception->getMessage(); ?></h3>
<add> <pre><?php echo $exception->getTraceAsString(); ?></pre>
<add></div>
<add><?php include dirname(__FILE__) . DS . 'footer.php'; ?>
<ide>\ No newline at end of file | 4 |
Python | Python | improve module comment | a720b41ffaa78d82faab844bed58bd6397fbf669 | <ide><path>celery/task/__init__.py
<ide> celery.task
<ide> ~~~~~~~~~~~
<ide>
<del> This is the old task module, it should not be used anymore.
<add> This is the old task module, it should not be used anymore,
<add> import from the main 'celery' module instead.
<add> If you're looking for the decorator implementation then that's in
<add> ``celery.app.base.Celery.task``.
<ide>
<ide> """
<ide> from __future__ import absolute_import | 1 |
Java | Java | replace use of jdk 1.8 api | 24dfe8ec19e90b4fae1eb47de9cd0fa3b98687fc | <ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/MvcUriComponentsBuilder.java
<ide> private static UriComponents applyContributors(UriComponentsBuilder builder, Met
<ide> contributor = defaultUriComponentsContributor;
<ide> }
<ide>
<del> int paramCount = method.getParameterCount();
<add> int paramCount = method.getParameterTypes().length;
<ide> int argCount = args.length;
<ide>
<ide> Assert.isTrue(paramCount == argCount, "Number of method parameters " + paramCount + | 1 |
Ruby | Ruby | remove default_formula from mpidependency | 404845eaeb8e083a306e9b747c8a65b168abae9c | <ide><path>Library/Homebrew/requirements/mpi_dependency.rb
<ide> class MPIDependency < Requirement
<ide>
<ide> fatal true
<ide>
<del> default_formula 'open-mpi'
<del>
<ide> env :userpaths
<ide>
<ide> def initialize *lang_list | 1 |
Javascript | Javascript | expose force.tick. fixes #419 | 56818fd258c2c9c682c869b15428faf7b266786b | <ide><path>d3.layout.js
<ide> d3.layout.force = function() {
<ide> };
<ide> }
<ide>
<del> function tick() {
<add> force.tick = function() {
<ide> var n = nodes.length,
<ide> m = links.length,
<ide> q,
<ide> d3.layout.force = function() {
<ide>
<ide> // simulated annealing, basically
<ide> return (alpha *= .99) < .005;
<del> }
<add> };
<ide>
<ide> force.nodes = function(x) {
<ide> if (!arguments.length) return nodes;
<ide> d3.layout.force = function() {
<ide>
<ide> force.resume = function() {
<ide> alpha = .1;
<del> d3.timer(tick);
<add> d3.timer(force.tick);
<ide> return force;
<ide> };
<ide>
<ide><path>d3.layout.min.js
<del>(function(){function a(a){var b=a.source,d=a.target,e=c(b,d),f=[b];while(b!==e)b=b.parent,f.push(b);var g=f.length;while(d!==e)f.splice(g,0,d),d=d.parent;return f}function b(a){var b=[],c=a.parent;while(c!=null)b.push(a),a=c,c=c.parent;return b.push(a),b}function c(a,c){if(a===c)return a;var d=b(a),e=b(c),f=d.pop(),g=e.pop(),h=null;while(f===g)h=f,f=d.pop(),g=e.pop();return h}function g(a){a.fixed|=2}function h(a){a!==f&&(a.fixed&=1)}function i(){j(),f.fixed&=1,e=f=null}function j(){f.px=d3.event.x,f.py=d3.event.y,e.resume()}function k(a,b,c){var d=0,e=0;a.charge=0;if(!a.leaf){var f=a.nodes,g=f.length,h=-1,i;while(++h<g){i=f[h];if(i==null)continue;k(i,b,c),a.charge+=i.charge,d+=i.charge*i.cx,e+=i.charge*i.cy}}if(a.point){a.leaf||(a.point.x+=Math.random()-.5,a.point.y+=Math.random()-.5);var j=b*c[a.point.index];a.charge+=a.pointCharge=j,d+=j*a.point.x,e+=j*a.point.y}a.cx=d/a.charge,a.cy=e/a.charge}function l(a){return 20}function m(a){return 1}function o(a){return a.x}function p(a){return a.y}function q(a,b,c){a.y0=b,a.y=c}function t(a){var b=1,c=0,d=a[0][1],e,f=a.length;for(;b<f;++b)(e=a[b][1])>d&&(c=b,d=e);return c}function u(a){return a.reduce(v,0)}function v(a,b){return a+b[1]}function w(a,b){return x(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function x(a,b){var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];while(++c<=b)f[c]=e*c+d;return f}function y(a){return[d3.min(a),d3.max(a)]}function z(a,b){return d3.rebind(a,b,"sort","children","value"),a.links=D,a.nodes=function(b){return E=!0,(a.nodes=a)(b)},a}function A(a){return a.children}function B(a){return a.value}function C(a,b){return b.value-a.value}function D(a){return d3.merge(a.map(function(a){return(a.children||[]).map(function(b){return{source:a,target:b}})}))}function F(a,b){return a.value-b.value}function G(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function H(a,b){a._pack_next=b,b._pack_prev=a}function I(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return 
e*e-c*c-d*d>.001}function J(a){function l(a){b=Math.min(a.x-a.r,b),c=Math.max(a.x+a.r,c),d=Math.min(a.y-a.r,d),e=Math.max(a.y+a.r,e)}var b=Infinity,c=-Infinity,d=Infinity,e=-Infinity,f=a.length,g,h,i,j,k;a.forEach(K),g=a[0],g.x=-g.r,g.y=0,l(g);if(f>1){h=a[1],h.x=h.r,h.y=0,l(h);if(f>2){i=a[2],O(g,h,i),l(i),G(g,i),g._pack_prev=i,G(i,h),h=g._pack_next;for(var m=3;m<f;m++){O(g,h,i=a[m]);var n=0,o=1,p=1;for(j=h._pack_next;j!==h;j=j._pack_next,o++)if(I(j,i)){n=1;break}if(n==1)for(k=g._pack_prev;k!==j._pack_prev;k=k._pack_prev,p++)if(I(k,i))break;n?(o<p||o==p&&h.r<g.r?H(g,h=j):H(g=k,h),m--):(G(g,i),h=i,l(i))}}}var q=(b+c)/2,r=(d+e)/2,s=0;for(var m=0;m<f;m++){var t=a[m];t.x-=q,t.y-=r,s=Math.max(s,t.r+Math.sqrt(t.x*t.x+t.y*t.y))}return a.forEach(L),s}function K(a){a._pack_next=a._pack_prev=a}function L(a){delete a._pack_next,delete a._pack_prev}function M(a){var b=a.children;b&&b.length?(b.forEach(M),a.r=J(b)):a.r=Math.sqrt(a.value)}function N(a,b,c,d){var e=a.children;a.x=b+=d*a.x,a.y=c+=d*a.y,a.r*=d;if(e){var f=-1,g=e.length;while(++f<g)N(e[f],b,c,d)}}function O(a,b,c){var d=a.r+c.r,e=b.x-a.x,f=b.y-a.y;if(d&&(e||f)){var g=b.r+c.r,h=Math.sqrt(e*e+f*f),i=Math.max(-1,Math.min(1,(d*d+h*h-g*g)/(2*d*h))),j=Math.acos(i),k=i*(d/=h),l=Math.sin(j)*d;c.x=a.x+k*e+l*f,c.y=a.y+k*f-l*e}else c.x=a.x+d,c.y=a.y}function P(a){return 1+d3.max(a,function(a){return a.y})}function Q(a){return a.reduce(function(a,b){return a+b.x},0)/a.length}function R(a){var b=a.children;return b&&b.length?R(b[0]):a}function S(a){var b=a.children,c;return b&&(c=b.length)?S(b[c-1]):a}function T(a,b){return a.parent==b.parent?1:2}function U(a){var b=a.children;return b&&b.length?b[0]:a._tree.thread}function V(a){var b=a.children,c;return b&&(c=b.length)?b[c-1]:a._tree.thread}function W(a,b){var c=a.children;if(c&&(e=c.length)){var d,e,f=-1;while(++f<e)b(d=W(c[f],b),a)>0&&(a=d)}return a}function X(a,b){return a.x-b.x}function Y(a,b){return b.x-a.x}function Z(a,b){return a.depth-b.depth}function $(a,b){function 
c(a,d){var e=a.children;if(e&&(i=e.length)){var f,g=null,h=-1,i;while(++h<i)f=e[h],c(f,g),g=f}b(a,d)}c(a,null)}function _(a){var b=0,c=0,d=a.children,e=d.length,f;while(--e>=0)f=d[e]._tree,f.prelim+=b,f.mod+=b,b+=f.shift+(c+=f.change)}function ba(a,b,c){a=a._tree,b=b._tree;var d=c/(b.number-a.number);a.change+=d,b.change-=d,b.shift+=c,b.prelim+=c,b.mod+=c}function bb(a,b,c){return a._tree.ancestor.parent==b.parent?a._tree.ancestor:c}function bc(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function bd(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];return e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0),{x:c,y:d,dx:e,dy:f}}d3.layout={},d3.layout.bundle=function(){return function(b){var c=[],d=-1,e=b.length;while(++d<e)c.push(a(b[d]));return c}},d3.layout.chord=function(){function j(){var a={},j=[],l=d3.range(e),m=[],n,o,p,q,r;b=[],c=[],n=0,q=-1;while(++q<e){o=0,r=-1;while(++r<e)o+=d[q][r];j.push(o),m.push(d3.range(e)),n+=o}g&&l.sort(function(a,b){return g(j[a],j[b])}),h&&m.forEach(function(a,b){a.sort(function(a,c){return h(d[b][a],d[b][c])})}),n=(2*Math.PI-f*e)/n,o=0,q=-1;while(++q<e){p=o,r=-1;while(++r<e){var s=l[q],t=m[s][r],u=d[s][t],v=o,w=o+=u*n;a[s+"-"+t]={index:s,subindex:t,startAngle:v,endAngle:w,value:u}}c.push({index:s,startAngle:p,endAngle:o,value:(o-p)/n}),o+=f}q=-1;while(++q<e){r=q-1;while(++r<e){var x=a[q+"-"+r],y=a[r+"-"+q];(x.value||y.value)&&b.push(x.value<y.value?{source:y,target:x}:{source:x,target:y})}}i&&k()}function k(){b.sort(function(a,b){return i((a.source.value+a.target.value)/2,(b.source.value+b.target.value)/2)})}var a={},b,c,d,e,f=0,g,h,i;return a.matrix=function(f){return arguments.length?(e=(d=f)&&d.length,b=c=null,a):d},a.padding=function(d){return arguments.length?(f=d,b=c=null,a):f},a.sortGroups=function(d){return arguments.length?(g=d,b=c=null,a):g},a.sortSubgroups=function(c){return arguments.length?(h=c,b=null,a):h},a.sortChords=function(c){return arguments.length?(i=c,b&&k(),a):i},a.chords=function(){return 
b||j(),b},a.groups=function(){return c||j(),c},a},d3.layout.force=function(){function A(a){return function(b,c,d,e,f){if(b.point!==a){var g=b.cx-a.x,h=b.cy-a.y,i=1/Math.sqrt(g*g+h*h);if((e-c)*i<t){var j=b.charge*i*i;return a.px-=g*j,a.py-=h*j,!0}if(b.point&&isFinite(i)){var j=b.pointCharge*i*i;a.px-=g*j,a.py-=h*j}}return!b.charge}}function B(){var a=v.length,d=w.length,e,f,g,h,i,j,l,m,p;for(f=0;f<d;++f){g=w[f],h=g.source,i=g.target,m=i.x-h.x,p=i.y-h.y;if(j=m*m+p*p)j=n*y[f]*((j=Math.sqrt(j))-x[f])/j,m*=j,p*=j,i.x-=m*(l=h.weight/(i.weight+h.weight)),i.y-=p*l,h.x+=m*(l=1-l),h.y+=p*l}if(l=n*s){m=c[0]/2,p=c[1]/2,f=-1;if(l)while(++f<a)g=v[f],g.x+=(m-g.x)*l,g.y+=(p-g.y)*l}if(r){k(e=d3.geom.quadtree(v),n,z),f=-1;while(++f<a)(g=v[f]).fixed||e.visit(A(g))}f=-1;while(++f<a)g=v[f],g.fixed?(g.x=g.px,g.y=g.py):(g.x-=(g.px-(g.px=g.x))*o,g.y-=(g.py-(g.py=g.y))*o);return b.tick({type:"tick",alpha:n}),(n*=.99)<.005}function C(b){g(f=b),e=a}var a={},b=d3.dispatch("tick"),c=[1,1],d,n,o=.9,p=l,q=m,r=-30,s=.1,t=.8,u,v=[],w=[],x,y,z;return a.nodes=function(b){return arguments.length?(v=b,a):v},a.links=function(b){return arguments.length?(w=b,a):w},a.size=function(b){return arguments.length?(c=b,a):c},a.linkDistance=function(b){return arguments.length?(p=d3.functor(b),a):p},a.distance=a.linkDistance,a.linkStrength=function(b){return arguments.length?(q=d3.functor(b),a):q},a.friction=function(b){return arguments.length?(o=b,a):o},a.charge=function(b){return arguments.length?(r=typeof b=="function"?b:+b,a):r},a.gravity=function(b){return arguments.length?(s=b,a):s},a.theta=function(b){return arguments.length?(t=b,a):t},a.start=function(){function k(a,c){var d=l(b),e=-1,f=d.length,g;while(++e<f)if(!isNaN(g=d[e][a]))return g;return Math.random()*c}function l(){if(!i){i=[];for(d=0;d<e;++d)i[d]=[];for(d=0;d<f;++d){var a=w[d];i[a.source.index].push(a.target),i[a.target.index].push(a.source)}}return i[b]}var 
b,d,e=v.length,f=w.length,g=c[0],h=c[1],i,j;for(b=0;b<e;++b)(j=v[b]).index=b,j.weight=0;x=[],y=[];for(b=0;b<f;++b)j=w[b],typeof j.source=="number"&&(j.source=v[j.source]),typeof j.target=="number"&&(j.target=v[j.target]),x[b]=p.call(this,j,b),y[b]=q.call(this,j,b),++j.source.weight,++j.target.weight;for(b=0;b<e;++b)j=v[b],isNaN(j.x)&&(j.x=k("x",g)),isNaN(j.y)&&(j.y=k("y",h)),isNaN(j.px)&&(j.px=j.x),isNaN(j.py)&&(j.py=j.y);z=[];if(typeof r=="function")for(b=0;b<e;++b)z[b]=+r.call(this,v[b],b);else for(b=0;b<e;++b)z[b]=r;return a.resume()},a.resume=function(){return n=.1,d3.timer(B),a},a.stop=function(){return n=0,a},a.drag=function(){d||(d=d3.behavior.drag().origin(Object).on("dragstart",C).on("drag",j).on("dragend",i)),this.on("mouseover.force",g).on("mouseout.force",h).call(d)},d3.rebind(a,b,"on")};var e,f;d3.layout.partition=function(){function c(a,b,d,e){var f=a.children;a.x=b,a.y=a.depth*e,a.dx=d,a.dy=e;if(f&&(h=f.length)){var g=-1,h,i,j;d=a.value?d/a.value:0;while(++g<h)c(i=f[g],b,j=i.value*d,e),b+=j}}function d(a){var b=a.children,c=0;if(b&&(f=b.length)){var e=-1,f;while(++e<f)c=Math.max(c,d(b[e]))}return 1+c}function e(e,f){var g=a.call(this,e,f);return c(g[0],0,b[0],b[1]/d(g[0])),g}var a=d3.layout.hierarchy(),b=[1,1];return e.size=function(a){return arguments.length?(b=a,e):b},z(e,a)},d3.layout.pie=function(){function f(g,h){var i=g.map(function(b,c){return+a.call(f,b,c)}),j=+(typeof c=="function"?c.apply(this,arguments):c),k=((typeof e=="function"?e.apply(this,arguments):e)-c)/d3.sum(i),l=d3.range(g.length);b!=null&&l.sort(b===n?function(a,b){return i[b]-i[a]}:function(a,c){return b(g[a],g[c])});var m=[];return l.forEach(function(a){m[a]={data:g[a],value:d=i[a],startAngle:j,endAngle:j+=d*k}}),m}var a=Number,b=n,c=0,e=2*Math.PI;return f.value=function(b){return arguments.length?(a=b,f):a},f.sort=function(a){return arguments.length?(b=a,f):b},f.startAngle=function(a){return arguments.length?(c=a,f):c},f.endAngle=function(a){return 
arguments.length?(e=a,f):e},f};var n={};d3.layout.stack=function(){function g(h,i){var j=h.map(function(b,c){return a.call(g,b,c)}),k=j.map(function(a,b){return a.map(function(a,b){return[e.call(g,a,b),f.call(g,a,b)]})}),l=b.call(g,k,i);j=d3.permute(j,l),k=d3.permute(k,l);var m=c.call(g,k,i),n=j.length,o=j[0].length,p,q,r;for(q=0;q<o;++q){d.call(g,j[0][q],r=m[q],k[0][q][1]);for(p=1;p<n;++p)d.call(g,j[p][q],r+=k[p-1][q][1],k[p][q][1])}return h}var a=Object,b=r["default"],c=s.zero,d=q,e=o,f=p;return g.values=function(b){return arguments.length?(a=b,g):a},g.order=function(a){return arguments.length?(b=typeof a=="function"?a:r[a],g):b},g.offset=function(a){return arguments.length?(c=typeof a=="function"?a:s[a],g):c},g.x=function(a){return arguments.length?(e=a,g):e},g.y=function(a){return arguments.length?(f=a,g):f},g.out=function(a){return arguments.length?(d=a,g):d},g};var r={"inside-out":function(a){var b=a.length,c,d,e=a.map(t),f=a.map(u),g=d3.range(b).sort(function(a,b){return e[a]-e[b]}),h=0,i=0,j=[],k=[];for(c=0;c<b;++c)d=g[c],h<i?(h+=f[d],j.push(d)):(i+=f[d],k.push(d));return k.reverse().concat(j)},reverse:function(a){return d3.range(a.length).reverse()},"default":function(a){return d3.range(a.length)}},s={silhouette:function(a){var b=a.length,c=a[0].length,d=[],e=0,f,g,h,i=[];for(g=0;g<c;++g){for(f=0,h=0;f<b;f++)h+=a[f][g][1];h>e&&(e=h),d.push(h)}for(g=0;g<c;++g)i[g]=(e-d[g])/2;return i},wiggle:function(a){var b=a.length,c=a[0],d=c.length,e=0,f,g,h,i,j,k,l,m,n,o=[];o[0]=m=n=0;for(g=1;g<d;++g){for(f=0,i=0;f<b;++f)i+=a[f][g][1];for(f=0,j=0,l=c[g][0]-c[g-1][0];f<b;++f){for(h=0,k=(a[f][g][1]-a[f][g-1][1])/(2*l);h<f;++h)k+=(a[h][g][1]-a[h][g-1][1])/l;j+=k*a[f][g][1]}o[g]=m-=i?j/i*l:0,m<n&&(n=m)}for(g=0;g<d;++g)o[g]-=n;return o},expand:function(a){var b=a.length,c=a[0].length,d=1/b,e,f,g,h=[];for(f=0;f<c;++f){for(e=0,g=0;e<b;e++)g+=a[e][f][1];if(g)for(e=0;e<b;e++)a[e][f][1]/=g;else for(e=0;e<b;e++)a[e][f][1]=d}for(f=0;f<c;++f)h[f]=0;return h},zero:function(a){var 
b=-1,c=a[0].length,d=[];while(++b<c)d[b]=0;return d}};d3.layout.histogram=function(){function e(e,f){var g=[],h=e.map(b,this),i=c.call(this,h,f),j=d.call(this,i,h,f),k,f=-1,l=h.length,m=j.length-1,n=a?1:1/l,o;while(++f<m)k=g[f]=[],k.dx=j[f+1]-(k.x=j[f]),k.y=0;f=-1;while(++f<l)o=h[f],o>=i[0]&&o<=i[1]&&(k=g[d3.bisect(j,o,1,m)-1],k.y+=n,k.push(e[f]));return g}var a=!0,b=Number,c=y,d=w;return e.value=function(a){return arguments.length?(b=a,e):b},e.range=function(a){return arguments.length?(c=d3.functor(a),e):c},e.bins=function(a){return arguments.length?(d=typeof a=="number"?function(b){return x(b,a)}:d3.functor(a),e):d},e.frequency=function(b){return arguments.length?(a=!!b,e):a},e},d3.layout.hierarchy=function(){function e(f,h,i){var j=b.call(g,f,h),k=E?f:{data:f};k.depth=h,i.push(k);if(j&&(m=j.length)){var l=-1,m,n=k.children=[],o=0,p=h+1;while(++l<m)d=e(j[l],p,i),d.parent=k,n.push(d),o+=d.value;a&&n.sort(a),c&&(k.value=o)}else c&&(k.value=+c.call(g,f,h)||0);return k}function f(a,b){var d=a.children,e=0;if(d&&(i=d.length)){var h=-1,i,j=b+1;while(++h<i)e+=f(d[h],j)}else c&&(e=+c.call(g,E?a:a.data,b)||0);return c&&(a.value=e),e}function g(a){var b=[];return e(a,0,b),b}var a=C,b=A,c=B;return g.sort=function(b){return arguments.length?(a=b,g):a},g.children=function(a){return arguments.length?(b=a,g):b},g.value=function(a){return arguments.length?(c=a,g):c},g.revalue=function(a){return f(a,0),a},g};var E=!1;d3.layout.pack=function(){function c(c,d){var e=a.call(this,c,d),f=e[0];f.x=0,f.y=0,M(f);var g=b[0],h=b[1],i=1/Math.max(2*f.r/g,2*f.r/h);return N(f,g/2,h/2,i),e}var a=d3.layout.hierarchy().sort(F),b=[1,1];return c.size=function(a){return arguments.length?(b=a,c):b},z(c,a)},d3.layout.cluster=function(){function d(d,e){var f=a.call(this,d,e),g=f[0],h,i=0,j,k;$(g,function(a){var c=a.children;c&&c.length?(a.x=Q(c),a.y=P(c)):(a.x=h?i+=b(a,h):0,a.y=0,h=a)});var l=R(g),m=S(g),n=l.x-b(l,m)/2,o=m.x+b(m,l)/2;return 
$(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=(1-(g.y?a.y/g.y:1))*c[1]}),f}var a=d3.layout.hierarchy().sort(null).value(null),b=T,c=[1,1];return d.separation=function(a){return arguments.length?(b=a,d):b},d.size=function(a){return arguments.length?(c=a,d):c},z(d,a)},d3.layout.tree=function(){function d(d,e){function h(a,c){var d=a.children,e=a._tree;if(d&&(f=d.length)){var f,g=d[0],i,k=g,l,m=-1;while(++m<f)l=d[m],h(l,i),k=j(l,i,k),i=l;_(a);var n=.5*(g._tree.prelim+l._tree.prelim);c?(e.prelim=c._tree.prelim+b(a,c),e.mod=e.prelim-n):e.prelim=n}else c&&(e.prelim=c._tree.prelim+b(a,c))}function i(a,b){a.x=a._tree.prelim+b;var c=a.children;if(c&&(e=c.length)){var d=-1,e;b+=a._tree.mod;while(++d<e)i(c[d],b)}}function j(a,c,d){if(c){var e=a,f=a,g=c,h=a.parent.children[0],i=e._tree.mod,j=f._tree.mod,k=g._tree.mod,l=h._tree.mod,m;while(g=V(g),e=U(e),g&&e)h=U(h),f=V(f),f._tree.ancestor=a,m=g._tree.prelim+k-e._tree.prelim-i+b(g,e),m>0&&(ba(bb(g,a,d),a,m),i+=m,j+=m),k+=g._tree.mod,i+=e._tree.mod,l+=h._tree.mod,j+=f._tree.mod;g&&!V(f)&&(f._tree.thread=g,f._tree.mod+=k-j),e&&!U(h)&&(h._tree.thread=e,h._tree.mod+=i-l,d=a)}return d}var f=a.call(this,d,e),g=f[0];$(g,function(a,b){a._tree={ancestor:a,prelim:0,mod:0,change:0,shift:0,number:b?b._tree.number+1:0}}),h(g),i(g,-g._tree.prelim);var k=W(g,Y),l=W(g,X),m=W(g,Z),n=k.x-b(k,l)/2,o=l.x+b(l,k)/2,p=m.depth||1;return $(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=a.depth/p*c[1],delete a._tree}),f}var a=d3.layout.hierarchy().sort(null).value(null),b=T,c=[1,1];return d.separation=function(a){return arguments.length?(b=a,d):b},d.size=function(a){return arguments.length?(c=a,d):c},z(d,a)},d3.layout.treemap=function(){function i(a,b){var c=-1,d=a.length,e,f;while(++c<d)f=(e=a[c]).value*(b<0?0:b),e.area=isNaN(f)||f<=0?0:f}function j(a){var b=a.children;if(b&&b.length){var 
c=e(a),d=[],f=b.slice(),g,h=Infinity,k,n=Math.min(c.dx,c.dy),o;i(f,c.dx*c.dy/a.value),d.area=0;while((o=f.length)>0)d.push(g=f[o-1]),d.area+=g.area,(k=l(d,n))<=h?(f.pop(),h=k):(d.area-=d.pop().area,m(d,n,c,!1),n=Math.min(c.dx,c.dy),d.length=d.area=0,h=Infinity);d.length&&(m(d,n,c,!0),d.length=d.area=0),b.forEach(j)}}function k(a){var b=a.children;if(b&&b.length){var c=e(a),d=b.slice(),f,g=[];i(d,c.dx*c.dy/a.value),g.area=0;while(f=d.pop())g.push(f),g.area+=f.area,f.z!=null&&(m(g,f.z?c.dx:c.dy,c,!d.length),g.length=g.area=0);b.forEach(k)}}function l(a,b){var c=a.area,d,e=0,f=Infinity,g=-1,i=a.length;while(++g<i){if(!(d=a[g].area))continue;d<f&&(f=d),d>e&&(e=d)}return c*=c,b*=b,c?Math.max(b*e*h/c,c/(b*f*h)):Infinity}function m(a,c,d,e){var f=-1,g=a.length,h=d.x,i=d.y,j=c?b(a.area/c):0,k;if(c==d.dx){if(e||j>d.dy)j=j?d.dy:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dy=j,h+=k.dx=j?b(k.area/j):0;k.z=!0,k.dx+=d.x+d.dx-h,d.y+=j,d.dy-=j}else{if(e||j>d.dx)j=j?d.dx:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dx=j,i+=k.dy=j?b(k.area/j):0;k.z=!1,k.dy+=d.y+d.dy-i,d.x+=j,d.dx-=j}}function n(b){var d=g||a(b),e=d[0];return e.x=0,e.y=0,e.dx=c[0],e.dy=c[1],g&&a.revalue(e),i([e],e.dx*e.dy/e.value),(g?k:j)(e),f&&(g=d),d}var a=d3.layout.hierarchy(),b=Math.round,c=[1,1],d=null,e=bc,f=!1,g,h=.5*(1+Math.sqrt(5));return n.size=function(a){return arguments.length?(c=a,n):c},n.padding=function(a){function b(b){var c=a.call(n,b,b.depth);return c==null?bc(b):bd(b,typeof c=="number"?[c,c,c,c]:c)}function c(b){return bd(b,a)}if(!arguments.length)return d;var f;return e=(d=a)==null?bc:(f=typeof a)==="function"?b:f==="number"?(a=[a,a,a,a],c):c,n},n.round=function(a){return arguments.length?(b=a?Math.round:Number,n):b!=Number},n.sticky=function(a){return arguments.length?(f=a,g=null,n):f},n.ratio=function(a){return arguments.length?(h=a,n):h},z(n,a)}})();
<ide>\ No newline at end of file
<add>(function(){function a(a){var b=a.source,d=a.target,e=c(b,d),f=[b];while(b!==e)b=b.parent,f.push(b);var g=f.length;while(d!==e)f.splice(g,0,d),d=d.parent;return f}function b(a){var b=[],c=a.parent;while(c!=null)b.push(a),a=c,c=c.parent;return b.push(a),b}function c(a,c){if(a===c)return a;var d=b(a),e=b(c),f=d.pop(),g=e.pop(),h=null;while(f===g)h=f,f=d.pop(),g=e.pop();return h}function g(a){a.fixed|=2}function h(a){a!==f&&(a.fixed&=1)}function i(){j(),f.fixed&=1,e=f=null}function j(){f.px=d3.event.x,f.py=d3.event.y,e.resume()}function k(a,b,c){var d=0,e=0;a.charge=0;if(!a.leaf){var f=a.nodes,g=f.length,h=-1,i;while(++h<g){i=f[h];if(i==null)continue;k(i,b,c),a.charge+=i.charge,d+=i.charge*i.cx,e+=i.charge*i.cy}}if(a.point){a.leaf||(a.point.x+=Math.random()-.5,a.point.y+=Math.random()-.5);var j=b*c[a.point.index];a.charge+=a.pointCharge=j,d+=j*a.point.x,e+=j*a.point.y}a.cx=d/a.charge,a.cy=e/a.charge}function l(a){return 20}function m(a){return 1}function o(a){return a.x}function p(a){return a.y}function q(a,b,c){a.y0=b,a.y=c}function t(a){var b=1,c=0,d=a[0][1],e,f=a.length;for(;b<f;++b)(e=a[b][1])>d&&(c=b,d=e);return c}function u(a){return a.reduce(v,0)}function v(a,b){return a+b[1]}function w(a,b){return x(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function x(a,b){var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];while(++c<=b)f[c]=e*c+d;return f}function y(a){return[d3.min(a),d3.max(a)]}function z(a,b){return d3.rebind(a,b,"sort","children","value"),a.links=D,a.nodes=function(b){return E=!0,(a.nodes=a)(b)},a}function A(a){return a.children}function B(a){return a.value}function C(a,b){return b.value-a.value}function D(a){return d3.merge(a.map(function(a){return(a.children||[]).map(function(b){return{source:a,target:b}})}))}function F(a,b){return a.value-b.value}function G(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function H(a,b){a._pack_next=b,b._pack_prev=a}function I(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return 
e*e-c*c-d*d>.001}function J(a){function l(a){b=Math.min(a.x-a.r,b),c=Math.max(a.x+a.r,c),d=Math.min(a.y-a.r,d),e=Math.max(a.y+a.r,e)}var b=Infinity,c=-Infinity,d=Infinity,e=-Infinity,f=a.length,g,h,i,j,k;a.forEach(K),g=a[0],g.x=-g.r,g.y=0,l(g);if(f>1){h=a[1],h.x=h.r,h.y=0,l(h);if(f>2){i=a[2],O(g,h,i),l(i),G(g,i),g._pack_prev=i,G(i,h),h=g._pack_next;for(var m=3;m<f;m++){O(g,h,i=a[m]);var n=0,o=1,p=1;for(j=h._pack_next;j!==h;j=j._pack_next,o++)if(I(j,i)){n=1;break}if(n==1)for(k=g._pack_prev;k!==j._pack_prev;k=k._pack_prev,p++)if(I(k,i))break;n?(o<p||o==p&&h.r<g.r?H(g,h=j):H(g=k,h),m--):(G(g,i),h=i,l(i))}}}var q=(b+c)/2,r=(d+e)/2,s=0;for(var m=0;m<f;m++){var t=a[m];t.x-=q,t.y-=r,s=Math.max(s,t.r+Math.sqrt(t.x*t.x+t.y*t.y))}return a.forEach(L),s}function K(a){a._pack_next=a._pack_prev=a}function L(a){delete a._pack_next,delete a._pack_prev}function M(a){var b=a.children;b&&b.length?(b.forEach(M),a.r=J(b)):a.r=Math.sqrt(a.value)}function N(a,b,c,d){var e=a.children;a.x=b+=d*a.x,a.y=c+=d*a.y,a.r*=d;if(e){var f=-1,g=e.length;while(++f<g)N(e[f],b,c,d)}}function O(a,b,c){var d=a.r+c.r,e=b.x-a.x,f=b.y-a.y;if(d&&(e||f)){var g=b.r+c.r,h=Math.sqrt(e*e+f*f),i=Math.max(-1,Math.min(1,(d*d+h*h-g*g)/(2*d*h))),j=Math.acos(i),k=i*(d/=h),l=Math.sin(j)*d;c.x=a.x+k*e+l*f,c.y=a.y+k*f-l*e}else c.x=a.x+d,c.y=a.y}function P(a){return 1+d3.max(a,function(a){return a.y})}function Q(a){return a.reduce(function(a,b){return a+b.x},0)/a.length}function R(a){var b=a.children;return b&&b.length?R(b[0]):a}function S(a){var b=a.children,c;return b&&(c=b.length)?S(b[c-1]):a}function T(a,b){return a.parent==b.parent?1:2}function U(a){var b=a.children;return b&&b.length?b[0]:a._tree.thread}function V(a){var b=a.children,c;return b&&(c=b.length)?b[c-1]:a._tree.thread}function W(a,b){var c=a.children;if(c&&(e=c.length)){var d,e,f=-1;while(++f<e)b(d=W(c[f],b),a)>0&&(a=d)}return a}function X(a,b){return a.x-b.x}function Y(a,b){return b.x-a.x}function Z(a,b){return a.depth-b.depth}function $(a,b){function 
c(a,d){var e=a.children;if(e&&(i=e.length)){var f,g=null,h=-1,i;while(++h<i)f=e[h],c(f,g),g=f}b(a,d)}c(a,null)}function _(a){var b=0,c=0,d=a.children,e=d.length,f;while(--e>=0)f=d[e]._tree,f.prelim+=b,f.mod+=b,b+=f.shift+(c+=f.change)}function ba(a,b,c){a=a._tree,b=b._tree;var d=c/(b.number-a.number);a.change+=d,b.change-=d,b.shift+=c,b.prelim+=c,b.mod+=c}function bb(a,b,c){return a._tree.ancestor.parent==b.parent?a._tree.ancestor:c}function bc(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function bd(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];return e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0),{x:c,y:d,dx:e,dy:f}}d3.layout={},d3.layout.bundle=function(){return function(b){var c=[],d=-1,e=b.length;while(++d<e)c.push(a(b[d]));return c}},d3.layout.chord=function(){function j(){var a={},j=[],l=d3.range(e),m=[],n,o,p,q,r;b=[],c=[],n=0,q=-1;while(++q<e){o=0,r=-1;while(++r<e)o+=d[q][r];j.push(o),m.push(d3.range(e)),n+=o}g&&l.sort(function(a,b){return g(j[a],j[b])}),h&&m.forEach(function(a,b){a.sort(function(a,c){return h(d[b][a],d[b][c])})}),n=(2*Math.PI-f*e)/n,o=0,q=-1;while(++q<e){p=o,r=-1;while(++r<e){var s=l[q],t=m[s][r],u=d[s][t],v=o,w=o+=u*n;a[s+"-"+t]={index:s,subindex:t,startAngle:v,endAngle:w,value:u}}c.push({index:s,startAngle:p,endAngle:o,value:(o-p)/n}),o+=f}q=-1;while(++q<e){r=q-1;while(++r<e){var x=a[q+"-"+r],y=a[r+"-"+q];(x.value||y.value)&&b.push(x.value<y.value?{source:y,target:x}:{source:x,target:y})}}i&&k()}function k(){b.sort(function(a,b){return i((a.source.value+a.target.value)/2,(b.source.value+b.target.value)/2)})}var a={},b,c,d,e,f=0,g,h,i;return a.matrix=function(f){return arguments.length?(e=(d=f)&&d.length,b=c=null,a):d},a.padding=function(d){return arguments.length?(f=d,b=c=null,a):f},a.sortGroups=function(d){return arguments.length?(g=d,b=c=null,a):g},a.sortSubgroups=function(c){return arguments.length?(h=c,b=null,a):h},a.sortChords=function(c){return arguments.length?(i=c,b&&k(),a):i},a.chords=function(){return 
b||j(),b},a.groups=function(){return c||j(),c},a},d3.layout.force=function(){function A(a){return function(b,c,d,e,f){if(b.point!==a){var g=b.cx-a.x,h=b.cy-a.y,i=1/Math.sqrt(g*g+h*h);if((e-c)*i<t){var j=b.charge*i*i;return a.px-=g*j,a.py-=h*j,!0}if(b.point&&isFinite(i)){var j=b.pointCharge*i*i;a.px-=g*j,a.py-=h*j}}return!b.charge}}function B(b){g(f=b),e=a}var a={},b=d3.dispatch("tick"),c=[1,1],d,n,o=.9,p=l,q=m,r=-30,s=.1,t=.8,u,v=[],w=[],x,y,z;return a.tick=function(){var a=v.length,d=w.length,e,f,g,h,i,j,l,m,p;for(f=0;f<d;++f){g=w[f],h=g.source,i=g.target,m=i.x-h.x,p=i.y-h.y;if(j=m*m+p*p)j=n*y[f]*((j=Math.sqrt(j))-x[f])/j,m*=j,p*=j,i.x-=m*(l=h.weight/(i.weight+h.weight)),i.y-=p*l,h.x+=m*(l=1-l),h.y+=p*l}if(l=n*s){m=c[0]/2,p=c[1]/2,f=-1;if(l)while(++f<a)g=v[f],g.x+=(m-g.x)*l,g.y+=(p-g.y)*l}if(r){k(e=d3.geom.quadtree(v),n,z),f=-1;while(++f<a)(g=v[f]).fixed||e.visit(A(g))}f=-1;while(++f<a)g=v[f],g.fixed?(g.x=g.px,g.y=g.py):(g.x-=(g.px-(g.px=g.x))*o,g.y-=(g.py-(g.py=g.y))*o);return b.tick({type:"tick",alpha:n}),(n*=.99)<.005},a.nodes=function(b){return arguments.length?(v=b,a):v},a.links=function(b){return arguments.length?(w=b,a):w},a.size=function(b){return arguments.length?(c=b,a):c},a.linkDistance=function(b){return arguments.length?(p=d3.functor(b),a):p},a.distance=a.linkDistance,a.linkStrength=function(b){return arguments.length?(q=d3.functor(b),a):q},a.friction=function(b){return arguments.length?(o=b,a):o},a.charge=function(b){return arguments.length?(r=typeof b=="function"?b:+b,a):r},a.gravity=function(b){return arguments.length?(s=b,a):s},a.theta=function(b){return arguments.length?(t=b,a):t},a.start=function(){function k(a,c){var d=l(b),e=-1,f=d.length,g;while(++e<f)if(!isNaN(g=d[e][a]))return g;return Math.random()*c}function l(){if(!i){i=[];for(d=0;d<e;++d)i[d]=[];for(d=0;d<f;++d){var a=w[d];i[a.source.index].push(a.target),i[a.target.index].push(a.source)}}return i[b]}var 
b,d,e=v.length,f=w.length,g=c[0],h=c[1],i,j;for(b=0;b<e;++b)(j=v[b]).index=b,j.weight=0;x=[],y=[];for(b=0;b<f;++b)j=w[b],typeof j.source=="number"&&(j.source=v[j.source]),typeof j.target=="number"&&(j.target=v[j.target]),x[b]=p.call(this,j,b),y[b]=q.call(this,j,b),++j.source.weight,++j.target.weight;for(b=0;b<e;++b)j=v[b],isNaN(j.x)&&(j.x=k("x",g)),isNaN(j.y)&&(j.y=k("y",h)),isNaN(j.px)&&(j.px=j.x),isNaN(j.py)&&(j.py=j.y);z=[];if(typeof r=="function")for(b=0;b<e;++b)z[b]=+r.call(this,v[b],b);else for(b=0;b<e;++b)z[b]=r;return a.resume()},a.resume=function(){return n=.1,d3.timer(a.tick),a},a.stop=function(){return n=0,a},a.drag=function(){d||(d=d3.behavior.drag().origin(Object).on("dragstart",B).on("drag",j).on("dragend",i)),this.on("mouseover.force",g).on("mouseout.force",h).call(d)},d3.rebind(a,b,"on")};var e,f;d3.layout.partition=function(){function c(a,b,d,e){var f=a.children;a.x=b,a.y=a.depth*e,a.dx=d,a.dy=e;if(f&&(h=f.length)){var g=-1,h,i,j;d=a.value?d/a.value:0;while(++g<h)c(i=f[g],b,j=i.value*d,e),b+=j}}function d(a){var b=a.children,c=0;if(b&&(f=b.length)){var e=-1,f;while(++e<f)c=Math.max(c,d(b[e]))}return 1+c}function e(e,f){var g=a.call(this,e,f);return c(g[0],0,b[0],b[1]/d(g[0])),g}var a=d3.layout.hierarchy(),b=[1,1];return e.size=function(a){return arguments.length?(b=a,e):b},z(e,a)},d3.layout.pie=function(){function f(g,h){var i=g.map(function(b,c){return+a.call(f,b,c)}),j=+(typeof c=="function"?c.apply(this,arguments):c),k=((typeof e=="function"?e.apply(this,arguments):e)-c)/d3.sum(i),l=d3.range(g.length);b!=null&&l.sort(b===n?function(a,b){return i[b]-i[a]}:function(a,c){return b(g[a],g[c])});var m=[];return l.forEach(function(a){m[a]={data:g[a],value:d=i[a],startAngle:j,endAngle:j+=d*k}}),m}var a=Number,b=n,c=0,e=2*Math.PI;return f.value=function(b){return arguments.length?(a=b,f):a},f.sort=function(a){return arguments.length?(b=a,f):b},f.startAngle=function(a){return arguments.length?(c=a,f):c},f.endAngle=function(a){return 
arguments.length?(e=a,f):e},f};var n={};d3.layout.stack=function(){function g(h,i){var j=h.map(function(b,c){return a.call(g,b,c)}),k=j.map(function(a,b){return a.map(function(a,b){return[e.call(g,a,b),f.call(g,a,b)]})}),l=b.call(g,k,i);j=d3.permute(j,l),k=d3.permute(k,l);var m=c.call(g,k,i),n=j.length,o=j[0].length,p,q,r;for(q=0;q<o;++q){d.call(g,j[0][q],r=m[q],k[0][q][1]);for(p=1;p<n;++p)d.call(g,j[p][q],r+=k[p-1][q][1],k[p][q][1])}return h}var a=Object,b=r["default"],c=s.zero,d=q,e=o,f=p;return g.values=function(b){return arguments.length?(a=b,g):a},g.order=function(a){return arguments.length?(b=typeof a=="function"?a:r[a],g):b},g.offset=function(a){return arguments.length?(c=typeof a=="function"?a:s[a],g):c},g.x=function(a){return arguments.length?(e=a,g):e},g.y=function(a){return arguments.length?(f=a,g):f},g.out=function(a){return arguments.length?(d=a,g):d},g};var r={"inside-out":function(a){var b=a.length,c,d,e=a.map(t),f=a.map(u),g=d3.range(b).sort(function(a,b){return e[a]-e[b]}),h=0,i=0,j=[],k=[];for(c=0;c<b;++c)d=g[c],h<i?(h+=f[d],j.push(d)):(i+=f[d],k.push(d));return k.reverse().concat(j)},reverse:function(a){return d3.range(a.length).reverse()},"default":function(a){return d3.range(a.length)}},s={silhouette:function(a){var b=a.length,c=a[0].length,d=[],e=0,f,g,h,i=[];for(g=0;g<c;++g){for(f=0,h=0;f<b;f++)h+=a[f][g][1];h>e&&(e=h),d.push(h)}for(g=0;g<c;++g)i[g]=(e-d[g])/2;return i},wiggle:function(a){var b=a.length,c=a[0],d=c.length,e=0,f,g,h,i,j,k,l,m,n,o=[];o[0]=m=n=0;for(g=1;g<d;++g){for(f=0,i=0;f<b;++f)i+=a[f][g][1];for(f=0,j=0,l=c[g][0]-c[g-1][0];f<b;++f){for(h=0,k=(a[f][g][1]-a[f][g-1][1])/(2*l);h<f;++h)k+=(a[h][g][1]-a[h][g-1][1])/l;j+=k*a[f][g][1]}o[g]=m-=i?j/i*l:0,m<n&&(n=m)}for(g=0;g<d;++g)o[g]-=n;return o},expand:function(a){var b=a.length,c=a[0].length,d=1/b,e,f,g,h=[];for(f=0;f<c;++f){for(e=0,g=0;e<b;e++)g+=a[e][f][1];if(g)for(e=0;e<b;e++)a[e][f][1]/=g;else for(e=0;e<b;e++)a[e][f][1]=d}for(f=0;f<c;++f)h[f]=0;return h},zero:function(a){var 
b=-1,c=a[0].length,d=[];while(++b<c)d[b]=0;return d}};d3.layout.histogram=function(){function e(e,f){var g=[],h=e.map(b,this),i=c.call(this,h,f),j=d.call(this,i,h,f),k,f=-1,l=h.length,m=j.length-1,n=a?1:1/l,o;while(++f<m)k=g[f]=[],k.dx=j[f+1]-(k.x=j[f]),k.y=0;f=-1;while(++f<l)o=h[f],o>=i[0]&&o<=i[1]&&(k=g[d3.bisect(j,o,1,m)-1],k.y+=n,k.push(e[f]));return g}var a=!0,b=Number,c=y,d=w;return e.value=function(a){return arguments.length?(b=a,e):b},e.range=function(a){return arguments.length?(c=d3.functor(a),e):c},e.bins=function(a){return arguments.length?(d=typeof a=="number"?function(b){return x(b,a)}:d3.functor(a),e):d},e.frequency=function(b){return arguments.length?(a=!!b,e):a},e},d3.layout.hierarchy=function(){function e(f,h,i){var j=b.call(g,f,h),k=E?f:{data:f};k.depth=h,i.push(k);if(j&&(m=j.length)){var l=-1,m,n=k.children=[],o=0,p=h+1;while(++l<m)d=e(j[l],p,i),d.parent=k,n.push(d),o+=d.value;a&&n.sort(a),c&&(k.value=o)}else c&&(k.value=+c.call(g,f,h)||0);return k}function f(a,b){var d=a.children,e=0;if(d&&(i=d.length)){var h=-1,i,j=b+1;while(++h<i)e+=f(d[h],j)}else c&&(e=+c.call(g,E?a:a.data,b)||0);return c&&(a.value=e),e}function g(a){var b=[];return e(a,0,b),b}var a=C,b=A,c=B;return g.sort=function(b){return arguments.length?(a=b,g):a},g.children=function(a){return arguments.length?(b=a,g):b},g.value=function(a){return arguments.length?(c=a,g):c},g.revalue=function(a){return f(a,0),a},g};var E=!1;d3.layout.pack=function(){function c(c,d){var e=a.call(this,c,d),f=e[0];f.x=0,f.y=0,M(f);var g=b[0],h=b[1],i=1/Math.max(2*f.r/g,2*f.r/h);return N(f,g/2,h/2,i),e}var a=d3.layout.hierarchy().sort(F),b=[1,1];return c.size=function(a){return arguments.length?(b=a,c):b},z(c,a)},d3.layout.cluster=function(){function d(d,e){var f=a.call(this,d,e),g=f[0],h,i=0,j,k;$(g,function(a){var c=a.children;c&&c.length?(a.x=Q(c),a.y=P(c)):(a.x=h?i+=b(a,h):0,a.y=0,h=a)});var l=R(g),m=S(g),n=l.x-b(l,m)/2,o=m.x+b(m,l)/2;return 
$(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=(1-(g.y?a.y/g.y:1))*c[1]}),f}var a=d3.layout.hierarchy().sort(null).value(null),b=T,c=[1,1];return d.separation=function(a){return arguments.length?(b=a,d):b},d.size=function(a){return arguments.length?(c=a,d):c},z(d,a)},d3.layout.tree=function(){function d(d,e){function h(a,c){var d=a.children,e=a._tree;if(d&&(f=d.length)){var f,g=d[0],i,k=g,l,m=-1;while(++m<f)l=d[m],h(l,i),k=j(l,i,k),i=l;_(a);var n=.5*(g._tree.prelim+l._tree.prelim);c?(e.prelim=c._tree.prelim+b(a,c),e.mod=e.prelim-n):e.prelim=n}else c&&(e.prelim=c._tree.prelim+b(a,c))}function i(a,b){a.x=a._tree.prelim+b;var c=a.children;if(c&&(e=c.length)){var d=-1,e;b+=a._tree.mod;while(++d<e)i(c[d],b)}}function j(a,c,d){if(c){var e=a,f=a,g=c,h=a.parent.children[0],i=e._tree.mod,j=f._tree.mod,k=g._tree.mod,l=h._tree.mod,m;while(g=V(g),e=U(e),g&&e)h=U(h),f=V(f),f._tree.ancestor=a,m=g._tree.prelim+k-e._tree.prelim-i+b(g,e),m>0&&(ba(bb(g,a,d),a,m),i+=m,j+=m),k+=g._tree.mod,i+=e._tree.mod,l+=h._tree.mod,j+=f._tree.mod;g&&!V(f)&&(f._tree.thread=g,f._tree.mod+=k-j),e&&!U(h)&&(h._tree.thread=e,h._tree.mod+=i-l,d=a)}return d}var f=a.call(this,d,e),g=f[0];$(g,function(a,b){a._tree={ancestor:a,prelim:0,mod:0,change:0,shift:0,number:b?b._tree.number+1:0}}),h(g),i(g,-g._tree.prelim);var k=W(g,Y),l=W(g,X),m=W(g,Z),n=k.x-b(k,l)/2,o=l.x+b(l,k)/2,p=m.depth||1;return $(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=a.depth/p*c[1],delete a._tree}),f}var a=d3.layout.hierarchy().sort(null).value(null),b=T,c=[1,1];return d.separation=function(a){return arguments.length?(b=a,d):b},d.size=function(a){return arguments.length?(c=a,d):c},z(d,a)},d3.layout.treemap=function(){function i(a,b){var c=-1,d=a.length,e,f;while(++c<d)f=(e=a[c]).value*(b<0?0:b),e.area=isNaN(f)||f<=0?0:f}function j(a){var b=a.children;if(b&&b.length){var 
c=e(a),d=[],f=b.slice(),g,h=Infinity,k,n=Math.min(c.dx,c.dy),o;i(f,c.dx*c.dy/a.value),d.area=0;while((o=f.length)>0)d.push(g=f[o-1]),d.area+=g.area,(k=l(d,n))<=h?(f.pop(),h=k):(d.area-=d.pop().area,m(d,n,c,!1),n=Math.min(c.dx,c.dy),d.length=d.area=0,h=Infinity);d.length&&(m(d,n,c,!0),d.length=d.area=0),b.forEach(j)}}function k(a){var b=a.children;if(b&&b.length){var c=e(a),d=b.slice(),f,g=[];i(d,c.dx*c.dy/a.value),g.area=0;while(f=d.pop())g.push(f),g.area+=f.area,f.z!=null&&(m(g,f.z?c.dx:c.dy,c,!d.length),g.length=g.area=0);b.forEach(k)}}function l(a,b){var c=a.area,d,e=0,f=Infinity,g=-1,i=a.length;while(++g<i){if(!(d=a[g].area))continue;d<f&&(f=d),d>e&&(e=d)}return c*=c,b*=b,c?Math.max(b*e*h/c,c/(b*f*h)):Infinity}function m(a,c,d,e){var f=-1,g=a.length,h=d.x,i=d.y,j=c?b(a.area/c):0,k;if(c==d.dx){if(e||j>d.dy)j=j?d.dy:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dy=j,h+=k.dx=j?b(k.area/j):0;k.z=!0,k.dx+=d.x+d.dx-h,d.y+=j,d.dy-=j}else{if(e||j>d.dx)j=j?d.dx:0;while(++f<g)k=a[f],k.x=h,k.y=i,k.dx=j,i+=k.dy=j?b(k.area/j):0;k.z=!1,k.dy+=d.y+d.dy-i,d.x+=j,d.dx-=j}}function n(b){var d=g||a(b),e=d[0];return e.x=0,e.y=0,e.dx=c[0],e.dy=c[1],g&&a.revalue(e),i([e],e.dx*e.dy/e.value),(g?k:j)(e),f&&(g=d),d}var a=d3.layout.hierarchy(),b=Math.round,c=[1,1],d=null,e=bc,f=!1,g,h=.5*(1+Math.sqrt(5));return n.size=function(a){return arguments.length?(c=a,n):c},n.padding=function(a){function b(b){var c=a.call(n,b,b.depth);return c==null?bc(b):bd(b,typeof c=="number"?[c,c,c,c]:c)}function c(b){return bd(b,a)}if(!arguments.length)return d;var f;return e=(d=a)==null?bc:(f=typeof a)==="function"?b:f==="number"?(a=[a,a,a,a],c):c,n},n.round=function(a){return arguments.length?(b=a?Math.round:Number,n):b!=Number},n.sticky=function(a){return arguments.length?(f=a,g=null,n):f},n.ratio=function(a){return arguments.length?(h=a,n):h},z(n,a)}})();
<ide>\ No newline at end of file
<ide><path>src/layout/force.js
<ide> d3.layout.force = function() {
<ide> };
<ide> }
<ide>
<del> function tick() {
<add> force.tick = function() {
<ide> var n = nodes.length,
<ide> m = links.length,
<ide> q,
<ide> d3.layout.force = function() {
<ide>
<ide> // simulated annealing, basically
<ide> return (alpha *= .99) < .005;
<del> }
<add> };
<ide>
<ide> force.nodes = function(x) {
<ide> if (!arguments.length) return nodes;
<ide> d3.layout.force = function() {
<ide>
<ide> force.resume = function() {
<ide> alpha = .1;
<del> d3.timer(tick);
<add> d3.timer(force.tick);
<ide> return force;
<ide> };
<ide> | 3 |
Text | Text | remove testing block | d37605766dd6c8e930d46accad64c629f587e6db | <ide><path>README.md
<ide> Check out our [documentation on the docs tab](https://github.com/github/atom/doc
<ide> 1. `gh-setup atom`
<ide>
<ide> 2. `cd ~/github/atom && rake install`
<del>
<del>```coffee
<del>describe "GitHub Flavored Markdown grammar", ->
<del>``` | 1 |
PHP | PHP | remove copy + paste tests | 955889c6c731a56f9cbe6f572cea4594fd887d3a | <ide><path>lib/Cake/Test/TestCase/Console/Command/Task/TemplateTaskTest.php
<ide> public function tearDown() {
<ide> unset($this->Task);
<ide> }
<ide>
<del>/**
<del> * test that set sets variables
<del> *
<del> * @return void
<del> */
<del> public function testSet() {
<del> $this->Task->set('one', 'two');
<del> $this->assertTrue(isset($this->Task->viewVars['one']));
<del> $this->assertEquals('two', $this->Task->viewVars['one']);
<del>
<del> $this->Task->set(array('one' => 'three', 'four' => 'five'));
<del> $this->assertTrue(isset($this->Task->viewVars['one']));
<del> $this->assertEquals('three', $this->Task->viewVars['one']);
<del> $this->assertTrue(isset($this->Task->viewVars['four']));
<del> $this->assertEquals('five', $this->Task->viewVars['four']);
<del>
<del> $this->Task->viewVars = array();
<del> $this->Task->set(array(3 => 'three', 4 => 'four'));
<del> $this->Task->set(array(1 => 'one', 2 => 'two'));
<del> $expected = array(3 => 'three', 4 => 'four', 1 => 'one', 2 => 'two');
<del> $this->assertEquals($expected, $this->Task->viewVars);
<del> }
<del>
<ide> /**
<ide> * test finding themes installed in
<ide> *
<ide><path>lib/Cake/Test/TestCase/Controller/ControllerTest.php
<ide> public function testFlash() {
<ide> App::build();
<ide> }
<ide>
<del>/**
<del> * testControllerSet method
<del> *
<del> * @return void
<del> */
<del> public function testControllerSet() {
<del> $request = new Request('controller_posts/index');
<del> $Controller = new Controller($request);
<del>
<del> $Controller->set('variable_with_underscores', null);
<del> $this->assertTrue(array_key_exists('variable_with_underscores', $Controller->viewVars));
<del>
<del> $Controller->viewVars = array();
<del> $viewVars = array('ModelName' => array('id' => 1, 'name' => 'value'));
<del> $Controller->set($viewVars);
<del> $this->assertTrue(array_key_exists('ModelName', $Controller->viewVars));
<del>
<del> $Controller->viewVars = array();
<del> $Controller->set('variable_with_underscores', 'value');
<del> $this->assertTrue(array_key_exists('variable_with_underscores', $Controller->viewVars));
<del>
<del> $Controller->viewVars = array();
<del> $viewVars = array('ModelName' => 'name');
<del> $Controller->set($viewVars);
<del> $this->assertTrue(array_key_exists('ModelName', $Controller->viewVars));
<del>
<del> $Controller->set('title', 'someTitle');
<del> $this->assertSame($Controller->viewVars['title'], 'someTitle');
<del> $this->assertTrue(empty($Controller->pageTitle));
<del>
<del> $Controller->viewVars = array();
<del> $expected = array('ModelName' => 'name', 'ModelName2' => 'name2');
<del> $Controller->set(array('ModelName', 'ModelName2'), array('name', 'name2'));
<del> $this->assertSame($expected, $Controller->viewVars);
<del>
<del> $Controller->viewVars = array();
<del> $Controller->set(array(3 => 'three', 4 => 'four'));
<del> $Controller->set(array(1 => 'one', 2 => 'two'));
<del> $expected = array(3 => 'three', 4 => 'four', 1 => 'one', 2 => 'two');
<del> $this->assertEquals($expected, $Controller->viewVars);
<del> }
<del>
<ide> /**
<ide> * testRender method
<ide> *
<ide><path>lib/Cake/Test/TestCase/View/ViewTest.php
<ide> public function testRenderStrippingNoCacheTagsOnlyCacheCheck() {
<ide> $this->assertNotRegExp('/cake:nocache/', $result);
<ide> }
<ide>
<del>/**
<del> * testSet method
<del> *
<del> * @return void
<del> */
<del> public function testSet() {
<del> $View = new TestView($this->PostsController);
<del> $View->viewVars = array();
<del> $View->set('somekey', 'someValue');
<del> $this->assertSame($View->viewVars, array('somekey' => 'someValue'));
<del> $this->assertSame($View->getVars(), array('somekey'));
<del>
<del> $View->viewVars = array();
<del> $keys = array('key1', 'key2');
<del> $values = array('value1', 'value2');
<del> $View->set($keys, $values);
<del> $this->assertSame($View->viewVars, array('key1' => 'value1', 'key2' => 'value2'));
<del> $this->assertSame($View->getVars(), array('key1', 'key2'));
<del> $this->assertSame($View->getVar('key1'), 'value1');
<del> $this->assertNull($View->getVar('key3'));
<del>
<del> $View->set(array('key3' => 'value3'));
<del> $this->assertSame($View->getVar('key3'), 'value3');
<del>
<del> $View->viewVars = array();
<del> $View->set(array(3 => 'three', 4 => 'four'));
<del> $View->set(array(1 => 'one', 2 => 'two'));
<del> $expected = array(3 => 'three', 4 => 'four', 1 => 'one', 2 => 'two');
<del> $this->assertEquals($expected, $View->viewVars);
<del> }
<del>
<ide> /**
<ide> * testBadExt method
<ide> * | 3 |
Text | Text | improve description of tz | ac2c082f63e312d5685542b239d5e2d21be6a08b | <ide><path>doc/api/cli.md
<ide> changes:
<ide>
<ide> The `TZ` environment variable is used to specify the timezone configuration.
<ide>
<del>While the Node.js support for `TZ` will not handle all of the various
<del>[ways that `TZ` is handled in other environments][], it will support basic
<del>[timezone IDs][] (such as `'Etc/UTC'`, `'Europe/Paris'` or `'America/New_York'`.
<add>While Node.js does not support all of the various [ways that `TZ` is handled in
<add>other environments][], it does support basic [timezone IDs][] (such as
<add>`'Etc/UTC'`, `'Europe/Paris'`, or `'America/New_York'`).
<ide> It may support a few other abbreviations or aliases, but these are strongly
<ide> discouraged and not guaranteed.
<ide> | 1 |
PHP | PHP | return migrated jobs for customization | f21e942ae8a4104ffdf42c231601efe8759c4c10 | <ide><path>src/Illuminate/Queue/LuaScripts.php
<ide> public static function migrateExpiredJobs()
<ide> end
<ide> end
<ide>
<del>return true
<add>return val
<ide> LUA;
<ide> }
<ide> }
<ide><path>src/Illuminate/Queue/RedisQueue.php
<ide> protected function migrate($queue)
<ide> *
<ide> * @param string $from
<ide> * @param string $to
<del> * @return void
<add> * @return array
<ide> */
<ide> public function migrateExpiredJobs($from, $to)
<ide> {
<del> $this->getConnection()->eval(
<add> return $this->getConnection()->eval(
<ide> LuaScripts::migrateExpiredJobs(), 2, $from, $to, $this->currentTime()
<ide> );
<ide> } | 2 |
Ruby | Ruby | optimize the code added in fa99de0bd054576336c9 | f055bc05d515b80c89b99b775546b954f270bc5c | <ide><path>actionpack/lib/action_view/render/partials.rb
<ide> def render_collection
<ide> end
<ide>
<ide> def collection_with_template(template = @template)
<del> segments, locals, as, template = [], @locals, @options[:as] || @template.variable_name, @template
<add> segments, locals, template = [], @locals, @template
<ide>
<del> counter_name = template.counter_name
<del> locals[counter_name] = -1
<add> if @options[:as]
<add> as = @options[:as]
<add> counter = "#{as}_counter".to_sym
<add> else
<add> as = template.variable_name
<add> counter = template.counter_name
<add> end
<add>
<add> locals[counter] = -1
<ide>
<ide> @collection.each do |object|
<del> locals[counter_name] += 1
<del> locals["#{as.to_s}_counter".to_sym] = locals[counter_name] if as
<add> locals[counter] += 1
<ide> locals[as] = object
<del>
<ide> segments << template.render(@view, locals)
<ide> end
<ide>
<ide> segments
<ide> end
<ide>
<ide> def collection_without_template(collection_paths = @collection_paths)
<del> segments, locals, as = [], @locals, @options[:as]
<del> index, template = -1, nil
<add> segments, locals = [], @locals
<add> index, template = -1, nil
<add>
<add> if @options[:as]
<add> as = @options[:as]
<add> counter = "#{as}_counter"
<add> end
<ide>
<ide> @collection.each_with_index do |object, i|
<ide> template = find_template(collection_paths[i])
<del> locals[template.counter_name] = (index += 1)
<ide> locals[as || template.variable_name] = object
<add> locals[counter || template.counter_name] = (index += 1)
<ide>
<ide> segments << template.render(@view, locals)
<ide> end | 1 |
PHP | PHP | remove psalm false positive issue | 0cd136c13a75a5f964bc9105681ee01f680d2296 | <ide><path>src/Datasource/Locator/AbstractLocator.php
<ide> abstract class AbstractLocator implements LocatorInterface
<ide> protected $options = [];
<ide>
<ide> /**
<del> * @inheritDoc
<add> * {@inheritDoc}
<add> *
<add> * @param string $alias The alias name you want to get.
<add> * @param array<string, mixed> $options The options you want to build the table with.
<add> * @return \Cake\Datasource\RepositoryInterface
<add> * @throws \RuntimeException When trying to get alias for which instance
<add> * has already been created with different options.
<ide> */
<ide> public function get(string $alias, array $options = [])
<ide> { | 1 |
Python | Python | resolve line-too-long in datasets | 25cdbaa23b15b6e5bb0b236d3fc829b390389fd7 | <ide><path>keras/datasets/imdb.py
<ide> def get_word_index(path="imdb_word_index.json"):
<ide> path: where to cache the data (relative to `~/.keras/dataset`).
<ide>
<ide> Returns:
<del> The word index dictionary. Keys are word strings, values are their index.
<add> The word index dictionary. Keys are word strings, values are their
<add> index.
<ide>
<ide> Example:
<ide>
<ide><path>keras/datasets/reuters.py
<ide> def get_word_index(path="reuters_word_index.json"):
<ide> path: where to cache the data (relative to `~/.keras/dataset`).
<ide>
<ide> Returns:
<del> The word index dictionary. Keys are word strings, values are their index.
<add> The word index dictionary. Keys are word strings, values are their
<add> index.
<ide> """
<ide> origin_folder = (
<ide> "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" | 2 |
Ruby | Ruby | replace files atomically when relocating | 91d31ae1f43cade6fac808b6042133c328638274 | <ide><path>Library/Homebrew/keg_fix_install_names.rb
<ide> def relocate_install_names old_prefix, new_prefix, old_cellar, new_cellar, optio
<ide>
<ide> (pkgconfig_files | libtool_files | script_files).each do |file|
<ide> file.ensure_writable do
<del> file.open('rb') do |f|
<del> s = f.read
<del> s.gsub!(old_cellar, new_cellar)
<del> s.gsub!(old_prefix, new_prefix)
<del> f.reopen(file, 'wb')
<del> f.write(s)
<del> end
<add> s = file.open("rb", &:read)
<add> s.gsub!(old_cellar, new_cellar)
<add> s.gsub!(old_prefix, new_prefix)
<add> file.atomic_write(s)
<ide> end
<ide> end
<ide> end | 1 |
Javascript | Javascript | remove redundant declaration | 2288e3f21cf9dc1bc01d4f544c2c339ff517f839 | <ide><path>lib/internal/util.js
<ide> const kCustomPromisifiedSymbol = Symbol('util.promisify.custom');
<ide> const kCustomPromisifyArgsSymbol = Symbol('customPromisifyArgs');
<ide>
<ide> function promisify(orig) {
<del> if (typeof orig !== 'function') {
<del> const errors = require('internal/errors');
<add> if (typeof orig !== 'function')
<ide> throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'original', 'function');
<del> }
<ide>
<ide> if (orig[kCustomPromisifiedSymbol]) {
<ide> const fn = orig[kCustomPromisifiedSymbol]; | 1 |
PHP | PHP | fix php 5.3 breakage | d14e9e35185f469c039f8bb55f135b73318b57f9 | <ide><path>src/Illuminate/Routing/Router.php
<ide> protected function addResourceDestroy($name, $base, $controller)
<ide> */
<ide> public function getResourceUri($resource)
<ide> {
<del> if ( ! str_contains($resource, '.')) return $resource;
<del>
<ide> // To create the nested resource URI, we will simply explode the segments and
<ide> // create a base URI for each of them, then join all of them back together
<ide> // with slashes. This should create the properly nested resource routes.
<del> $nested = implode('/', array_map(function($segment)
<del> {
<del> $wildcard = $this->getResourceWildcard($segment);
<add> if ( ! str_contains($resource, '.')) return $resource;
<ide>
<del> return $segment.'/{'.$wildcard.'}';
<add> $segments = explode('.', $resource);
<ide>
<del> }, $segments = explode('.', $resource)));
<add> $nested = $this->getNestedResourceUri($segments);
<ide>
<ide> // Once we have built the base URI, we'll remove the wildcard holder for this
<ide> // base resource name so that the individual route adders can suffix these
<ide> public function getResourceUri($resource)
<ide> return str_replace('/{'.$last.'}', '', $nested);
<ide> }
<ide>
<add> /**
<add> * Get the URI for a nested resource segment array.
<add> *
<add> * @param array $segments
<add> * @return string
<add> */
<add> protected function getNestedResourceUri(array $segments)
<add> {
<add> $me = $this;
<add>
<add> // We will spin through the segments and create a place-holder for each of the
<add> // resource segments, as well as the resource itself. Then we should get an
<add> // entire string for the resource URI that contains all nested resources.
<add> return implode('/', array_map(function($s) use ($me)
<add> {
<add> return $s.'/{'.$this->getResourceWildcard($s).'}';
<add>
<add> }, $segments));
<add> }
<add>
<ide> /**
<ide> * Get the action array for a resource route.
<ide> *
<ide> protected function getBaseResource($resource)
<ide> * @param string $value
<ide> * @return string
<ide> */
<del> protected function getResourceWildcard($value)
<add> public function getResourceWildcard($value)
<ide> {
<ide> return str_replace('-', '_', $value);
<ide> } | 1 |
Text | Text | remove cask’s `test` readme | b0feac6645389c3e201fdb96b051c8cc00837f65 | <ide><path>Library/Homebrew/cask/test/README.md
<del># Naming
<del>
<del>Only files matching `*_test.rb` will be executed as tests. | 1 |
Javascript | Javascript | allow 0 as the value of a tag | f0600f0ba9f2e375a997f2a655b01bbaffb48236 | <ide><path>packages/ember-views/lib/system/render_buffer.js
<ide> Ember._RenderBuffer.prototype =
<ide> for (prop in props) {
<ide> if (props.hasOwnProperty(prop)) {
<ide> var value = props[prop];
<del> if (value) {
<add> if (value || typeof(value) === 'number') {
<ide> if (value === true) {
<ide> buffer.push(' ' + prop + '="' + prop + '"');
<ide> } else {
<ide><path>packages/ember-views/tests/system/render_buffer_test.js
<ide> test("RenderBuffers combine strings", function() {
<ide> equal("<div>ab</div>", buffer.string(), "Multiple pushes should concatenate");
<ide> });
<ide>
<add>test("value of 0 is included in output", function() {
<add> var buffer, $el;
<add>
<add> buffer = new Ember.RenderBuffer('input');
<add> buffer.prop('value', 0);
<add> buffer.pushOpeningTag();
<add> $el = buffer.element();
<add>
<add> strictEqual($el.value, '0', "generated element has value of '0'");
<add>
<add> buffer = new Ember.RenderBuffer('input');
<add> buffer.prop('value', 0);
<add> buffer.push('<div>');
<add> buffer.pushOpeningTag();
<add> buffer.push('</div>');
<add> $el = Ember.$(buffer.innerString());
<add>
<add> strictEqual($el.find('input').val(), '0', "raw tag has value of '0'");
<add>});
<add>
<ide> test("prevents XSS injection via `id`", function() {
<ide> var buffer = new Ember.RenderBuffer('div');
<ide> | 2 |
PHP | PHP | add more documentation | e3bc28a330416ca3e82d19a41cb3368b5d69e68a | <ide><path>lib/Cake/Network/Http/Adapter/Stream.php
<ide> protected function _buildSslContext(Request $request, $options) {
<ide> /**
<ide> * Open the stream and send the request.
<ide> *
<del> * @return void
<add> * @param Request $request
<add> * @return Response The populated response object.
<ide> * @throws Cake\Error\Exception
<ide> */
<del> protected function _send($request) {
<add> protected function _send(Request $request) {
<ide> $url = $request->url();
<ide> $this->_open($url);
<ide> $content = '';
<ide> protected function _connectionErrorHandler($code, $message) {
<ide> }
<ide>
<ide> /**
<del> * Get the contextOptions.
<add> * Get the context options
<add> *
<add> * Useful for debugging and testing context creation.
<ide> *
<ide> * @return array
<ide> */
<ide><path>lib/Cake/Network/Http/Client.php
<ide> *
<ide> * ### Scoped clients
<ide> *
<add> * If you're doing multiple requests to the same hostname its often convienent
<add> * to use the constructor arguments to create a scoped client. This allows you
<add> * to keep your code DRY and not repeat hostnames, authentication, and other options.
<add> *
<ide> * ### Doing requests
<ide> *
<add> * Once you've created an instance of Client you can do requests
<add> * using several methods. Each corresponds to a different HTTP method.
<add> *
<add> * - get()
<add> * - post()
<add> * - put()
<add> * - delete()
<add> * - patch()
<add> *
<ide> * ### Sending request bodies
<ide> *
<ide> * By default any POST/PUT/PATCH/DELETE request with $data will
<ide> *
<ide> * ### Using authentication
<ide> *
<add> * By using the `auth` key you can use authentication. The type sub option
<add> * can be used to specify which authentication strategy you want to use.
<add> * CakePHP comes with a few built-in strategies:
<add> *
<add> * - Basic
<add> * - Digest
<add> * - Oauth
<ide> *
<ide> * ### Using proxies
<ide> *
<add> * By using the `proxy` key you can set authentication credentials for
<add> * a proxy if you need to use one.. The type sub option can be used to
<add> * specify which authentication strategy you want to use.
<add> * CakePHP comes with a few built-in strategies:
<add> *
<add> * - Basic
<add> * - Digest
<ide> *
<ide> */
<ide> class Client {
<ide> class Client {
<ide> *
<ide> * ### Config options
<ide> *
<add> * You can set the following options when creating a client:
<add> *
<add> * - host - The hostname to do requests on.
<add> * - port - The port to use.
<add> * - scheme - The default scheme/protocol to use. Defaults to http.
<add> * - timeout - The timeout in seconds. Defaults to 30
<add> * - ssl_verify_peer - Whether or not SSL certificates should be validated.
<add> * Defaults to true.
<add> * - ssl_verify_depth - The maximum certificate chain depth to travers.
<add> * Defaults to 5.
<add> * - ssl_verify_host - Verify that the certificate and hostname match.
<add> * Defaults to true.
<add> * - redirect - Number of redirects to follow. Defaults to false.
<ide> *
<ide> * @param array $config Config options for scoped clients.
<ide> */
<ide><path>lib/Cake/Network/Http/Message.php
<ide> class Message {
<ide> const STATUS_OK = 200;
<ide> const STATUS_CREATED = 201;
<ide> const STATUS_ACCEPTED = 202;
<add> const STATUS_MOVED_PERMANENTLY = 301;
<add> const STATUS_FOUND = 302;
<add> const STATUS_SEE_OTHER = 303;
<add> const STATUS_TEMPORARY_REDIRECT = 307;
<ide>
<ide> const METHOD_GET = 'GET';
<ide> const METHOD_POST = 'POST';
<ide><path>lib/Cake/Network/Http/Request.php
<ide> */
<ide> class Request extends Message {
<ide>
<del> protected $_method;
<add>/**
<add> * The HTTP method to use.
<add> *
<add> * @var string
<add> */
<add> protected $_method = self::METHOD_GET;
<ide>
<add>/**
<add> * Request body to send.
<add> *
<add> * @var mixed
<add> */
<ide> protected $_body;
<ide>
<add>/**
<add> * The URL to request.
<add> *
<add> * @var string
<add> */
<ide> protected $_url;
<ide>
<ide> /**
<ide><path>lib/Cake/Network/Http/Response.php
<ide> protected function _parseCookie($value) {
<ide> * @return boolean
<ide> */
<ide> public function isOk() {
<del> return in_array(
<del> $this->_code,
<del> [static::STATUS_OK, static::STATUS_CREATED, static::STATUS_ACCEPTED]
<del> );
<add> $codes = [
<add> static::STATUS_OK,
<add> static::STATUS_CREATED,
<add> static::STATUS_ACCEPTED
<add> ];
<add> return in_array($this->_code, $codes);
<ide> }
<ide>
<ide> /**
<ide> public function isOk() {
<ide> * @return boolean
<ide> */
<ide> public function isRedirect() {
<add> $codes = [
<add> static::STATUS_MOVED_PERMANENTLY,
<add> static::STATUS_FOUND,
<add> static::STATUS_SEE_OTHER,
<add> static::STATUS_TEMPORARY_REDIRECT,
<add> ];
<ide> return (
<del> in_array($this->_code, array(301, 302, 303, 307)) &&
<add> in_array($this->_code, $codes) &&
<ide> $this->header('Location')
<ide> );
<ide> }
<ide> public function offsetExists($name) {
<ide> * @return null
<ide> */
<ide> public function offsetSet($name, $value) {
<del>
<add>
<ide> }
<ide>
<ide> /**
<ide> public function offsetSet($name, $value) {
<ide> * @return null
<ide> */
<ide> public function offsetUnset($name) {
<del>
<add>
<ide> }
<ide>
<ide> } | 5 |
Go | Go | add options validation to syslog logger test | a7020454ca23fb7b03d5ec92e4a87c5ecd6a5cf3 | <ide><path>daemon/logger/syslog/syslog_test.go
<ide> package syslog // import "github.com/docker/docker/daemon/logger/syslog"
<ide>
<ide> import (
<add> "net"
<ide> "reflect"
<ide> "testing"
<ide>
<ide> func TestValidateLogOptEmpty(t *testing.T) {
<ide> t.Fatal("Failed to parse empty config", err)
<ide> }
<ide> }
<add>
<add>func TestValidateSyslogAddress(t *testing.T) {
<add> err := ValidateLogOpt(map[string]string{
<add> "syslog-address": "this is not an uri",
<add> })
<add> if err == nil {
<add> t.Fatal("Expected error with invalid uri")
<add> }
<add>
<add> // File exists
<add> err = ValidateLogOpt(map[string]string{
<add> "syslog-address": "unix:///",
<add> })
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> // File does not exist
<add> err = ValidateLogOpt(map[string]string{
<add> "syslog-address": "unix:///does_not_exist",
<add> })
<add> if err == nil {
<add> t.Fatal("Expected error when address is non existing file")
<add> }
<add>
<add> // accepts udp and tcp URIs
<add> err = ValidateLogOpt(map[string]string{
<add> "syslog-address": "udp://1.2.3.4",
<add> })
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> err = ValidateLogOpt(map[string]string{
<add> "syslog-address": "tcp://1.2.3.4",
<add> })
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>}
<add>
<add>func TestParseAddressDefaultPort(t *testing.T) {
<add> _, address, err := parseAddress("tcp://1.2.3.4")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> _, port, _ := net.SplitHostPort(address)
<add> if port != "514" {
<add> t.Fatalf("Expected to default to port 514. It used port %s", port)
<add> }
<add>}
<add>
<add>func TestValidateSyslogFacility(t *testing.T) {
<add> err := ValidateLogOpt(map[string]string{
<add> "syslog-facility": "Invalid facility",
<add> })
<add> if err == nil {
<add> t.Fatal("Expected error if facility level is invalid")
<add> }
<add>}
<add>
<add>func TestValidateLogOptSyslogFormat(t *testing.T) {
<add> err := ValidateLogOpt(map[string]string{
<add> "syslog-format": "Invalid format",
<add> })
<add> if err == nil {
<add> t.Fatal("Expected error if format is invalid")
<add> }
<add>}
<add>
<add>func TestValidateLogOpt(t *testing.T) {
<add> err := ValidateLogOpt(map[string]string{
<add> "env": "http://127.0.0.1",
<add> "env-regex": "abc",
<add> "labels": "labelA",
<add> "syslog-address": "udp://1.2.3.4:1111",
<add> "syslog-facility": "daemon",
<add> "syslog-tls-ca-cert": "/etc/ca-certificates/custom/ca.pem",
<add> "syslog-tls-cert": "/etc/ca-certificates/custom/cert.pem",
<add> "syslog-tls-key": "/etc/ca-certificates/custom/key.pem",
<add> "syslog-tls-skip-verify": "true",
<add> "tag": "true",
<add> "syslog-format": "rfc3164",
<add> })
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> err = ValidateLogOpt(map[string]string{
<add> "not-supported-option": "a",
<add> })
<add> if err == nil {
<add> t.Fatal("Expecting error on unsupported options")
<add> }
<add>} | 1 |
PHP | PHP | implement a formhelper context class for form\form | d43ac7127691de37038c30a12978f5305cf1dbe6 | <ide><path>src/View/Form/FormContext.php
<add><?php
<add>/**
<add> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<add> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The MIT License
<add> * For full copyright and license information, please see the LICENSE.txt
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://cakephp.org CakePHP(tm) Project
<add> * @since 3.0.0
<add> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<add> */
<add>namespace Cake\View\Form;
<add>
<add>use Cake\Form\Form;
<add>use Cake\Network\Request;
<add>use Cake\View\Form\ContextInterface;
<add>
<add>/**
<add> * Provides a context provider for Cake\Form\Form instances.
<add> *
<add> * This context provider simply fulfils the interface requirements
<add> * that FormHelper has and allows access to the request data.
<add> */
<add>class FormContext implements ContextInterface {
<add>
<add>/**
<add> * The request object.
<add> *
<add> * @var \Cake\Network\Request
<add> */
<add> protected $_request;
<add>
<add>/**
<add> * Constructor.
<add> *
<add> * @param \Cake\Network\Request $request The request object.
<add> * @param array $context Context info.
<add> */
<add> public function __construct(Request $request, array $context) {
<add> $this->_request = $request;
<add> $context += [
<add> 'entity' => null,
<add> ];
<add> $this->_form = $context['entity'];
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function primaryKey() {
<add> return [];
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function isPrimaryKey($field) {
<add> return false;
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function isCreate() {
<add> return true;
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function val($field) {
<add> return $this->_request->data($field);
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function isRequired($field) {
<add> $validator = $this->_form->validator();
<add> if (!$validator->hasField($field)) {
<add> return false;
<add> }
<add> if ($this->type($field) !== 'boolean') {
<add> return $validator->isEmptyAllowed($field, true) === false;
<add> }
<add> return false;
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function fieldNames() {
<add> $schema = $this->_form->schema();
<add> return $schema->fields();
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function type($field) {
<add> $schema = $this->_form->schema();
<add> return $schema->fieldType($field);
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function attributes($field) {
<add> $column = (array)$this->_form->schema()->field($field);
<add> $whitelist = ['length' => null, 'precision' => null];
<add> return array_intersect_key($column, $whitelist);
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function hasError($field) {
<add> $errors = $this->error($field);
<add> return count($errors) > 0;
<add> }
<add>
<add>/**
<add> * {@inheritDoc}
<add> */
<add> public function error($field) {
<add> $errors = $this->_form->errors();
<add> if (isset($errors[$field])) {
<add> return array_values($errors[$field]);
<add> }
<add> return [];
<add> }
<add>
<add>}
<ide><path>tests/TestCase/View/Form/FormContextTest.php
<add><?php
<add>/**
<add> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<add> * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> *
<add> * Licensed under The MIT License
<add> * For full copyright and license information, please see the LICENSE.txt
<add> * Redistributions of files must retain the above copyright notice.
<add> *
<add> * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
<add> * @link http://cakephp.org CakePHP(tm) Project
<add> * @since 3.0.0
<add> * @license http://www.opensource.org/licenses/mit-license.php MIT License
<add> */
<add>namespace Cake\Test\TestCase\View\Form;
<add>
<add>use Cake\Form\Form;
<add>use Cake\Network\Request;
<add>use Cake\TestSuite\TestCase;
<add>use Cake\View\Form\FormContext;
<add>
<add>/**
<add> * Form context test case.
<add> */
<add>class FormContextTest extends TestCase {
<add>
<add>/**
<add> * setup method.
<add> *
<add> * @return void
<add> */
<add> public function setUp() {
<add> parent::setUp();
<add> $this->request = new Request();
<add> }
<add>
<add>/**
<add> * Test getting the primary key.
<add> *
<add> * @return void
<add> */
<add> public function testPrimaryKey() {
<add> $context = new FormContext($this->request, ['entity' => new Form()]);
<add> $this->assertEquals([], $context->primaryKey());
<add> }
<add>
<add>/**
<add> * Test isPrimaryKey.
<add> *
<add> * @return void
<add> */
<add> public function testIsPrimaryKey() {
<add> $context = new FormContext($this->request, ['entity' => new Form()]);
<add> $this->assertFalse($context->isPrimaryKey('id'));
<add> }
<add>
<add>/**
<add> * Test the isCreate method.
<add> *
<add> * @return void
<add> */
<add> public function testIsCreate() {
<add> $context = new FormContext($this->request, ['entity' => new Form()]);
<add> $this->assertTrue($context->isCreate());
<add> }
<add>
<add>/**
<add> * Test reading values from the request & defaults.
<add> */
<add> public function testValPresent() {
<add> $this->request->data = [
<add> 'Articles' => [
<add> 'title' => 'New title',
<add> 'body' => 'My copy',
<add> ]
<add> ];
<add> $context = new FormContext($this->request, ['entity' => new Form()]);
<add> $this->assertEquals('New title', $context->val('Articles.title'));
<add> $this->assertEquals('My copy', $context->val('Articles.body'));
<add> $this->assertNull($context->val('Articles.nope'));
<add> }
<add>
<add>/**
<add> * Test getting values when the request and defaults are missing.
<add> *
<add> * @return void
<add> */
<add> public function testValMissing() {
<add> $context = new FormContext($this->request, ['entity' => new Form()]);
<add> $this->assertNull($context->val('Comments.field'));
<add> }
<add>
<add>/**
<add> * Test isRequired
<add> *
<add> * @return void
<add> */
<add> public function testIsRequired() {
<add> $form = new Form();
<add> $form->validator()
<add> ->requirePresence('name')
<add> ->add('email', 'format', ['rule' => 'email']);
<add>
<add> $context = new FormContext($this->request, [
<add> 'entity' => $form
<add> ]);
<add> $this->assertTrue($context->isRequired('name'));
<add> $this->assertTrue($context->isRequired('email'));
<add> $this->assertFalse($context->isRequired('body'));
<add> $this->assertFalse($context->isRequired('Prefix.body'));
<add> }
<add>
<add>/**
<add> * Test the type method.
<add> *
<add> * @return void
<add> */
<add> public function testType() {
<add> $form = new Form();
<add> $form->schema()
<add> ->addField('email', 'string')
<add> ->addField('user_id', 'integer');
<add>
<add> $context = new FormContext($this->request, [
<add> 'entity' => $form
<add> ]);
<add> $this->assertNull($context->type('undefined'));
<add> $this->assertEquals('integer', $context->type('user_id'));
<add> $this->assertEquals('string', $context->type('email'));
<add> $this->assertNull($context->type('Prefix.email'));
<add> }
<add>
<add>/**
<add> * Test fetching attributes.
<add> *
<add> * @return void
<add> */
<add> public function testAttributes() {
<add> $form = new Form();
<add> $form->schema()
<add> ->addField('email', [
<add> 'type' => 'string',
<add> 'length' => 10,
<add> ])
<add> ->addField('amount', [
<add> 'type' => 'decimal',
<add> 'length' => 5,
<add> 'precision' => 2,
<add> ]);
<add> $context = new FormContext($this->request, [
<add> 'entity' => $form
<add> ]);
<add> $this->assertEquals([], $context->attributes('id'));
<add> $this->assertEquals(['length' => 10, 'precision' => null], $context->attributes('email'));
<add> $this->assertEquals(['precision' => 2, 'length' => 5], $context->attributes('amount'));
<add> }
<add>
<add>/**
<add> * Test fetching errors.
<add> *
<add> * @return void
<add> */
<add> public function testError() {
<add> $form = new Form();
<add> $form->validator()
<add> ->add('email', 'format', ['rule' => 'email'])
<add> ->add('name', 'length', ['rule' => ['minLength', 10]]);
<add> $form->validate([
<add> 'email' => 'derp',
<add> 'name' => 'derp'
<add> ]);
<add>
<add> $context = new FormContext($this->request, ['entity' => $form]);
<add> $this->assertEquals([], $context->error('empty'));
<add> $this->assertEquals(['The provided value is invalid'], $context->error('email'));
<add> $this->assertEquals(['The provided value is invalid'], $context->error('name'));
<add>
<add> $this->assertEquals([], $context->error('Alias.name'));
<add> $this->assertEquals([], $context->error('nope.nope'));
<add> }
<add>
<add>/**
<add> * Test checking errors.
<add> *
<add> * @return void
<add> */
<add> public function testHasError() {
<add> $form = new Form();
<add> $form->validator()
<add> ->add('email', 'format', ['rule' => 'email'])
<add> ->add('name', 'length', ['rule' => ['minLength', 10]]);
<add> $form->validate([
<add> 'email' => 'derp',
<add> 'name' => 'derp'
<add> ]);
<add>
<add> $context = new FormContext($this->request, ['entity' => $form]);
<add> $this->assertTrue($context->hasError('email'));
<add> $this->assertTrue($context->hasError('name'));
<add> $this->assertFalse($context->hasError('nope'));
<add> $this->assertFalse($context->hasError('nope.nope'));
<add> }
<add>
<add>} | 2 |
Ruby | Ruby | remove incorrect assert_recognizes example | af4fab7d2ed5568b5c998ff5b02c907da4f3a357 | <ide><path>actionpack/lib/action_dispatch/testing/assertions/routing.rb
<ide> module RoutingAssertions
<ide> #
<ide> # # Test a custom route
<ide> # assert_recognizes({:controller => 'items', :action => 'show', :id => '1'}, 'view/item1')
<del> #
<del> # # Check a Simply RESTful generated route
<del> # assert_recognizes list_items_url, 'items/list'
<ide> def assert_recognizes(expected_options, path, extras={}, message=nil)
<ide> request = recognized_request_for(path)
<ide> | 1 |
Python | Python | resolve line-too-long in root directory | f3cafc77c269f7ecbf80bb4cf4b54e28c153f4e6 | <ide><path>keras/activations.py
<ide> def softmax(x, axis=-1):
<ide>
<ide> **Example 2: usage in a `Dense` layer**
<ide>
<del> >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)
<add> >>> layer = tf.keras.layers.Dense(32,
<add> ... activation=tf.keras.activations.softmax)
<ide> """
<ide> if x.shape.rank > 1:
<ide> if isinstance(axis, int):
<ide> def elu(x, alpha=1.0):
<ide>
<ide> Args:
<ide> x: Input tensor.
<del> alpha: A scalar, slope of negative section. `alpha` controls the value to
<del> which an ELU saturates for negative net inputs.
<add> alpha: A scalar, slope of negative section. `alpha` controls the value
<add> to which an ELU saturates for negative net inputs.
<ide>
<ide> Returns:
<del> The exponential linear unit (ELU) activation function: `x` if `x > 0` and
<del> `alpha * (exp(x) - 1)` if `x < 0`.
<add> The exponential linear unit (ELU) activation function: `x` if `x > 0`
<add> and `alpha * (exp(x) - 1)` if `x < 0`.
<ide>
<ide>
<ide> Reference:
<ide> def relu(x, alpha=0.0, max_value=None, threshold=0.0):
<ide> x: Input `tensor` or `variable`.
<ide> alpha: A `float` that governs the slope for values lower than the
<ide> threshold.
<del> max_value: A `float` that sets the saturation threshold (the largest value
<del> the function will return).
<del> threshold: A `float` giving the threshold value of the activation function
<del> below which values will be damped or set to zero.
<add> max_value: A `float` that sets the saturation threshold (the largest
<add> value the function will return).
<add> threshold: A `float` giving the threshold value of the activation
<add> function below which values will be damped or set to zero.
<ide>
<ide> Returns:
<ide> A `Tensor` representing the input tensor,
<ide><path>keras/backend.py
<ide> class _DummyEagerGraph(threading.local):
<ide> class _WeakReferencableClass:
<ide> """This dummy class is needed for two reasons.
<ide>
<del> - We need something that supports weak references. Basic types like string
<del> and ints don't.
<add> - We need something that supports weak references. Basic types like
<add> string and ints don't.
<ide> - We need something whose hash and equality are based on object identity
<del> to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES.
<add> to make sure they are treated as different keys to
<add> _GRAPH_LEARNING_PHASES.
<ide>
<ide> An empty Python class satisfies both of these requirements.
<ide> """
<ide> def cast_to_floatx(x):
<ide> x: Numpy array or TensorFlow tensor.
<ide>
<ide> Returns:
<del> The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor
<del> if `x` was a tensor), cast to its new type.
<add> The same array (Numpy array if `x` was a Numpy array, or TensorFlow
<add> tensor if `x` was a tensor), cast to its new type.
<ide>
<ide> Example:
<ide>
<ide> def clear_session():
<ide>
<ide> If you are creating many models in a loop, this global state will consume
<ide> an increasing amount of memory over time, and you may want to clear it.
<del> Calling `clear_session()` releases the global state: this helps avoid clutter
<del> from old models and layers, especially when memory is limited.
<add> Calling `clear_session()` releases the global state: this helps avoid
<add> clutter from old models and layers, especially when memory is limited.
<ide>
<ide> Example 1: calling `clear_session()` when creating models in a loop
<ide>
<ide> ```python
<ide> for _ in range(100):
<ide> # Without `clear_session()`, each iteration of this loop will
<ide> # slightly increase the size of the global state managed by Keras
<del> model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
<add> model = tf.keras.Sequential([
<add> tf.keras.layers.Dense(10) for _ in range(10)])
<ide>
<ide> for _ in range(100):
<ide> # With `clear_session()` called at the beginning,
<ide> # Keras starts with a blank state at each iteration
<ide> # and memory consumption is constant over time.
<ide> tf.keras.backend.clear_session()
<del> model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
<add> model = tf.keras.Sequential([
<add> tf.keras.layers.Dense(10) for _ in range(10)])
<ide> ```
<ide>
<ide> Example 2: resetting the layer name generation counter
<ide> def clear_session():
<ide> dense
<ide> """
<ide> global _SESSION
<del> global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
<del> global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
<del> global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
<add> global _GRAPH_LEARNING_PHASES
<add> global _GRAPH_VARIABLES
<add> global _GRAPH_TF_OPTIMIZERS
<ide> global _GRAPH
<ide> _GRAPH.graph = None
<ide> tf.compat.v1.reset_default_graph()
<ide> def clear_session():
<ide> _DUMMY_EAGER_GRAPH.learning_phase_is_set = False
<ide>
<ide> _GRAPH_LEARNING_PHASES = {}
<del> # Create the learning phase placeholder in graph using the default factory
<add> # Create the learning phase placeholder in graph using the default
<add> # factory
<ide> phase = _default_learning_phase()
<ide> _internal_set_learning_phase(graph, phase)
<ide>
<ide> _GRAPH_VARIABLES.pop(graph, None)
<ide> _GRAPH_TF_OPTIMIZERS.pop(graph, None)
<ide> if tf.executing_eagerly():
<del> # Clear pending nodes in eager executors, kernel caches and step_containers.
<add> # Clear pending nodes in eager executors, kernel caches and
<add> # step_containers.
<ide> context.context().clear_kernel_cache()
<ide>
<ide>
<ide> def learning_phase():
<ide> else:
<ide> with tf.init_scope():
<ide> # We always check & set the learning phase inside the init_scope,
<del> # otherwise the wrong default_graph will be used to look up the learning
<del> # phase inside of functions & defuns.
<add> # otherwise the wrong default_graph will be used to look up the
<add> # learning phase inside of functions & defuns.
<ide> #
<ide> # This is because functions & defuns (both in graph & in eager mode)
<ide> # will always execute non-eagerly using a function-specific default
<ide> def _mark_func_graph_as_unsaveable(graph, learning_phase):
<ide> """Mark func graph as unsaveable due to use of symbolic keras learning phase.
<ide>
<ide> Functions that capture the symbolic learning phase cannot be exported to
<del> SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised
<del> if it is exported.
<add> SavedModel. Mark the funcgraph as unsaveable, so that an error will be
<add> raised if it is exported.
<ide>
<ide> Args:
<ide> graph: Graph or FuncGraph object.
<ide> def _mark_func_graph_as_unsaveable(graph, learning_phase):
<ide> if graph.building_function and is_placeholder(learning_phase):
<ide> graph.mark_as_unsaveable(
<ide> "The keras learning phase placeholder was used inside a function. "
<del> "Exporting placeholders is not supported when saving out a SavedModel. "
<del> "Please call `tf.keras.backend.set_learning_phase(0)` in the function "
<del> "to set the learning phase to a constant value."
<add> "Exporting placeholders is not supported when saving out a "
<add> "SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` "
<add> "in the function to set the learning phase to a constant value."
<ide> )
<ide>
<ide>
<ide> def symbolic_learning_phase():
<ide>
<ide>
<ide> def _internal_set_learning_phase(graph, value):
<del> global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
<add> global _GRAPH_LEARNING_PHASES
<ide>
<ide> if isinstance(value, tf.Tensor):
<ide> # The 'value' here is a tf.Tensor with attribute 'graph'.
<del> # There is a circular reference between key 'graph' and attribute 'graph'.
<del> # So we need use a weakref.ref to refer to the 'value' tensor here.
<del> # Otherwise, it would lead to memory leak.
<add> # There is a circular reference between key 'graph' and attribute
<add> # 'graph'. So we need use a weakref.ref to refer to the 'value' tensor
<add> # here. Otherwise, it would lead to memory leak.
<ide> value_ref = weakref.ref(value)
<ide> _GRAPH_LEARNING_PHASES[graph] = value_ref
<ide> else:
<ide> def set_learning_phase(value):
<ide>
<ide> The backend learning phase affects any code that calls
<ide> `backend.learning_phase()`
<del> In particular, all Keras built-in layers use the learning phase as the default
<del> for the `training` arg to `Layer.__call__`.
<add> In particular, all Keras built-in layers use the learning phase as the
<add> default for the `training` arg to `Layer.__call__`.
<ide>
<ide> User-written layers and models can achieve the same behavior with code that
<ide> looks like:
<ide> def deprecated_internal_set_learning_phase(value):
<ide> This method is an internal-only version of `set_learning_phase` that
<ide> does not raise a deprecation error. It is required because
<ide> saved_model needs to keep working with user code that uses the deprecated
<del> learning phase methods until those APIs are fully removed from the public API.
<add> learning phase methods until those APIs are fully removed from the public
<add> API.
<ide>
<ide> Specifically SavedModel saving needs to make sure the learning phase is 0
<ide> during tracing even if users overwrote it to a different value.
<ide> def deprecated_internal_set_learning_phase(value):
<ide> explicitly setting the learning phase for other values.
<ide>
<ide> Args:
<del> value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
<add> value: Learning phase value, either 0 or 1 (integers).
<add> 0 = test, 1 = train
<ide>
<ide> Raises:
<ide> ValueError: if `value` is neither `0` nor `1`.
<ide> def deprecated_internal_set_learning_phase(value):
<ide> raise ValueError("Expected learning phase to be 0 or 1.")
<ide> with tf.init_scope():
<ide> if tf.executing_eagerly():
<del> # In an eager context, the learning phase values applies to both the eager
<del> # context and the internal Keras graph.
<add> # In an eager context, the learning phase values applies to both the
<add> # eager context and the internal Keras graph.
<ide> _DUMMY_EAGER_GRAPH.learning_phase_is_set = True
<ide> _internal_set_learning_phase(_DUMMY_EAGER_GRAPH.key, value)
<ide>
<ide> def deprecated_internal_set_learning_phase(value):
<ide> def learning_phase_scope(value):
<ide> """Provides a scope within which the learning phase is equal to `value`.
<ide>
<del> The learning phase gets restored to its original value upon exiting the scope.
<add> The learning phase gets restored to its original value upon exiting the
<add> scope.
<ide>
<ide> Args:
<ide> value: Learning phase value, either 0 or 1 (integers).
<ide> def deprecated_internal_learning_phase_scope(value):
<ide> removed.
<ide>
<ide> Args:
<del> value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
<add> value: Learning phase value, either 0 or 1 (integers).
<add> 0 = test, 1 = train
<ide>
<ide> Yields:
<del> None.
<add> None.
<ide>
<ide> Raises:
<del> ValueError: if `value` is neither `0` nor `1`.
<add> ValueError: if `value` is neither `0` nor `1`.
<ide> """
<del> global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
<add> global _GRAPH_LEARNING_PHASES
<ide> if value not in {0, 1}:
<ide> raise ValueError("Expected learning phase to be 0 or 1.")
<ide>
<ide> def eager_learning_phase_scope(value):
<ide> Raises:
<ide> ValueError: if `value` is neither `0` nor `1`.
<ide> """
<del> global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
<add> global _GRAPH_LEARNING_PHASES
<ide> assert value in {0, 1}
<ide> assert tf.compat.v1.executing_eagerly_outside_functions()
<ide> global_learning_phase_was_set = global_learning_phase_is_set()
<ide> def _current_graph(op_input_list, graph=None):
<ide>
<ide> 1. If the default graph is being used to construct a function, we
<ide> use the default graph.
<del> 2. If the "graph" is specified explicitly, we validate that all of the inputs
<del> in "op_input_list" are compatible with that graph.
<add> 2. If the "graph" is specified explicitly, we validate that all of the
<add> inputs in "op_input_list" are compatible with that graph.
<ide> 3. Otherwise, we attempt to select a graph from the first Operation-
<ide> or Tensor-valued input in "op_input_list", and validate that all other
<ide> such inputs are in the same graph.
<ide> 4. If the graph was not specified and it could not be inferred from
<ide> "op_input_list", we attempt to use the default graph.
<ide>
<ide> Args:
<del> op_input_list: A list of inputs to an operation, which may include `Tensor`,
<del> `Operation`, and other objects that may be converted to a graph element.
<add> op_input_list: A list of inputs to an operation, which may include
<add> `Tensor`, `Operation`, and other objects that may be converted to a
<add> graph element.
<ide> graph: (Optional) The explicit graph to use.
<ide>
<ide> Raises:
<ide> TypeError: If op_input_list is not a list or tuple, or if graph is not a
<ide> Graph.
<del> ValueError: If a graph is explicitly passed and not all inputs are from it,
<del> or if the inputs are from multiple graphs, or we could not find a graph
<del> and there was no default graph.
<add> ValueError: If a graph is explicitly passed and not all inputs are from
<add> it, or if the inputs are from multiple graphs, or we could not find a
<add> graph and there was no default graph.
<ide>
<ide> Returns:
<ide> The appropriate graph to use for the given inputs.
<ide> def _current_graph(op_input_list, graph=None):
<ide> original_graph_element = None
<ide> for op_input in op_input_list:
<ide> # Determine if this is a valid graph_element.
<del> # TODO(joshl): Note that we exclude subclasses of Tensor. Need to clean this
<del> # up.
<add> # TODO(joshl): Note that we exclude subclasses of Tensor. Need to clean
<add> # this up.
<ide> if isinstance(
<ide> op_input, (tf.Operation, tf.Tensor, tf.__internal__.CompositeTensor)
<ide> ) and (
<ide> def _get_session(op_input_list=()):
<ide> ) is None or _SESSION.session.graph is not _current_graph(
<ide> op_input_list
<ide> ):
<del> # If we are creating the Session inside a tf.distribute.Strategy scope,
<del> # we ask the strategy for the right session options to use.
<add> # If we are creating the Session inside a tf.distribute.Strategy
<add> # scope, we ask the strategy for the right session options to use.
<ide> if tf.distribute.has_strategy():
<ide> configure_and_create_distributed_session(
<ide> tf.distribute.get_strategy()
<ide> def _is_current_explicit_device(device_type):
<ide> device_type: A string containing `GPU` or `CPU` (case-insensitive).
<ide>
<ide> Returns:
<del> A boolean indicating if the current device scope is explicitly set on the
<del> device type.
<add> A boolean indicating if the current device scope is explicitly set on
<add> the device type.
<ide>
<ide> Raises:
<ide> ValueError: If the `device_type` string indicates an unsupported device.
<ide> def unique_object_name(
<ide> name: String name to make unique.
<ide> name_uid_map: An optional defaultdict(int) to use when creating unique
<ide> names. If None (default), uses a per-Graph dictionary.
<del> avoid_names: An optional set or dict with names which should not be used. If
<del> None (default), don't avoid any names unless `avoid_observed_names` is
<del> True.
<del> namespace: Gets a name which is unique within the (graph, namespace). Layers
<del> which are not Networks use a blank namespace and so get graph-global
<del> names.
<add> avoid_names: An optional set or dict with names which should not be used.
<add> If None (default), don't avoid any names unless `avoid_observed_names`
<add> is True.
<add> namespace: Gets a name which is unique within the (graph, namespace).
<add> Layers which are not Networks use a blank namespace and so get
<add> graph-global names.
<ide> zero_based: If True, name sequences start with no suffix (e.g. "dense",
<ide> "dense_1"). If False, naming is one-based ("dense_1", "dense_2").
<ide> avoid_observed_names: If True, avoid any names that have been observed by
<ide> def is_keras_tensor(x):
<ide> >>> tf.keras.backend.is_keras_tensor(np_var)
<ide> Traceback (most recent call last):
<ide> ...
<del> ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.
<add> ValueError: Unexpectedly found an instance of type
<add> `<class 'numpy.ndarray'>`.
<ide> Expected a symbolic tensor instance.
<ide> >>> keras_var = tf.keras.backend.variable(np_var)
<ide> >>> # A variable created with the keras backend is not a Keras tensor.
<ide> def placeholder(
<ide> name: Optional name string for the placeholder.
<ide> ragged: Boolean, whether the placeholder should have a ragged type.
<ide> In this case, values of 'None' in the 'shape' argument represent
<del> ragged dimensions. For more information about RaggedTensors, see this
<del> [guide](https://www.tensorflow.org/guide/ragged_tensors).
<add> ragged dimensions. For more information about RaggedTensors, see
<add> this [guide](https://www.tensorflow.org/guide/ragged_tensors).
<ide>
<ide> Raises:
<ide> ValueError: If called with sparse = True and ragged = True.
<ide> def placeholder(
<ide> """
<ide> if sparse and ragged:
<ide> raise ValueError(
<del> "Cannot set both sparse and ragged to True when creating a placeholder."
<add> "Cannot set both sparse and ragged to "
<add> "True when creating a placeholder."
<ide> )
<ide> if dtype is None:
<ide> dtype = floatx()
<ide> def is_tf_random_generator_enabled():
<ide> usage of `tf.random.Generator`, please use
<ide> `tf.keras.backend.experimental.disable_random_generator`.
<ide>
<del> We expect the `tf.random.Generator` code path to become the default, and will
<del> remove the legacy stateful random ops such as `tf.random.uniform` in the
<del> future (see the
<del> [TF RNG guide](https://www.tensorflow.org/guide/random_numbers)).
<add> We expect the `tf.random.Generator` code path to become the default, and
<add> will remove the legacy stateful random ops such as `tf.random.uniform` in
<add> the future (see the [TF RNG guide](
<add> https://www.tensorflow.org/guide/random_numbers)).
<ide>
<ide> This API will also be removed in a future release as well, together with
<ide> `tf.keras.backend.experimental.enable_tf_random_generator()` and
<ide> `tf.keras.backend.experimental.disable_tf_random_generator()`
<ide>
<ide> Returns:
<del> boolean: whether `tf.random.Generator` is used for random number generation
<del> in Keras.
<add> boolean: whether `tf.random.Generator` is used for random number
<add> generation in Keras.
<ide> """
<ide> return _USE_GENERATOR_FOR_RNG
<ide>
<ide> def __init__(self, seed=None, rng_type=None, **kwargs):
<ide> self._built = False
<ide>
<ide> def _set_rng_type(self, rng_type, **kwargs):
<del> # Only supported kwargs is "force_generator", which we will remove once we
<del> # clean up all the caller.
<add> # Only supported kwargs is "force_generator", which we will remove once
<add> # we clean up all the caller.
<ide> # TODO(scottzhu): Remove the kwargs for force_generator.
<ide> if kwargs.get("force_generator", False):
<ide> rng_type = self.RNG_STATEFUL
<ide> def _set_rng_type(self, rng_type, **kwargs):
<ide> ]:
<ide> raise ValueError(
<ide> "Invalid `rng_type` received. "
<del> 'Valid `rng_type` are ["stateless", "stateful", "legacy_stateful"].'
<add> 'Valid `rng_type` are ["stateless", '
<add> '"stateful", "legacy_stateful"].'
<ide> f" Got: {rng_type}"
<ide> )
<ide> self._rng_type = rng_type
<ide>
<ide> def _maybe_init(self):
<ide> """Lazily init the RandomGenerator.
<ide>
<del> The TF API executing_eagerly_outside_functions() has some side effect, and
<del> couldn't be used before API like tf.enable_eager_execution(). Some of the
<del> client side code was creating the initializer at the code load time, which
<del> triggers the creation of RandomGenerator. Lazy init this class to walkaround
<del> this issue until it is resolved on TF side.
<add> The TF API executing_eagerly_outside_functions() has some side effect,
<add> and couldn't be used before API like tf.enable_eager_execution(). Some
<add> of the client side code was creating the initializer at the code load
<add> time, which triggers the creation of RandomGenerator. Lazy init this
<add> class to walkaround this issue until it is resolved on TF side.
<ide> """
<del> # TODO(b/167482354): Change this back to normal init when the bug is fixed.
<add> # TODO(b/167482354): Change this back to normal init when the bug is
<add> # fixed.
<ide> if self._built:
<ide> return
<ide>
<ide> if (
<ide> self._rng_type == self.RNG_STATEFUL
<ide> and not tf.compat.v1.executing_eagerly_outside_functions()
<ide> ):
<del> # Fall back to legacy stateful since the generator need to work in tf2.
<add> # Fall back to legacy stateful since the generator need to work in
<add> # tf2.
<ide> self._rng_type = self.RNG_LEGACY_STATEFUL
<ide>
<ide> if self._rng_type == self.RNG_STATELESS:
<ide> def _maybe_init(self):
<ide> seed = self._create_seed(self._seed)
<ide> self._generator = tf.random.Generator.from_seed(seed)
<ide> else:
<del> # In legacy stateful, we use stateful op, regardless whether user provide
<del> # seed or not. Seeded stateful op will ensure generating same sequences.
<add> # In legacy stateful, we use stateful op, regardless whether user
<add> # provide seed or not. Seeded stateful op will ensure generating
<add> # same sequences.
<ide> self._generator = None
<ide> self._built = True
<ide>
<ide> def make_seed_for_stateless_op(self):
<ide> """Generate a new seed based on the init config.
<ide>
<del> Note that this will not return python ints which will be frozen in the graph
<del> and cause stateless op to return the same value. It will only return value
<del> when generator is used, otherwise it will return None.
<add> Note that this will not return python ints which will be frozen in the
<add> graph and cause stateless op to return the same value. It will only
<add> return value when generator is used, otherwise it will return None.
<ide>
<ide> Returns:
<ide> A tensor with shape [2,].
<ide> def make_seed_for_stateless_op(self):
<ide> def make_legacy_seed(self):
<ide> """Create a new seed for the legacy stateful ops to use.
<ide>
<del> When user didn't provide any original seed, this method will return None.
<del> Otherwise it will increment the counter and return as the new seed.
<add> When user didn't provide any original seed, this method will return
<add> None. Otherwise it will increment the counter and return as the new
<add> seed.
<ide>
<ide> Note that it is important to generate different seed for stateful ops in
<del> the `tf.function`. The random ops will return same value when same seed is
<del> provided in the `tf.function`.
<add> the `tf.function`. The random ops will return same value when same seed
<add> is provided in the `tf.function`.
<ide>
<ide> Returns:
<ide> int as new seed, or None.
<ide> def random_normal(
<ide> Args:
<ide> shape: The shape of the random values to generate.
<ide> mean: Floats, default to 0. Mean of the random values to generate.
<del> stddev: Floats, default to 1. Standard deviation of the random values to
<del> generate.
<add> stddev: Floats, default to 1. Standard deviation of the random values
<add> to generate.
<ide> dtype: Optional dtype of the tensor. Only floating point types are
<del> supported. If not specified, `tf.keras.backend.floatx()` is used, which
<del> default to `float32` unless you configured it otherwise (via
<add> supported. If not specified, `tf.keras.backend.floatx()` is used,
<add> which default to `float32` unless you configured it otherwise (via
<ide> `tf.keras.backend.set_floatx(float_dtype)`)
<del> nonce: Optional integer scalar, that will be folded into the seed in the
<del> stateless mode.
<add> nonce: Optional integer scalar, that will be folded into the seed in
<add> the stateless mode.
<ide> """
<ide> self._maybe_init()
<ide> dtype = dtype or floatx()
<ide> def random_uniform(
<ide> minval: Floats, default to None. Upper bound of the range of
<ide> random values to generate (exclusive).
<ide> dtype: Optional dtype of the tensor. Only floating point types are
<del> supported. If not specified, `tf.keras.backend.floatx()` is used, which
<del> default to `float32` unless you configured it otherwise (via
<add> supported. If not specified, `tf.keras.backend.floatx()` is used,
<add> which default to `float32` unless you configured it otherwise (via
<ide> `tf.keras.backend.set_floatx(float_dtype)`)
<del> nonce: Optional integer scalar, that will be folded into the seed in the
<del> stateless mode.
<add> nonce: Optional integer scalar, that will be folded into the seed in
<add> the stateless mode.
<ide> """
<ide> self._maybe_init()
<ide> dtype = dtype or floatx()
<ide> def truncated_normal(
<ide> Args:
<ide> shape: The shape of the random values to generate.
<ide> mean: Floats, default to 0. Mean of the random values to generate.
<del> stddev: Floats, default to 1. Standard deviation of the random values to
<del> generate.
<add> stddev: Floats, default to 1. Standard deviation of the random values
<add> to generate.
<ide> dtype: Optional dtype of the tensor. Only floating point types are
<del> supported. If not specified, `tf.keras.backend.floatx()` is used, which
<del> default to `float32` unless you configured it otherwise (via
<add> supported. If not specified, `tf.keras.backend.floatx()` is used,
<add> which default to `float32` unless you configured it otherwise (via
<ide> `tf.keras.backend.set_floatx(float_dtype)`)
<del> nonce: Optional integer scalar, that will be folded into the seed in the
<del> stateless mode.
<add> nonce: Optional integer scalar, that will be folded into the seed in
<add> the stateless mode.
<ide> """
<ide> self._maybe_init()
<ide> dtype = dtype or floatx()
<ide> def std(x, axis=None, keepdims=False):
<ide> `[-rank(x), rank(x))`.
<ide> keepdims: A boolean, whether to keep the dimensions or not.
<ide> If `keepdims` is `False`, the rank of the tensor is reduced
<del> by 1. If `keepdims` is `True`, the reduced dimension is retained with
<del> length 1.
<add> by 1. If `keepdims` is `True`, the reduced dimension is retained
<add> with length 1.
<ide>
<ide> Returns:
<ide> A tensor with the standard deviation of elements of `x` with same dtype.
<ide> def set_value(x, value):
<ide> assign_placeholder = x._assign_placeholder
<ide> assign_op = x._assign_op
<ide> else:
<del> # In order to support assigning weights to resizable variables in
<del> # Keras, we make a placeholder with the correct number of dimensions
<del> # but with None in each dimension. This way, we can assign weights
<del> # of any size (as long as they have the correct dimensionality).
<add> # In order to support assigning weights to resizable variables
<add> # in Keras, we make a placeholder with the correct number of
<add> # dimensions but with None in each dimension. This way, we can
<add> # assign weights of any size (as long as they have the correct
<add> # dimensionality).
<ide> placeholder_shape = tf.TensorShape([None] * value.ndim)
<ide> assign_placeholder = tf.compat.v1.placeholder(
<ide> tf_dtype, shape=placeholder_shape
<ide> def batch_set_value(tuples):
<ide> assign_placeholder = x._assign_placeholder
<ide> assign_op = x._assign_op
<ide> else:
<del> # In order to support assigning weights to resizable variables in
<del> # Keras, we make a placeholder with the correct number of dimensions
<del> # but with None in each dimension. This way, we can assign weights
<del> # of any size (as long as they have the correct dimensionality).
<add> # In order to support assigning weights to resizable
<add> # variables in Keras, we make a placeholder with the
<add> # correct number of dimensions but with None in each
<add> # dimension. This way, we can assign weights of any size
<add> # (as long as they have the correct dimensionality).
<ide> placeholder_shape = tf.TensorShape([None] * value.ndim)
<ide> assign_placeholder = tf.compat.v1.placeholder(
<ide> tf_dtype, shape=placeholder_shape
<ide> def print_tensor(x, message="", summarize=3):
<ide> x: Tensor to print.
<ide> message: Message to print jointly with the tensor.
<ide> summarize: The first and last `summarize` elements within each dimension
<del> are recursively printed per Tensor. If None, then the first 3 and last
<del> 3 elements of each dimension are printed for each tensor. If set to
<del> -1, it will print all elements of every tensor.
<add> are recursively printed per Tensor. If None, then the first 3 and
<add> last 3 elements of each dimension are printed for each tensor. If
<add> set to -1, it will print all elements of every tensor.
<ide>
<ide> Returns:
<ide> The same tensor `x`, unchanged.
<ide> def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
<ide>
<ide> Args:
<ide> feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
<del> feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
<add> feed_symbols: List of input tensors to be fed symbolic tensors at
<add> runtime.
<ide> symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
<ide> session: Session to use to generate the callable.
<ide>
<ide> def _call_fetch_callbacks(self, fetches_output):
<ide> def _eval_if_composite(self, tensor):
<ide> """Helper method which evaluates any CompositeTensors passed to it."""
<ide> # We need to evaluate any composite tensor objects that have been
<del> # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
<del> # actual CompositeTensor objects instead of the value(s) contained in the
<del> # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
<del> # this ensures that we return its value as a SparseTensorValue rather than
<del> # a SparseTensor.
<add> # reconstructed in 'pack_sequence_as', since otherwise they'll be output
<add> # as actual CompositeTensor objects instead of the value(s) contained in
<add> # the CompositeTensors. E.g., if output_structure contains a
<add> # SparseTensor, then this ensures that we return its value as a
<add> # SparseTensorValue rather than a SparseTensor.
<ide> from keras.utils import tf_utils # pylint: disable=g-import-not-at-top
<ide>
<ide> if tf_utils.is_extension_type(tensor):
<ide> def __call__(self, inputs):
<ide> else:
<ide> # Case: feeding Numpy array.
<ide> feed_arrays.append(tensor)
<del> # We need to do array conversion and type casting at this level, since
<del> # `callable_fn` only supports exact matches.
<add> # We need to do array conversion and type casting at this level,
<add> # since `callable_fn` only supports exact matches.
<ide> tensor_type = tf.as_dtype(tensor.dtype)
<ide> array_vals.append(
<ide> np.asarray(value, dtype=tensor_type.as_numpy_dtype)
<ide> def __call__(self, inputs):
<ide> expand_composites=True,
<ide> )
<ide> # We need to evaluate any composite tensor objects that have been
<del> # reconstructed in 'pack_sequence_as', since otherwise they'll be output as
<del> # actual CompositeTensor objects instead of the value(s) contained in the
<del> # CompositeTensors. E.g., if output_structure contains a SparseTensor, then
<del> # this ensures that we return its value as a SparseTensorValue rather than
<del> # a SparseTensor.
<add> # reconstructed in 'pack_sequence_as', since otherwise they'll be output
<add> # as actual CompositeTensor objects instead of the value(s) contained in
<add> # the CompositeTensors. E.g., if output_structure contains a
<add> # SparseTensor, then this ensures that we return its value as a
<add> # SparseTensorValue rather than a SparseTensor.
<ide> return tf.nest.map_structure(self._eval_if_composite, output_structure)
<ide>
<ide>
<ide> def func(model_inputs):
<ide> 0
<ide> ] and key not in ["inputs", "outputs", "updates", "name"]:
<ide> msg = (
<del> 'Invalid argument "%s" passed to K.function with TensorFlow '
<del> "backend"
<add> 'Invalid argument "%s" passed to K.function with '
<add> "TensorFlow backend"
<ide> ) % key
<ide> raise ValueError(msg)
<ide> return GraphExecutionFunction(
<ide> def rnn(
<ide> (at least 3D), or nested tensors, and each of which has shape
<ide> `(samples, time, ...)`.
<ide> initial_states: Tensor with shape `(samples, state_size)`
<del> (no time dimension), containing the initial values for the states used
<del> in the step function. In the case that state_size is in a nested
<del> shape, the shape of initial_states will also follow the nested
<del> structure.
<add> (no time dimension), containing the initial values for the states
<add> used in the step function. In the case that state_size is in a
<add> nested shape, the shape of initial_states will also follow the
<add> nested structure.
<ide> go_backwards: Boolean. If True, do the iteration over the time
<ide> dimension in reverse order and return the reversed sequence.
<ide> mask: Binary tensor with shape `(samples, time, 1)`,
<ide> with a zero for every element that is masked.
<ide> constants: List of constant values passed at each step.
<ide> unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
<ide> input_length: An integer or a 1-D Tensor, depending on whether
<del> the time dimension is fixed-length or not. In case of variable length
<del> input, it is used for masking in case there's no mask specified.
<add> the time dimension is fixed-length or not. In case of variable
<add> length input, it is used for masking in case there's no mask
<add> specified.
<ide> time_major: Boolean. If true, the inputs and outputs will be in shape
<ide> `(timesteps, batch, ...)`, whereas in the False case, it will be
<ide> `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
<del> efficient because it avoids transposes at the beginning and end of the
<del> RNN calculation. However, most TensorFlow data is batch-major, so by
<del> default this function accepts input and emits output in batch-major
<del> form.
<add> efficient because it avoids transposes at the beginning and end of
<add> the RNN calculation. However, most TensorFlow data is batch-major,
<add> so by default this function accepts input and emits output in
<add> batch-major form.
<ide> zero_output_for_mask: Boolean. If True, the output for masked timestep
<ide> will be zeros, whereas in the False case, output from previous
<ide> timestep is returned.
<del> return_all_outputs: Boolean. If True, return the recurrent outputs for all
<del> timesteps in the sequence. If False, only return the output for the
<del> last timestep (which consumes less memory).
<add> return_all_outputs: Boolean. If True, return the recurrent outputs for
<add> all timesteps in the sequence. If False, only return the output for
<add> the last timestep (which consumes less memory).
<ide>
<ide> Returns:
<ide> A tuple, `(last_output, outputs, new_states)`.
<ide> def rnn(
<ide> Raises:
<ide> ValueError: if input dimension is less than 3.
<ide> ValueError: if `unroll` is `True` but input timestep is not a fixed
<del> number.
<del> ValueError: if `mask` is provided (not `None`) but states is not provided
<del> (`len(states)` == 0).
<add> number.
<add> ValueError: if `mask` is provided (not `None`) but states is not
<add> provided (`len(states)` == 0).
<ide> """
<ide> if not tf.__internal__.tf2.enabled():
<ide> return_all_outputs = True # Not supported in TF1.
<ide> def _expand_mask(mask_t, input_t, fixed_dim=1):
<ide> successive_outputs = []
<ide>
<ide> # Process the input tensors. The input tensor need to be split on the
<del> # time_step dim, and reverse if go_backwards is True. In the case of nested
<del> # input, the input is flattened and then transformed individually.
<del> # The result of this will be a tuple of lists, each of the item in tuple is
<del> # list of the tensor with shape (batch, feature)
<add> # time_step dim, and reverse if go_backwards is True. In the case of
<add> # nested input, the input is flattened and then transformed
<add> # individually. The result of this will be a tuple of lists, each of
<add> # the item in tuple is list of the tensor with shape (batch, feature)
<ide> def _process_single_input_t(input_t):
<ide> input_t = tf.unstack(input_t) # unstack for time_step dim
<ide> if go_backwards:
<ide> def _get_input_tensor(time):
<ide> else: # Unroll == False
<ide> states = tuple(initial_states)
<ide>
<del> # Create input tensor array, if the inputs is nested tensors, then it will
<del> # be flattened first, and tensor array will be created one per flattened
<del> # tensor.
<add> # Create input tensor array, if the inputs is nested tensors, then it
<add> # will be flattened first, and tensor array will be created one per
<add> # flattened tensor.
<ide> input_ta = tuple(
<ide> tf.TensorArray(
<ide> dtype=inp.dtype,
<ide> def _get_input_tensor(time):
<ide> for ta, input_ in zip(input_ta, flatted_inputs)
<ide> )
<ide>
<del> # Get the time(0) input and compute the output for that, the output will be
<del> # used to determine the dtype of output tensor array. Don't read from
<add> # Get the time(0) input and compute the output for that, the output will
<add> # be used to determine the dtype of output tensor array. Don't read from
<ide> # input_ta due to TensorArray clear_after_read default to True.
<ide> input_time_zero = tf.nest.pack_sequence_as(
<ide> inputs, [inp[0] for inp in flatted_inputs]
<ide> )
<del> # output_time_zero is used to determine the cell output shape and its dtype.
<del> # the value is discarded.
<add> # output_time_zero is used to determine the cell output shape and its
<add> # dtype. the value is discarded.
<ide> output_time_zero, _ = step_function(
<ide> input_time_zero, tuple(initial_states) + tuple(constants)
<ide> )
<ide> def _get_input_tensor(time):
<ide>
<ide> time = tf.constant(0, dtype="int32", name="time")
<ide>
<del> # We only specify the 'maximum_iterations' when building for XLA since that
<del> # causes slowdowns on GPU in TF.
<add> # We only specify the 'maximum_iterations' when building for XLA since
<add> # that causes slowdowns on GPU in TF.
<ide> if (
<ide> not tf.executing_eagerly()
<ide> and control_flow_util.GraphOrParentsInXlaContext(
<ide> def compute_masked_output(mask_t, flat_out, flat_mask):
<ide> masking_fn = None
<ide>
<ide> if masking_fn is not None:
<del> # Mask for the T output will be base on the output of T - 1. In the case
<del> # T = 0, a zero filled tensor will be used.
<add> # Mask for the T output will be base on the output of T - 1. In the
<add> # case T = 0, a zero filled tensor will be used.
<ide> flat_zero_output = tuple(
<ide> tf.zeros_like(o) for o in tf.nest.flatten(output_time_zero)
<ide> )
<ide> def categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> [[1. 0. 0.]
<ide> [0. 1. 0.]
<ide> [0. 0. 1.]], shape=(3, 3), dtype=float32)
<del> >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3])
<add> >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94],
<add> ... shape=[3, 3])
<ide> >>> print(b)
<ide> tf.Tensor(
<ide> [[0.9 0.05 0.05]
<ide> def categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> warnings.warn(
<ide> '"`categorical_crossentropy` received `from_logits=True`, but '
<ide> "the `output` argument was produced by a sigmoid or softmax "
<del> 'activation and thus does not represent logits. Was this intended?"',
<add> "activation and thus does not represent logits. "
<add> "Was this intended?",
<ide> stacklevel=2,
<ide> )
<ide> from_logits = True
<ide> def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> output = output._keras_logits # pylint: disable=protected-access
<ide> if from_logits:
<ide> warnings.warn(
<del> '"`sparse_categorical_crossentropy` received `from_logits=True`, but '
<del> "the `output` argument was produced by a sigmoid or softmax "
<del> 'activation and thus does not represent logits. Was this intended?"',
<add> '"`sparse_categorical_crossentropy` received '
<add> "`from_logits=True`, but the `output` argument "
<add> "was produced by a sigmoid or softmax activation "
<add> 'and thus does not represent logits. Was this intended?"',
<ide> stacklevel=2,
<ide> )
<ide> from_logits = True
<ide> def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> output = tf.compat.v1.transpose(output, perm=permutation)
<ide> elif axis != -1:
<ide> raise ValueError(
<del> "Cannot compute sparse categorical crossentropy with `axis={}` on an "
<del> "output tensor with unknown rank".format(axis)
<add> "Cannot compute sparse categorical crossentropy with `axis={}` "
<add> "on an output tensor with unknown rank".format(axis)
<ide> )
<ide>
<ide> target = cast(target, "int64")
<ide> def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
<ide> )
<ide>
<ide> if update_shape and output_rank >= 3:
<del> # If our output includes timesteps or spatial dimensions we need to reshape
<add> # If our output includes timesteps or spatial dimensions we need to
<add> # reshape
<ide> return tf.reshape(res, output_shape[:-1])
<ide> else:
<ide> return res
<ide> def binary_crossentropy(target, output, from_logits=False):
<ide> output = output._keras_logits # pylint: disable=protected-access
<ide> if from_logits:
<ide> warnings.warn(
<del> '"`binary_crossentropy` received `from_logits=True`, but the `output`'
<del> " argument was produced by a sigmoid or softmax activation and thus "
<add> '"`binary_crossentropy` received `from_logits=True`, '
<add> "but the `output` argument was produced by a sigmoid "
<add> "or softmax activation and thus "
<ide> 'does not represent logits. Was this intended?"',
<ide> stacklevel=2,
<ide> )
<ide> def binary_focal_crossentropy(
<ide> output: A tensor.
<ide> apply_class_balancing: A bool, whether to apply weight balancing on the
<ide> binary classes 0 and 1.
<del> alpha: A weight balancing factor for class 1, default is `0.25` as mentioned
<del> in the reference. The weight for class 0 is `1.0 - alpha`.
<del> gamma: A focusing parameter, default is `2.0` as mentioned in the reference.
<del> from_logits: Whether `output` is expected to be a logits tensor. By default,
<del> we consider that `output` encodes a probability distribution.
<add> alpha: A weight balancing factor for class 1, default is `0.25` as
<add> mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
<add> gamma: A focusing parameter, default is `2.0` as mentioned in the
<add> reference.
<add> from_logits: Whether `output` is expected to be a logits tensor. By
<add> default, we consider that `output` encodes a probability distribution.
<ide>
<ide> Returns:
<ide> A tensor.
<ide> def in_top_k(predictions, targets, k):
<ide> """Returns whether the `targets` are in the top `k` `predictions`.
<ide>
<ide> Args:
<del> predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
<add> predictions: A tensor of shape `(batch_size, classes)` and type
<add> `float32`.
<ide> targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
<ide> k: An `int`, number of top elements to consider.
<ide>
<ide> def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
<ide>
<ide> Args:
<ide> shape: A tuple of integers, the shape of tensor to create.
<del> mean: A float, the mean value of the normal distribution to draw samples.
<del> Default to 0.0.
<add> mean: A float, the mean value of the normal distribution to draw
<add> samples. Default to 0.0.
<ide> stddev: A float, the standard deviation of the normal distribution
<ide> to draw samples. Default to 1.0.
<ide> dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras
<ide> def _create_session(distribution_strategy):
<ide> """Create the Distributed Strategy session."""
<ide> session_config = get_default_session_config()
<ide>
<del> # If a session already exists, merge in its config; in the case there is a
<del> # conflict, take values of the existing config.
<add> # If a session already exists, merge in its config; in the case there is
<add> # a conflict, take values of the existing config.
<ide> global _SESSION
<ide> if getattr(_SESSION, "session", None) and _SESSION.session._config:
<ide> session_config.MergeFrom(_SESSION.session._config)
<ide>
<ide> if is_tpu_strategy(distribution_strategy):
<ide> # TODO(priyag, yuefengz): Remove this workaround when Distribute
<del> # Coordinator is integrated with keras and we can create a session from
<del> # there.
<add> # Coordinator is integrated with keras and we can create a session
<add> # from there.
<ide> distribution_strategy.configure(session_config)
<ide> master = (
<ide> distribution_strategy.extended._tpu_cluster_resolver.master()
<ide> def maybe_convert_to_ragged(
<ide> return output
<ide>
<ide> if go_backwards:
<del> # Reverse based on the timestep dim, so that nested_row_lengths will mask
<del> # from the correct direction. Return the reverse ragged tensor.
<add> # Reverse based on the timestep dim, so that nested_row_lengths will
<add> # mask from the correct direction. Return the reverse ragged tensor.
<ide> output = reverse(output, [1])
<ide> ragged = tf.RaggedTensor.from_tensor(output, nested_row_lengths)
<ide> return reverse(ragged, [1])
<ide> class ContextValueCache(weakref.WeakKeyDictionary):
<ide> """Container that caches (possibly tensor) values based on the context.
<ide>
<ide> This class is similar to defaultdict, where values may be produced by the
<del> default factory specified during initialization. This class also has a default
<del> value for the key (when key is `None`) -- the key is set to the current graph
<del> or eager context. The default factories for key and value are only used in
<del> `__getitem__` and `setdefault`. The `.get()` behavior remains the same.
<add> default factory specified during initialization. This class also has a
<add> default value for the key (when key is `None`) -- the key is set to the
<add> current graph or eager context. The default factories for key and value are
<add> only used in `__getitem__` and `setdefault`. The `.get()` behavior remains
<add> the same.
<ide>
<del> This object will return the value of the current graph or closest parent graph
<del> if the current graph is a function. This is to reflect the fact that if a
<del> tensor is created in eager/graph, child functions may capture that tensor.
<add> This object will return the value of the current graph or closest parent
<add> graph if the current graph is a function. This is to reflect the fact that
<add> if a tensor is created in eager/graph, child functions may capture that
<add> tensor.
<ide>
<ide> The default factory method may accept keyword arguments (unlike defaultdict,
<ide> which only accepts callables with 0 arguments). To pass keyword arguments to
<ide> def _key(self):
<ide>
<ide> def _get_parent_graph(self, graph):
<ide> """Returns the parent graph or dummy eager object."""
<del> # TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as the
<del> # outer graph. This results in outer_graph always being a Graph,
<add> # TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as
<add> # the outer graph. This results in outer_graph always being a Graph,
<ide> # even in eager mode (get_default_graph will create a new Graph if there
<del> # isn't a default graph). Because of this bug, we have to specially set the
<del> # key when eager execution is enabled.
<add> # isn't a default graph). Because of this bug, we have to specially set
<add> # the key when eager execution is enabled.
<ide> parent_graph = graph.outer_graph
<ide> if (
<ide> not isinstance(parent_graph, tf.__internal__.FuncGraph)
<ide> def _get_recursive(self, key):
<ide> return value
<ide>
<ide> # Since FuncGraphs are able to capture tensors and variables from their
<del> # parent graphs, recursively search to see if there is a value stored for
<del> # one of the parent graphs.
<add> # parent graphs, recursively search to see if there is a value stored
<add> # for one of the parent graphs.
<ide> if isinstance(key, tf.__internal__.FuncGraph):
<ide> return self._get_recursive(self._get_parent_graph(key))
<ide> return None
<ide> def __getitem__(self, key):
<ide> """Gets the value at key (or current context), or sets default value.
<ide>
<ide> Args:
<del> key: May be `None` or `Graph`object. When `None`, the key is set to the
<del> current context.
<add> key: May be `None` or `Graph` object. When `None`, the key is set to
<add> the current context.
<ide>
<ide> Returns:
<ide> Either the cached or default value.
<ide> def __getitem__(self, key):
<ide> return value
<ide>
<ide> def setdefault(self, key=None, default=None, kwargs=None):
<del> """Sets the default value if key is not in dict, and returns the value."""
<add> """Sets the default value if key is not in dict, and returns the
<add> value."""
<ide> if key is None:
<ide> key = self._key()
<ide> kwargs = kwargs or {}
<ide><path>keras/backend_config.py
<ide> def floatx():
<ide> def set_floatx(value):
<ide> """Sets the default float type.
<ide>
<del> Note: It is not recommended to set this to float16 for training, as this will
<del> likely cause numeric stability issues. Instead, mixed precision, which is
<del> using a mix of float16 and float32, can be used by calling
<add> Note: It is not recommended to set this to float16 for training, as this
<add> will likely cause numeric stability issues. Instead, mixed precision, which
<add> is using a mix of float16 and float32, can be used by calling
<ide> `tf.keras.mixed_precision.set_global_policy('mixed_float16')`. See the
<ide> [mixed precision guide](
<ide> https://www.tensorflow.org/guide/keras/mixed_precision) for details.
<ide> def set_floatx(value):
<ide> accepted_dtypes = {"float16", "float32", "float64"}
<ide> if value not in accepted_dtypes:
<ide> raise ValueError(
<del> f"Unknown `floatx` value: {value}. Expected one of {accepted_dtypes}"
<add> f"Unknown `floatx` value: {value}. "
<add> f"Expected one of {accepted_dtypes}"
<ide> )
<ide> _FLOATX = str(value)
<ide>
<ide><path>keras/backend_test.py
<ide> def test_learning_phase(self):
<ide>
<ide> def test_learning_phase_name(self):
<ide> with backend.name_scope("test_scope"):
<del> # Test that outer name scopes do not affect the learning phase's name.
<add> # Test that outer name scopes do not affect the learning phase's
<add> # name.
<ide> lp = backend.symbolic_learning_phase()
<ide> self.assertEqual(lp.name, "keras_learning_phase:0")
<ide>
<ide> def step_function(inputs, states):
<ide>
<ide> # outputs expected to be same as inputs for the first sample
<ide> expected_outputs = inputs_vals.copy()
<del> # but for the second sample all outputs in masked region should be the same
<del> # as last output before masked region
<add> # but for the second sample all outputs in masked region should be the
<add> # same as last output before masked region
<ide> expected_outputs[1, -mask_last_num_timesteps:] = expected_outputs[
<ide> 1, -(mask_last_num_timesteps + 1)
<ide> ]
<ide> def step_function(inputs, states):
<ide> outputs = backend.tile(backend.expand_dims(inputs), [1, 1, 2])
<ide> return outputs, [backend.identity(s) for s in states]
<ide> # Note: cannot just return states (which can be a problem) ->
<del> # tensorflow/python/ops/resource_variable_ops.py", line 824, in set_shape
<del> # NotImplementedError: ResourceVariable does not implement set_shape()
<add> # tensorflow/python/ops/resource_variable_ops.py", line 824, in
<add> # set_shape NotImplementedError: ResourceVariable does not implement
<add> # set_shape()
<ide>
<ide> inputs_vals = np.random.random(
<ide> (num_samples, num_timesteps, num_features)
<ide> def step_function(inputs, states):
<ide> mask_vals[-1, -1] = 0 # final timestep masked for last sample
<ide>
<ide> expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)
<del> # for the last sample, the final timestep (in masked region) should be the
<del> # same as the second to final output (before masked region)
<add> # for the last sample, the final timestep (in masked region) should be
<add> # the same as the second to final output (before masked region)
<ide> expected_outputs[-1, -1] = expected_outputs[-1, -2]
<ide>
<ide> inputs = backend.variable(inputs_vals)
<ide> def test_sparse_categorical_crossentropy_loss(self):
<ide> def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(
<ide> self,
<ide> ):
<del> # This test only runs in graph because the TF op layer is not supported yet
<del> # for sparse ops.
<add> # This test only runs in graph because the TF op layer is not supported
<add> # yet for sparse ops.
<ide> t = backend.placeholder()
<ide> p = backend.placeholder()
<ide> o = backend.sparse_categorical_crossentropy(t, p)
<ide> def test_function_tf_feed_symbols(self):
<ide> self.assertEqual(outs, [11.0, 2.0])
<ide>
<ide> def test_function_tf_fetches(self):
<del> # Additional operations can be passed to tf.compat.v1.Session().run() via
<del> # its `fetches` arguments. In contrast to `updates` argument of
<add> # Additional operations can be passed to tf.compat.v1.Session().run()
<add> # via its `fetches` arguments. In contrast to `updates` argument of
<ide> # backend.function() these do not have control dependency on `outputs`
<del> # so they can run in parallel. Also they should not contribute to output of
<del> # backend.function().
<add> # so they can run in parallel. Also they should not contribute to output
<add> # of backend.function().
<ide> with tf.Graph().as_default(), self.cached_session():
<ide> x = backend.variable(0.0)
<ide> y = backend.variable(0.0)
<ide> def test_function_tf_fetches(self):
<ide> )
<ide>
<ide> def test_function_tf_feed_dict(self):
<del> # Additional substitutions can be passed to `tf.compat.v1.Session().run()`
<del> # via its `feed_dict` arguments. Note that the feed_dict is passed once in
<del> # the constructor but we can modify the values in the dictionary. Through
<del> # this feed_dict we can provide additional substitutions besides Keras
<del> # inputs.
<add> # Additional substitutions can be passed to
<add> # `tf.compat.v1.Session().run()` via its `feed_dict` arguments. Note
<add> # that the feed_dict is passed once in the constructor but we can modify
<add> # the values in the dictionary. Through this feed_dict we can provide
<add> # additional substitutions besides Keras inputs.
<ide> with tf.Graph().as_default(), self.cached_session():
<ide> x = backend.variable(0.0)
<ide> y = backend.variable(0.0)
<ide> def test_function_tf_feed_dict(self):
<ide> backend.get_session().run(fetches=[x, y]), [20.0, 30.0]
<ide> )
<ide>
<del> # updated value in feed_dict will be modified within the K.function()
<add> # updated value in feed_dict will be modified within the
<add> # K.function()
<ide> feed_dict[y_placeholder] = 4.0
<ide> output = f([20.0])
<ide> self.assertEqual(output, [21.0])
<ide> def test_cache_in_parent_graph(self):
<ide> cache.setdefault(None, backend.constant(5))
<ide>
<ide> with tf.Graph().as_default() as g:
<del> # g is not a child graph of the default test context, so the recursive
<del> # lookup will create a new default value.
<add> # g is not a child graph of the default test context, so the
<add> # recursive lookup will create a new default value.
<ide> self.assertAllEqual(cache[g], 0)
<ide>
<ide> @tf.function
<ide> def test_implementation(self):
<ide> self.assertIsNotNone(seeded._generator)
<ide> self.assertIsNotNone(unseeded._generator)
<ide> else:
<del> # In v1, we can't use tf.random.Generator since it is not compatible with
<del> # graph mode.
<add> # In v1, we can't use tf.random.Generator since it is not compatible
<add> # with graph mode.
<ide> self.assertIsNone(seeded._generator)
<ide> self.assertIsNone(unseeded._generator)
<ide>
<ide> def test_unseeded_with_utils_set_random_seed(self):
<ide>
<ide> # Make sure even with unseeded backend generator, as long as we set the
<ide> # keras random seed, it will make the generator to produce the same
<del> # sequence. This will ensure all the client are in sync in the multi-client
<del> # setting, when they all set the keras seed.
<add> # sequence. This will ensure all the clients are in sync in the
<add> # multi-client setting, when they all set the keras seed.
<ide> tf_utils.set_random_seed(keras_seed)
<ide> gen2 = backend.RandomGenerator(seed=None, rng_type="stateful")
<ide> output3 = gen2.random_normal(shape=[2, 3])
<ide><path>keras/callbacks.py
<ide> def __init__(
<ide>
<ide> Args:
<ide> callbacks: List of `Callback` instances.
<del> add_history: Whether a `History` callback should be added, if one does not
<del> already exist in the `callbacks` list.
<del> add_progbar: Whether a `ProgbarLogger` callback should be added, if one
<del> does not already exist in the `callbacks` list.
<add> add_history: Whether a `History` callback should be added, if one does
<add> not already exist in the `callbacks` list.
<add> add_progbar: Whether a `ProgbarLogger` callback should be added, if
<add> one does not already exist in the `callbacks` list.
<ide> model: The `Model` these callbacks are used with.
<del> **params: If provided, parameters will be passed to each `Callback` via
<del> `Callback.set_params`.
<add> **params: If provided, parameters will be passed to each `Callback`
<add> via `Callback.set_params`.
<ide> """
<ide> self.callbacks = tf.nest.flatten(callbacks) if callbacks else []
<ide> self._add_default_callbacks(add_history, add_progbar)
<ide> def __init__(
<ide>
<ide> self._disallow_batch_hooks_in_ps_strategy()
<ide>
<del> # Performance check: Check batch hooks for slowness compared to batch time.
<del> # Only run check for custom callbacks (i.e. not present in this file).
<add> # Performance check: Check batch hooks for slowness compared to batch
<add> # time. Only run check for custom callbacks (i.e. not present in this
<add> # file).
<ide> self._check_timing = any(
<ide> cbk.__class__.__name__ not in globals() for cbk in self.callbacks
<ide> )
<ide> def _call_batch_hook(self, mode, hook, batch, logs=None):
<ide> self._call_batch_end_hook(mode, batch, logs)
<ide> else:
<ide> raise ValueError(
<del> f'Unrecognized hook: {hook}. Expected values are ["begin", "end"]'
<add> f"Unrecognized hook: {hook}. "
<add> 'Expected values are ["begin", "end"]'
<ide> )
<ide>
<ide> def _call_batch_begin_hook(self, mode, batch, logs):
<ide> def on_epoch_begin(self, epoch, logs=None):
<ide>
<ide> Args:
<ide> epoch: Integer, index of epoch.
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide> logs = self._process_logs(logs)
<ide> for callback in self.callbacks:
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> Args:
<ide> epoch: Integer, index of epoch.
<ide> logs: Dict, metric results for this training epoch, and for the
<del> validation epoch if validation is performed. Validation result keys
<del> are prefixed with `val_`.
<add> validation epoch if validation is performed. Validation result
<add> keys are prefixed with `val_`.
<ide> """
<ide> logs = self._process_logs(logs)
<ide> for callback in self.callbacks:
<ide> def on_train_batch_begin(self, batch, logs=None):
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<del> logs: Dict, contains the return value of `model.train_step`. Typically,
<del> the values of the `Model`'s metrics are returned. Example:
<del> `{'loss': 0.2, 'accuracy': 0.7}`.
<add> logs: Dict, contains the return value of `model.train_step`.
<add> Typically, the values of the `Model`'s metrics are returned.
<add> Example: `{'loss': 0.2, 'accuracy': 0.7}`.
<ide> """
<ide> if self._should_call_train_batch_hooks:
<ide> self._call_batch_hook(ModeKeys.TRAIN, "begin", batch, logs=logs)
<ide> def on_test_batch_begin(self, batch, logs=None):
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<del> logs: Dict, contains the return value of `model.test_step`. Typically,
<del> the values of the `Model`'s metrics are returned. Example:
<del> `{'loss': 0.2, 'accuracy': 0.7}`.
<add> logs: Dict, contains the return value of `model.test_step`.
<add> Typically, the values of the `Model`'s metrics are returned.
<add> Example: `{'loss': 0.2, 'accuracy': 0.7}`.
<ide> """
<ide> if self._should_call_test_batch_hooks:
<ide> self._call_batch_hook(ModeKeys.TEST, "begin", batch, logs=logs)
<ide> def on_test_begin(self, logs=None):
<ide> """Calls the `on_test_begin` methods of its callbacks.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide> logs = self._process_logs(logs)
<ide> for callback in self.callbacks:
<ide> def on_predict_begin(self, logs=None):
<ide> """Calls the 'on_predict_begin` methods of its callbacks.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide> logs = self._process_logs(logs)
<ide> for callback in self.callbacks:
<ide> class Callback:
<ide> `predict` in order to hook into the various stages of the model training and
<ide> inference lifecycle.
<ide>
<del> To create a custom callback, subclass `keras.callbacks.Callback` and override
<del> the method associated with the stage of interest. See
<add> To create a custom callback, subclass `keras.callbacks.Callback` and
<add> override the method associated with the stage of interest. See
<ide> https://www.tensorflow.org/guide/keras/custom_callback for more information.
<ide>
<ide> Example:
<ide> class Callback:
<ide> ... def on_train_end(self, logs=None):
<ide> ... global training_finished
<ide> ... training_finished = True
<del> >>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
<add> >>> model = tf.keras.Sequential([
<add> ... tf.keras.layers.Dense(1, input_shape=(1,))])
<ide> >>> model.compile(loss='mean_squared_error')
<ide> >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
<ide> ... callbacks=[MyCallback()])
<ide> def on_batch_end(self, batch, logs=None):
<ide> def on_epoch_begin(self, epoch, logs=None):
<ide> """Called at the start of an epoch.
<ide>
<del> Subclasses should override for any actions to run. This function should only
<del> be called during TRAIN mode.
<add> Subclasses should override for any actions to run. This function should
<add> only be called during TRAIN mode.
<ide>
<ide> Args:
<ide> epoch: Integer, index of epoch.
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> """Called at the end of an epoch.
<ide>
<del> Subclasses should override for any actions to run. This function should only
<del> be called during TRAIN mode.
<add> Subclasses should override for any actions to run. This function should
<add> only be called during TRAIN mode.
<ide>
<ide> Args:
<ide> epoch: Integer, index of epoch.
<ide> logs: Dict, metric results for this training epoch, and for the
<del> validation epoch if validation is performed. Validation result keys
<del> are prefixed with `val_`. For training epoch, the values of the
<del> `Model`'s metrics are returned. Example : `{'loss': 0.2, 'accuracy':
<del> 0.7}`.
<add> validation epoch if validation is performed. Validation result
<add> keys are prefixed with `val_`. For training epoch, the values of
<add> the `Model`'s metrics are returned. Example:
<add> `{'loss': 0.2, 'accuracy': 0.7}`.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_train_batch_begin(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide> # For backwards compatibility.
<ide> self.on_batch_begin(batch, logs=logs)
<ide> def on_train_batch_end(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<ide> def on_test_batch_begin(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_test_batch_end(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<ide> def on_predict_batch_begin(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_predict_batch_end(self, batch, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Note that if the `steps_per_execution` argument to `compile` in
<del> `tf.keras.Model` is set to `N`, this method will only be called every `N`
<del> batches.
<add> `tf.keras.Model` is set to `N`, this method will only be called every
<add> `N` batches.
<ide>
<ide> Args:
<ide> batch: Integer, index of batch within the current epoch.
<ide> def on_train_begin(self, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_train_end(self, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Args:
<del> logs: Dict. Currently the output of the last call to `on_epoch_end()`
<del> is passed to this argument for this method but that may change in
<del> the future.
<add> logs: Dict. Currently the output of the last call to
<add> `on_epoch_end()` is passed to this argument for this method but
<add> that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_test_begin(self, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_predict_begin(self, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> @doc_controls.for_subclass_implementers
<ide> def on_predict_end(self, logs=None):
<ide> Subclasses should override for any actions to run.
<ide>
<ide> Args:
<del> logs: Dict. Currently no data is passed to this argument for this method
<del> but that may change in the future.
<add> logs: Dict. Currently no data is passed to this argument for this
<add> method but that may change in the future.
<ide> """
<ide>
<ide> def _implements_train_batch_hooks(self):
<ide> def _implements_test_batch_hooks(self):
<ide> ) or not generic_utils.is_default(self.on_test_batch_end)
<ide>
<ide> def _implements_predict_batch_hooks(self):
<del> """Determines if this Callback should be called for each predict batch."""
<add> """Determines if this Callback should be called for each predict
<add> batch."""
<ide> return not generic_utils.is_default(
<ide> self.on_predict_batch_begin
<ide> ) or not generic_utils.is_default(self.on_predict_batch_end)
<ide> def on_batch_end(self, batch, logs=None):
<ide> logs = logs or {}
<ide> batch_size = logs.get("size", 0)
<ide> # In case of distribution strategy we can potentially run multiple steps
<del> # at the same time, we should account for that in the `seen` calculation.
<add> # at the same time, we should account for that in the `seen`
<add> # calculation.
<ide> num_steps = logs.get("num_steps", 1)
<ide> self.seen += batch_size * num_steps
<ide>
<ide> def _reset_progbar(self):
<ide> self.progbar = None
<ide>
<ide> def _maybe_init_progbar(self):
<del> """Instantiate a `Progbar` if not yet, and update the stateful metrics."""
<add> """Instantiate a `Progbar` if not yet, and update the stateful
<add> metrics."""
<ide> # TODO(rchao): Legacy TF1 code path may use list for
<del> # `self.stateful_metrics`. Remove "cast to set" when TF1 support is dropped.
<add> # `self.stateful_metrics`. Remove "cast to set" when TF1 support is
<add> # dropped.
<ide> self.stateful_metrics = set(self.stateful_metrics)
<ide>
<ide> if self.model:
<del> # Update the existing stateful metrics as `self.model.metrics` may contain
<del> # updated metrics after `MetricsContainer` is built in the first train
<del> # step.
<add> # Update the existing stateful metrics as `self.model.metrics` may
<add> # contain updated metrics after `MetricsContainer` is built in the
<add> # first train step.
<ide> self.stateful_metrics = self.stateful_metrics.union(
<ide> set(m.name for m in self.model.metrics)
<ide> )
<ide> class ModelCheckpoint(Callback):
<ide>
<ide> `ModelCheckpoint` callback is used in conjunction with training using
<ide> `model.fit()` to save a model or weights (in a checkpoint file) at some
<del> interval, so the model or weights can be loaded later to continue the training
<del> from the state saved.
<add> interval, so the model or weights can be loaded later to continue the
<add> training from the state saved.
<ide>
<ide> A few options this callback provides include:
<ide>
<ide> class ModelCheckpoint(Callback):
<ide> performance.
<ide> - Definition of 'best'; which quantity to monitor and whether it should be
<ide> maximized or minimized.
<del> - The frequency it should save at. Currently, the callback supports saving at
<del> the end of every epoch, or after a fixed number of training batches.
<add> - The frequency it should save at. Currently, the callback supports saving
<add> at the end of every epoch, or after a fixed number of training batches.
<ide> - Whether only weights are saved, or the whole model is saved.
<ide>
<ide> Note: If you get `WARNING:tensorflow:Can save best model only with <name>
<ide> class ModelCheckpoint(Callback):
<ide> # so far.
<ide> model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
<ide>
<del> # The model weights (that are considered the best) are loaded into the model.
<add> # The model weights (that are considered the best) are loaded into the
<add> # model.
<ide> model.load_weights(checkpoint_filepath)
<ide> ```
<ide>
<ide> Args:
<ide> filepath: string or `PathLike`, path to save the model file. e.g.
<ide> filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
<del> can contain named formatting options, which will be filled the value of
<del> `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if
<del> `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model
<del> checkpoints will be saved with the epoch number and the validation loss
<del> in the filename. The directory of the filepath should not be reused by
<del> any other callbacks to avoid conflicts.
<del> monitor: The metric name to monitor. Typically the metrics are set by the
<del> `Model.compile` method. Note:
<add> can contain named formatting options, which will be filled the value
<add> of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example:
<add> if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the
<add> model checkpoints will be saved with the epoch number and the
<add> validation loss in the filename. The directory of the filepath should
<add> not be reused by any other callbacks to avoid conflicts.
<add> monitor: The metric name to monitor. Typically the metrics are set by
<add> the `Model.compile` method. Note:
<ide>
<ide> * Prefix the name with `"val_`" to monitor validation metrics.
<ide> * Use `"loss"` or "`val_loss`" to monitor the model's total loss.
<ide> class ModelCheckpoint(Callback):
<ide> save_weights_only: if True, then only the model's weights will be saved
<ide> (`model.save_weights(filepath)`), else the full model is saved
<ide> (`model.save(filepath)`).
<del> save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
<del> the model after each epoch. When using integer, the callback saves the
<del> model at end of this many batches. If the `Model` is compiled with
<del> `steps_per_execution=N`, then the saving criteria will be
<del> checked every Nth batch. Note that if the saving isn't aligned to
<add> save_freq: `'epoch'` or integer. When using `'epoch'`, the callback
<add> saves the model after each epoch. When using integer, the callback
<add> saves the model at end of this many batches. If the `Model` is
<add> compiled with `steps_per_execution=N`, then the saving criteria will
<add> be checked every Nth batch. Note that if the saving isn't aligned to
<ide> epochs, the monitored metric may potentially be less reliable (it
<ide> could reflect as little as 1 batch, since the metrics get reset every
<ide> epoch). Defaults to `'epoch'`.
<ide> options: Optional `tf.train.CheckpointOptions` object if
<ide> `save_weights_only` is true or optional `tf.saved_model.SaveOptions`
<ide> object if `save_weights_only` is false.
<del> initial_value_threshold: Floating point initial "best" value of the metric
<del> to be monitored. Only applies if `save_best_value=True`. Only overwrites
<del> the model weights already saved if the performance of current
<del> model is better than this value.
<add> initial_value_threshold: Floating point initial "best" value of the
<add> metric to be monitored. Only applies if `save_best_value=True`. Only
<add> overwrites the model weights already saved if the performance of
<add> current model is better than this value.
<ide> **kwargs: Additional arguments for backwards compatibility. Possible key
<ide> is `period`.
<ide> """
<ide> def __init__(
<ide> else:
<ide> raise TypeError(
<ide> "If save_weights_only is True, then `options` must be "
<del> f"either None or a tf.train.CheckpointOptions. Got {options}."
<add> f"either None or a tf.train.CheckpointOptions. "
<add> f"Got {options}."
<ide> )
<ide> else:
<ide> if options is None or isinstance(
<ide> def __init__(
<ide> else:
<ide> raise TypeError(
<ide> "If save_weights_only is False, then `options` must be "
<del> f"either None or a tf.saved_model.SaveOptions. Got {options}."
<add> f"either None or a tf.saved_model.SaveOptions. "
<add> f"Got {options}."
<ide> )
<ide>
<del> # Deprecated field `load_weights_on_restart` is for loading the checkpoint
<del> # file from `filepath` at the start of `model.fit()`
<add> # Deprecated field `load_weights_on_restart` is for loading the
<add> # checkpoint file from `filepath` at the start of `model.fit()`
<ide> # TODO(rchao): Remove the arg during next breaking release.
<ide> if "load_weights_on_restart" in kwargs:
<ide> self.load_weights_on_restart = kwargs["load_weights_on_restart"]
<ide> def on_train_begin(self, logs=None):
<ide> filepath_to_load
<ide> ):
<ide> try:
<del> # `filepath` may contain placeholders such as `{epoch:02d}`, and
<del> # thus it attempts to load the most recently modified file with file
<del> # name matching the pattern.
<add> # `filepath` may contain placeholders such as `{epoch:02d}`,
<add> # and thus it attempts to load the most recently modified
<add> # file with file name matching the pattern.
<ide> self.model.load_weights(filepath_to_load)
<ide> except (IOError, ValueError) as e:
<ide> raise ValueError(
<del> f"Error loading file from {filepath_to_load}. Reason: {e}"
<add> f"Error loading file from {filepath_to_load}. "
<add> f"Reason: {e}"
<ide> )
<ide>
<ide> def _implements_train_batch_hooks(self):
<ide> def _save_model(self, epoch, batch, logs):
<ide> if self.monitor_op(current, self.best):
<ide> if self.verbose > 0:
<ide> io_utils.print_msg(
<del> f"\nEpoch {epoch + 1}: {self.monitor} improved "
<add> f"\nEpoch {epoch + 1}: {self.monitor} "
<add> "improved "
<ide> f"from {self.best:.5f} to {current:.5f}, "
<ide> f"saving model to {filepath}"
<ide> )
<ide> def _save_model(self, epoch, batch, logs):
<ide> if self.verbose > 0:
<ide> io_utils.print_msg(
<ide> f"\nEpoch {epoch + 1}: "
<del> f"{self.monitor} did not improve from {self.best:.5f}"
<add> f"{self.monitor} did not improve "
<add> f"from {self.best:.5f}"
<ide> )
<ide> else:
<ide> if self.verbose > 0:
<ide> def _save_model(self, epoch, batch, logs):
<ide> f"directory: {filepath}"
<ide> )
<ide> except IOError as e: # h5py 2.x
<del> # `e.errno` appears to be `None` so checking the content of `e.args[0]`.
<add> # `e.errno` appears to be `None` so checking the content of
<add> # `e.args[0]`.
<ide> if "is a directory" in str(e.args[0]).lower():
<ide> raise IOError(
<ide> "Please specify a non-directory filepath for "
<ide> def _get_file_path(self, epoch, batch, logs):
<ide> """Returns the file path for checkpoint."""
<ide> # pylint: disable=protected-access
<ide> try:
<del> # `filepath` may contain placeholders such as `{epoch:02d}`,`{batch:02d}`
<del> # and `{mape:.2f}`. A mismatch between logged metrics and the path's
<del> # placeholders can cause formatting to fail.
<add> # `filepath` may contain placeholders such as
<add> # `{epoch:02d}`,`{batch:02d}` and `{mape:.2f}`. A mismatch between
<add> # logged metrics and the path's placeholders can cause formatting to
<add> # fail.
<ide> if batch is None or "batch" in logs:
<ide> file_path = self.filepath.format(epoch=epoch + 1, **logs)
<ide> else:
<ide> def _get_file_path(self, epoch, batch, logs):
<ide> return self._write_filepath
<ide>
<ide> def _maybe_remove_file(self):
<del> # Remove the checkpoint directory in multi-worker training where this worker
<del> # should not checkpoint. It is a dummy directory previously saved for sync
<del> # distributed training.
<add> # Remove the checkpoint directory in multi-worker training where this
<add> # worker should not checkpoint. It is a dummy directory previously saved
<add> # for sync distributed training.
<ide> distributed_file_utils.remove_temp_dir_with_filepath(
<ide> self._write_filepath, self.model.distribute_strategy
<ide> )
<ide> def _get_most_recently_modified_file_matching_pattern(self, pattern):
<ide> """Returns the most recently modified filepath matching pattern.
<ide>
<ide> Pattern may contain python formatting placeholder. If
<del> `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
<del> check for most recently modified one that matches the pattern.
<del>
<del> In the rare case where there are more than one pattern-matching file having
<del> the same modified time that is most recent among all, return the filepath
<del> that is largest (by `>` operator, lexicographically using the numeric
<del> equivalents). This provides a tie-breaker when multiple files are most
<del> recent. Note that a larger `filepath` can sometimes indicate a later time of
<del> modification (for instance, when epoch/batch is used as formatting option),
<del> but not necessarily (when accuracy or loss is used). The tie-breaker is
<del> put in the logic as best effort to return the most recent, and to avoid
<del> undeterministic result.
<add> `tf.train.latest_checkpoint()` does not return None, use that;
<add> otherwise, check for most recently modified one that matches the
<add> pattern.
<add>
<add> In the rare case where there are more than one pattern-matching file
<add> having the same modified time that is most recent among all, return the
<add> filepath that is largest (by `>` operator, lexicographically using the
<add> numeric equivalents). This provides a tie-breaker when multiple files
<add> are most recent. Note that a larger `filepath` can sometimes indicate a
<add> later time of modification (for instance, when epoch/batch is used as
<add> formatting option), but not necessarily (when accuracy or loss is used).
<add> The tie-breaker is put in the logic as best effort to return the most
<add> recent, and to avoid undeterministic result.
<ide>
<ide> Modified time of a file is obtained with `os.path.getmtime()`.
<ide>
<ide> def _get_most_recently_modified_file_matching_pattern(self, pattern):
<ide> path_pattern = os.path.join(test_dir, file_pattern)
<ide> file_paths = [
<ide> os.path.join(test_dir, file_name) for file_name in
<del> ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
<add> ['f.batch03epoch02.h5',
<add> 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
<ide> ]
<ide> for file_path in file_paths:
<ide> # Write something to each of the files
<ide> def _get_most_recently_modified_file_matching_pattern(self, pattern):
<ide> ```
<ide>
<ide> Args:
<del> pattern: The file pattern that may optionally contain python placeholder
<del> such as `{epoch:02d}`.
<add> pattern: The file pattern that may optionally contain python
<add> placeholder such as `{epoch:02d}`.
<ide>
<ide> Returns:
<del> The most recently modified file's full filepath matching `pattern`. If
<del> `pattern` does not contain any placeholder, this returns the filepath
<del> that
<del> exactly matches `pattern`. Returns `None` if no match is found.
<add> The most recently modified file's full filepath matching `pattern`.
<add> If `pattern` does not contain any placeholder, this returns the
<add> filepath that exactly matches `pattern`. Returns `None` if no match
<add> is found.
<ide> """
<ide> dir_name = os.path.dirname(pattern)
<ide> base_name = os.path.basename(pattern)
<ide> base_name_regex = "^" + re.sub(r"{.*}", r".*", base_name) + "$"
<ide>
<del> # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
<del> # use that as it is more robust than `os.path.getmtime()`.
<add> # If tf.train.latest_checkpoint tells us there exists a latest
<add> # checkpoint, use that as it is more robust than `os.path.getmtime()`.
<ide> latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)
<ide> if latest_tf_checkpoint is not None and re.match(
<ide> base_name_regex, os.path.basename(latest_tf_checkpoint)
<ide> def _get_most_recently_modified_file_matching_pattern(self, pattern):
<ide> if mod_time > latest_mod_time:
<ide> latest_mod_time = mod_time
<ide> file_path_with_latest_mod_time = file_path
<del> # In the case a file with later modified time is found, reset
<del> # the counter for the number of files with latest modified time.
<add> # In the case a file with later modified time is found,
<add> # reset the counter for the number of files with latest
<add> # modified time.
<ide> n_file_with_latest_mod_time = 1
<ide> elif mod_time == latest_mod_time:
<del> # In the case a file has modified time tied with the most recent,
<del> # increment the counter for the number of files with latest modified
<del> # time by 1.
<add> # In the case a file has modified time tied with the
<add> # most recent, increment the counter for the number of
<add> # files with latest modified time by 1.
<ide> n_file_with_latest_mod_time += 1
<ide>
<ide> if n_file_with_latest_mod_time == 1:
<ide> # Return the sole file that has most recent modified time.
<ide> return file_path_with_latest_mod_time
<ide> else:
<del> # If there are more than one file having latest modified time, return
<del> # the file path with the largest file name.
<add> # If there are more than one file having latest modified time,
<add> # return the file path with the largest file name.
<ide> return file_path_with_largest_file_name
<ide>
<ide>
<ide> class BackupAndRestore(Callback):
<ide> interruption that has happened in the middle of a `Model.fit` execution, by
<ide> backing up the training states in a temporary checkpoint file (with the help
<ide> of a `tf.train.CheckpointManager`), at the end of each epoch. Each backup
<del> overwrites the previously written checkpoint file, so at any given time there
<del> is at most one such checkpoint file for backup/restoring purpose.
<add> overwrites the previously written checkpoint file, so at any given time
<add> there is at most one such checkpoint file for backup/restoring purpose.
<ide>
<del> If training restarts before completion, the training state (which includes the
<del> `Model` weights and epoch number) is restored to the most recently saved state
<del> at the beginning of a new `Model.fit` run. At the completion of a `Model.fit`
<del> run, the temporary checkpoint file is deleted.
<add> If training restarts before completion, the training state (which includes
<add> the `Model` weights and epoch number) is restored to the most recently saved
<add> state at the beginning of a new `Model.fit` run. At the completion of a
<add> `Model.fit` run, the temporary checkpoint file is deleted.
<ide>
<ide> Note that the user is responsible to bring jobs back after the interruption.
<ide> This callback is important for the backup and restore mechanism for fault
<del> tolerance purpose, and the model to be restored from an previous checkpoint is
<del> expected to be the same as the one used to back up. If user changes arguments
<del> passed to compile or fit, the checkpoint saved for fault tolerance can become
<del> invalid.
<add> tolerance purpose, and the model to be restored from an previous checkpoint
<add> is expected to be the same as the one used to back up. If user changes
<add> arguments passed to compile or fit, the checkpoint saved for fault tolerance
<add> can become invalid.
<ide>
<ide> Note:
<ide>
<ide> class BackupAndRestore(Callback):
<ide> `Model.fit` redoes any partial work during the unfinished epoch in which the
<ide> training got restarted (so the work done before the interruption doesn't
<ide> affect the final model state).
<del> 3. This works for both single worker and multi-worker modes. When `Model.fit`
<del> is used with `tf.distribute`, it supports `tf.distribute.MirroredStrategy`,
<del> `tf.distribute.MultiWorkerMirroredStrategy`, `tf.distribute.TPUStrategy`, and
<del> `tf.distribute.experimental.ParameterServerStrategy`.
<add> 3. This works for both single worker and multi-worker modes. When
<add> `Model.fit` is used with `tf.distribute`, it supports
<add> `tf.distribute.MirroredStrategy`,
<add> `tf.distribute.MultiWorkerMirroredStrategy`, `tf.distribute.TPUStrategy`,
<add> and `tf.distribute.experimental.ParameterServerStrategy`.
<ide>
<ide> Example:
<ide>
<ide> class BackupAndRestore(Callback):
<ide> ... verbose=0)
<ide> ... except:
<ide> ... pass
<del> >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
<del> ... batch_size=1, callbacks=[callback], verbose=0)
<add> >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
<add> ... epochs=10, batch_size=1, callbacks=[callback],
<add> ... verbose=0)
<ide> >>> # Only 6 more epochs are run, since first trainning got interrupted at
<ide> >>> # zero-indexed epoch 4, second training will continue from 4 to 9.
<ide> >>> len(history.history['loss'])
<ide> def __init__(self, backup_dir):
<ide> "BackupAndRestore only supports eager mode. In graph "
<ide> "mode, consider using ModelCheckpoint to manually save "
<ide> "and restore weights with `model.load_weights()` and by "
<del> "providing `initial_epoch` in `model.fit()` for fault tolerance."
<add> "providing `initial_epoch` in `model.fit()` for fault "
<add> "tolerance."
<ide> )
<ide>
<ide> # Only the chief worker writes model checkpoints, but all workers
<ide> def on_train_begin(self, logs=None):
<ide> ):
<ide> raise NotImplementedError(
<ide> f"{type(self.model.distribute_strategy)} is not supported yet. "
<del> "Currently BackupAndRestore callback only supports empty strategy, "
<add> "Currently BackupAndRestore callback "
<add> "only supports empty strategy, "
<ide> "MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy."
<ide> )
<ide> self.model._training_state = worker_training_state.WorkerTrainingState(
<ide> def on_train_begin(self, logs=None):
<ide>
<ide> def on_train_end(self, logs=None):
<ide> # pylint: disable=protected-access
<del> # On exit of training, delete the training state backup file that was saved
<del> # for the purpose of worker recovery.
<add> # On exit of training, delete the training state backup file that was
<add> # saved for the purpose of worker recovery.
<ide> self._training_state.delete_backup()
<ide>
<ide> # Clean up the training state.
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> self.best_epoch = epoch
<ide> if self.restore_best_weights:
<ide> self.best_weights = self.model.get_weights()
<del> # Only restart wait if we beat both the baseline and our previous best.
<add> # Only restart wait if we beat both the baseline and our previous
<add> # best.
<ide> if self.baseline is None or self._is_improvement(
<ide> current, self.baseline
<ide> ):
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> if self.restore_best_weights and self.best_weights is not None:
<ide> if self.verbose > 0:
<ide> io_utils.print_msg(
<del> "Restoring model weights from the end of the best epoch: "
<add> "Restoring model weights from "
<add> "the end of the best epoch: "
<ide> f"{self.best_epoch + 1}."
<ide> )
<ide> self.model.set_weights(self.best_weights)
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> class LearningRateScheduler(Callback):
<ide> """Learning rate scheduler.
<ide>
<del> At the beginning of every epoch, this callback gets the updated learning rate
<del> value from `schedule` function provided at `__init__`, with the current epoch
<del> and current learning rate, and applies the updated learning rate
<del> on the optimizer.
<add> At the beginning of every epoch, this callback gets the updated learning
<add> rate value from `schedule` function provided at `__init__`, with the current
<add> epoch and current learning rate, and applies the updated learning rate on
<add> the optimizer.
<ide>
<ide> Args:
<ide> schedule: a function that takes an epoch index (integer, indexed from 0)
<ide> def keras_model_summary(name, data, step=None):
<ide> """Writes a Keras model as JSON to as a Summary.
<ide>
<ide> Writing the Keras model configuration allows the TensorBoard graph plugin to
<del> render a conceptual graph, as opposed to graph of ops. In case the model fails
<del> to serialize as JSON, it ignores and returns False.
<add> render a conceptual graph, as opposed to graph of ops. In case the model
<add> fails to serialize as JSON, it ignores and returns False.
<ide>
<ide> Args:
<del> name: A name for this summary. The summary tag used for TensorBoard will be
<del> this name prefixed by any active name scopes.
<add> name: A name for this summary. The summary tag used for TensorBoard will
<add> be this name prefixed by any active name scopes.
<ide> data: A Keras Model to write.
<ide> step: Explicit `int64`-castable monotonic step value for this summary. If
<del> omitted, this defaults to `tf.summary.experimental.get_step()`, which must
<del> not be None.
<add> omitted, this defaults to `tf.summary.experimental.get_step()`, which
<add> must not be None.
<ide>
<ide> Returns:
<ide> True on success, or False if no summary was written because no default
<ide> class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
<ide>
<ide> Args:
<ide> log_dir: the path of the directory where to save the log files to be
<del> parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
<del> This directory should not be reused by any other callbacks.
<add> parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir,
<add> 'logs') This directory should not be reused by any other callbacks.
<ide> histogram_freq: frequency (in epochs) at which to compute
<ide> weight histograms for the layers of the model. If set to 0, histograms
<ide> won't be computed. Validation data (or split) must be specified for
<ide> class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
<ide> can become quite large when write_graph is set to True.
<ide> write_images: whether to write model weights to visualize as image in
<ide> TensorBoard.
<del> write_steps_per_second: whether to log the training steps per second into
<del> Tensorboard. This supports both epoch and batch frequency logging.
<add> write_steps_per_second: whether to log the training steps per second
<add> into Tensorboard. This supports both epoch and batch frequency
<add> logging.
<ide> update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
<del> writes the losses and metrics to TensorBoard after each batch. The same
<del> applies for `'epoch'`. If using an integer, let's say `1000`, the
<add> writes the losses and metrics to TensorBoard after each batch. The
<add> same applies for `'epoch'`. If using an integer, let's say `1000`, the
<ide> callback will write the metrics and losses to TensorBoard every 1000
<ide> batches. Note that writing too frequently to TensorBoard can slow down
<ide> your training.
<ide> def call(self, x):
<ide> model = MyModel()
<ide> model.compile('sgd', 'mse')
<ide>
<del> # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
<del> # In addition to any `tf.summary` contained in `Model.call`, metrics added in
<del> # `Model.compile` will be logged every N batches.
<add> # Make sure to set `update_freq=N` to log a batch-level summary every N
<add> # batches. In addition to any `tf.summary` contained in `Model.call`,
<add> # metrics added in `Model.compile` will be logged every N batches.
<ide> tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
<ide> model.fit(x_train, y_train, callbacks=[tb_callback])
<ide> ```
<ide> def my_summary(x):
<ide> model = tf.keras.Model(inputs, outputs)
<ide> model.compile('sgd', 'mse')
<ide>
<del> # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
<del> # In addition to any `tf.summary` contained in `Model.call`, metrics added in
<del> # `Model.compile` will be logged every N batches.
<add> # Make sure to set `update_freq=N` to log a batch-level summary every N
<add> # batches. In addition to any `tf.summary` contained in `Model.call`,
<add> # metrics added in `Model.compile` will be logged every N batches.
<ide> tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
<ide> model.fit(x_train, y_train, callbacks=[tb_callback])
<ide> ```
<ide> def _validate_kwargs(self, kwargs):
<ide> if unrecognized_kwargs:
<ide> raise ValueError(
<ide> "Unrecognized arguments in `TensorBoard` Callback: "
<del> f"{unrecognized_kwargs}. Supported kwargs are: {supported_kwargs}"
<add> f"{unrecognized_kwargs}. "
<add> f"Supported kwargs are: {supported_kwargs}"
<ide> )
<ide>
<ide> def set_model(self, model):
<ide> def _write_keras_model_train_graph(self):
<ide> with self._train_writer.as_default():
<ide> with tf.summary.record_if(True):
<ide> train_fn = self.model.train_tf_function
<del> # If the train_function is a `tf.function`, we can write out a graph
<add> # If the train_function is a `tf.function`, we can write out a
<add> # graph
<ide> if hasattr(train_fn, "function_spec"):
<ide> tf.summary.graph(
<ide> train_fn._concrete_stateful_fn.graph
<ide> def _write_keras_model_summary(self):
<ide> with tf.summary.record_if(True):
<ide> summary_writable = (
<ide> self.model._is_graph_network
<del> or self.model.__class__.__name__ # pylint: disable=protected-access
<del> == "Sequential"
<add> or self.model.__class__.__name__ == "Sequential"
<ide> ) # pylint: disable=protected-access
<ide> if summary_writable:
<ide> keras_model_summary("keras", self.model, step=0)
<ide> def _configure_embeddings(self):
<ide> for layer in self.model.layers:
<ide> if isinstance(layer, core.Embedding):
<ide> embedding = config.embeddings.add()
<del> # Embeddings are always the first layer, so this naming should be
<del> # consistent in any keras models checkpoints.
<add> # Embeddings are always the first layer, so this naming should
<add> # be consistent in any keras models checkpoints.
<ide> name = (
<ide> "layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
<ide> )
<ide> def _pop_writer(self):
<ide> if self.update_freq == "epoch":
<ide> return
<ide>
<del> # See _push_writer for the content of the previous_context, which is pair
<del> # of context.
<add> # See _push_writer for the content of the previous_context, which is
<add> # pair of context.
<ide> previous_context = self._prev_summary_state.pop()
<ide> previous_context[1].__exit__(*sys.exc_info())
<ide> previous_context[0].__exit__(*sys.exc_info())
<ide> def _init_profile_batch(self, profile_batch):
<ide> Setting `profile_batch=0` disables profiling.
<ide>
<ide> Args:
<del> profile_batch: The range of batches to profile. Should be a non-negative
<del> integer or a comma separated string of pair of positive integers. A pair
<del> of positive integers signify a range of batches to profile.
<add> profile_batch: The range of batches to profile. Should be a
<add> non-negative integer or a comma separated string of pair of positive
<add> integers. A pair of positive integers signify a range of batches to
<add> profile.
<ide>
<ide> Raises:
<del> ValueError: If profile_batch is not an integer or a comma separated pair
<del> of positive integers.
<add> ValueError: If profile_batch is not an integer or a comma separated
<add> pair of positive integers.
<ide>
<ide> """
<ide> profile_batch_error_message = (
<del> "profile_batch must be a non-negative integer or 2-tuple of positive "
<del> "integers. A pair of positive integers signifies a range of batches "
<add> "profile_batch must be a non-negative integer or "
<add> "2-tuple of positive "
<add> "integers. A pair of positive integers "
<add> "signifies a range of batches "
<ide> f"to profile. Found: {profile_batch}"
<ide> )
<ide>
<ide> def on_test_end(self, logs=None):
<ide> self._pop_writer()
<ide>
<ide> def _implements_train_batch_hooks(self):
<del> # Only call batch hooks when tracing or write_steps_per_second are enabled
<add> # Only call batch hooks when tracing or write_steps_per_second are
<add> # enabled
<ide> return self._should_trace or self.write_steps_per_second
<ide>
<ide> def on_train_batch_begin(self, batch, logs=None):
<ide> def _log_weights(self, epoch):
<ide> histogram_weight_name, weight, step=epoch
<ide> )
<ide> if self.write_images:
<del> # Add a suffix to prevent summary tag name collision.
<add> # Add a suffix to prevent summary tag name
<add> # collision.
<ide> image_weight_name = weight_name + "/image"
<ide> self._log_weight_as_image(
<ide> weight, image_weight_name, epoch
<ide> class ReduceLROnPlateau(Callback):
<ide> mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
<ide> the learning rate will be reduced when the
<ide> quantity monitored has stopped decreasing; in `'max'` mode it will be
<del> reduced when the quantity monitored has stopped increasing; in `'auto'`
<del> mode, the direction is automatically inferred from the name of the
<del> monitored quantity.
<add> reduced when the quantity monitored has stopped increasing; in
<add> `'auto'` mode, the direction is automatically inferred from the name
<add> of the monitored quantity.
<ide> min_delta: threshold for measuring the new optimum, to only focus on
<ide> significant changes.
<del> cooldown: number of epochs to wait before resuming normal operation after
<del> lr has been reduced.
<add> cooldown: number of epochs to wait before resuming normal operation
<add> after lr has been reduced.
<ide> min_lr: lower bound on the learning rate.
<ide> """
<ide>
<ide> def __init__(
<ide> self.monitor = monitor
<ide> if factor >= 1.0:
<ide> raise ValueError(
<del> f"ReduceLROnPlateau does not support a factor >= 1.0. Got {factor}"
<add> f"ReduceLROnPlateau does not support "
<add> f"a factor >= 1.0. Got {factor}"
<ide> )
<ide> if "epsilon" in kwargs:
<ide> min_delta = kwargs.pop("epsilon")
<ide> def on_epoch_end(self, epoch, logs=None):
<ide> if self.verbose > 0:
<ide> io_utils.print_msg(
<ide> f"\nEpoch {epoch +1}: "
<del> f"ReduceLROnPlateau reducing learning rate to {new_lr}."
<add> f"ReduceLROnPlateau reducing "
<add> f"learning rate to {new_lr}."
<ide> )
<ide> self.cooldown_counter = self.cooldown
<ide> self.wait = 0
<ide><path>keras/callbacks_test.py
<ide> def test_backup_restore_train_counter(self):
<ide> cbk = BackupAndRestore(self.get_temp_dir())
<ide>
<ide> class InterruptingCallback(keras.callbacks.Callback):
<del> """A callback to intentionally introduce interruption to training."""
<add> """A callback to intentionally introduce interruption to
<add> training."""
<ide>
<ide> def on_epoch_end(self, epoch, log=None):
<ide> logging.info(f"counter: {model._train_counter}")
<ide> def _test_backup_and_restore_callback_with(self, cls):
<ide> )
<ide>
<ide> class InterruptingCallback(keras.callbacks.Callback):
<del> """A callback to intentionally introduce interruption to training."""
<add> """A callback to intentionally introduce interruption to
<add> training."""
<ide>
<ide> def on_epoch_end(self, epoch, log=None):
<ide> if epoch == 15:
<ide> def test_ModelCheckpoint(self):
<ide>
<ide> model_type = test_utils.get_model_type()
<ide> if model_type == "subclass":
<del> return # Skip test since subclassed models cannot be saved in .h5 format.
<add> # Skip test since subclassed models cannot be saved in .h5 format.
<add> return
<ide> if not tf.__internal__.tf2.enabled():
<ide> self.skipTest("Checkpoint callback only available in v2.")
<ide>
<ide> def test_ModelCheckpoint(self):
<ide> mode="unknown",
<ide> )
<ide>
<del> # Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`.
<del> # Though `period` is deprecated, we're testing it for
<add> # Case 7: `ModelCheckpoint` with a combination of `save_freq` and
<add> # `period`. Though `period` is deprecated, we're testing it for
<ide> # backward-compatibility.
<ide> filepath = os.path.join(temp_dir, "checkpoint.epoch{epoch:02d}.h5")
<ide> cbks = [
<ide> def test_ModelCheckpoint(self):
<ide> os.remove(filepath.format(epoch=5, batch=1))
<ide> os.remove(filepath.format(epoch=5, batch=2))
<ide>
<del> # Case 12: ModelCheckpoint saves model with initial_value_threshold param
<add> # Case 12: ModelCheckpoint saves model with initial_value_threshold
<add> # param
<ide> mode = "max"
<ide> monitor = "val_acc"
<ide> initial_value_threshold = 0
<ide> def test_ModelCheckpoint(self):
<ide> assert os.path.exists(filepath)
<ide> os.remove(filepath)
<ide>
<del> # Case 13: ModelCheckpoint saves model with initial_value_threshold param
<add> # Case 13: ModelCheckpoint saves model with initial_value_threshold
<add> # param
<ide> mode = "auto"
<ide> monitor = "val_loss"
<ide> initial_value_threshold = None
<ide> def test_ModelCheckpoint(self):
<ide> )
<ide> assert not os.path.exists(filepath)
<ide>
<del> # Case 15: ModelCheckpoint doesnt save model if loss was min earlier in auto
<del> # mode
<add> # Case 15: ModelCheckpoint doesnt save model if loss was min earlier in
<add> # auto mode
<ide> mode = "auto"
<ide> monitor = "val_loss"
<ide> initial_value_threshold = 0
<ide> def func(self):
<ide> weights_after_one_more_epoch,
<ide> ) = self._run_load_weights_on_restart_test_common_iterations()
<ide>
<del> # Sleep for some short time period ensuring the files are created with
<del> # a different time (in MacOS OSS the granularity is only 1 second).
<add> # Sleep for some short time period ensuring the files are created
<add> # with a different time (in MacOS OSS the granularity is only 1
<add> # second).
<ide> time.sleep(2)
<ide> callback = keras.callbacks.ModelCheckpoint(
<ide> filepath=filepath,
<ide> def func(self):
<ide> )
<ide> weights_with_one_final_extra_epoch = model.get_weights()
<ide>
<del> # Asserting the weights one epoch after initial fitting and another epoch
<del> # after that are closed, if a ModelCheckpoint with
<del> # load_weights_on_restart=True is given (so the model is restored at the
<del> # beginning of training).
<add> # Asserting the weights one epoch after initial fitting and another
<add> # epoch after that are closed, if a ModelCheckpoint with
<add> # load_weights_on_restart=True is given (so the model is restored at
<add> # the beginning of training).
<ide> self.assertAllClose(
<ide> weights_after_one_more_epoch,
<ide> weights_after_model_restoring_and_one_more_epoch,
<ide> def func(self):
<ide> model.get_weights()
<ide> )
<ide>
<del> # Asserting the weights one epoch after initial fitting and another epoch
<del> # after that are different, if a ModelCheckpoint with
<del> # load_weights_on_restart=False is given (so the model is not restored at
<del> # the beginning of training).
<add> # Asserting the weights one epoch after initial fitting and another
<add> # epoch after that are different, if a ModelCheckpoint with
<add> # load_weights_on_restart=False is given (so the model is not
<add> # restored at the beginning of training).
<ide> self.assertNotAllClose(
<ide> weights_after_one_more_epoch,
<ide> weights_after_model_restoring_and_one_more_epoch,
<ide> def set_weight_to_epoch(self, epoch):
<ide> early_stop.on_epoch_end(epoch, logs={"val_loss": losses[epoch]})
<ide> if early_stop.model.stop_training:
<ide> break
<del> # No epoch improves on the baseline, so we should train for only 5 epochs,
<del> # and restore the second model.
<add> # No epoch improves on the baseline, so we should train for only 5
<add> # epochs, and restore the second model.
<ide> self.assertEqual(epochs_trained, 5)
<ide> self.assertEqual(early_stop.model.get_weights(), 2)
<ide>
<ide> def make_model():
<ide> )
<ide> return model
<ide>
<del> # TODO(psv): Make sure the callback works correctly when min_delta is
<del> # set as 0. Test fails when the order of this callback and assertion is
<del> # interchanged.
<add> # TODO(psv): Make sure the callback works correctly when min_delta
<add> # is set as 0. Test fails when the order of this callback and
<add> # assertion is interchanged.
<ide> model = make_model()
<ide> cbks = [
<ide> keras.callbacks.ReduceLROnPlateau(
<ide> def make_model():
<ide> )
<ide>
<ide> model = make_model()
<del> # This should reduce the LR after the first epoch (due to high epsilon).
<add> # This should reduce the LR after the first epoch (due to high
<add> # epsilon).
<ide> cbks = [
<ide> keras.callbacks.ReduceLROnPlateau(
<ide> monitor="val_loss",
<ide> def make_model():
<ide> os.remove(filepath)
<ide>
<ide> def test_stop_training_csv(self):
<del> # Test that using the CSVLogger callback with the TerminateOnNaN callback
<del> # does not result in invalid CSVs.
<add> # Test that using the CSVLogger callback with the TerminateOnNaN
<add> # callback does not result in invalid CSVs.
<ide> np.random.seed(1337)
<ide> tmpdir = self.get_temp_dir()
<ide> self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
<ide> def data_generator():
<ide>
<ide> values = []
<ide> with open(fp) as f:
<del> # On Windows, due to \r\n line ends, we may end up reading empty lines
<del> # after each line. Skip empty lines.
<add> # On Windows, due to \r\n line ends, we may end up reading empty
<add> # lines after each line. Skip empty lines.
<ide> values = [x for x in csv.reader(f) if x]
<ide>
<ide> assert "nan" in values[-1], "The last epoch was not logged."
<ide> def list_summaries(logdir):
<ide> continue
<ide> for value in event.summary.value:
<ide> tag = value.tag
<del> # Case on the `value` rather than the summary metadata because
<del> # the Keras callback uses `summary_ops_v2` to emit old-style
<del> # summaries. See b/124535134.
<add> # Case on the `value` rather than the summary metadata
<add> # because the Keras callback uses `summary_ops_v2` to emit
<add> # old-style summaries. See b/124535134.
<ide> kind = value.WhichOneof("value")
<ide> container = {
<ide> "simple_value": result.scalars,
<ide> def list_summaries(logdir):
<ide> % (kind, path, event)
<ide> )
<ide> elif kind == "tensor" and tag != "keras":
<del> # Convert the tf2 summary proto to old style for type checking.
<add> # Convert the tf2 summary proto to old style for type
<add> # checking.
<ide> plugin_name = value.metadata.plugin_data.plugin_name
<ide> container = {
<ide> "images": result.images,
<ide> def test_TensorBoard_projector_callback(self):
<ide> "embeddings {\n",
<ide> (
<ide> " tensor_name: "
<del> '"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'
<add> '"layer_with_weights-0/embeddings/.ATTRIBUTES/'
<add> 'VARIABLE_VALUE"\n'
<ide> ),
<ide> ' metadata_path: "metadata.tsv"\n',
<ide> "}\n",
<ide> def test_custom_summary(self):
<ide> self.skipTest("Custom summaries only supported in V2 code path.")
<ide>
<ide> def scalar_v2_mock(name, data, step=None):
<del> """A reimplementation of the scalar plugin to avoid circular deps."""
<add> """A reimplementation of the scalar plugin to avoid circular
<add> deps."""
<ide> metadata = tf.compat.v1.SummaryMetadata()
<ide> # Should match value in tensorboard/plugins/scalar/metadata.py.
<ide> metadata.plugin_data.plugin_name = "scalars"
<ide> def test_TensorBoard_autoTrace(self):
<ide> def test_TensorBoard_autoTrace_outerProfiler(self):
<ide> """Runs a profiler session that interferes with the one from the callback.
<ide>
<del> The callback will not generate a profile but execution will proceed without
<del> crashing due to unhandled exceptions.
<add> The callback will not generate a profile but execution will proceed
<add> without crashing due to unhandled exceptions.
<ide> """
<ide> tf.profiler.experimental.start(logdir="")
<ide> model = self._get_seq_model()
<ide> def test_using_checkpoint_management_latest_checkpoint(self):
<ide> f.write("foo bar")
<ide>
<ide> # The result returned from checkpoint_management.latest_checkpoint takes
<del> # priority, so even if it was written earlier, we should still return that.
<add> # priority, so even if it was written earlier, we should still return
<add> # that.
<ide> self.assertEqual(
<ide> keras.callbacks.ModelCheckpoint(
<ide> None
<ide> def keras_model(self, *args, **kwargs):
<ide> keras.callbacks.keras_model_summary(*args, **kwargs)
<ide> writer.close()
<ide> events = events_from_logdir(logdir)
<del> # The first event contains no summary values. The written content goes to
<del> # the second event.
<add> # The first event contains no summary values. The written content goes
<add> # to the second event.
<ide> return events[1]
<ide>
<ide> @test_utils.run_v2_only
<ide> def call(self, inputs):
<ide> x = self.dense(inputs)
<ide> return self.activation(x)
<ide>
<del> # Intentionally erroring out at json serialization to test the warning.
<add> # Intentionally erroring out at json serialization to test the
<add> # warning.
<ide> def get_config(self):
<ide> raise NotImplementedError
<ide>
<ide><path>keras/callbacks_v1.py
<ide> class TensorBoard(callbacks.TensorBoard):
<ide> can become quite large when write_graph is set to True.
<ide> write_grads: whether to visualize gradient histograms in TensorBoard.
<ide> `histogram_freq` must be greater than 0.
<del> batch_size: size of batch of inputs to feed to the network for histograms
<del> computation.
<add> batch_size: size of batch of inputs to feed to the network for
<add> histograms computation.
<ide> write_images: whether to write model weights to visualize as image in
<ide> TensorBoard.
<del> embeddings_freq: frequency (in epochs) at which selected embedding layers
<del> will be saved. If set to 0, embeddings won't be computed. Data to be
<del> visualized in TensorBoard's Embedding tab must be passed as
<add> embeddings_freq: frequency (in epochs) at which selected embedding
<add> layers will be saved. If set to 0, embeddings won't be computed. Data
<add> to be visualized in TensorBoard's Embedding tab must be passed as
<ide> `embeddings_data`.
<del> embeddings_layer_names: a list of names of layers to keep eye on. If None
<del> or empty list all the embedding layer will be watched.
<del> embeddings_metadata: a dictionary which maps layer name to a file name in
<del> which metadata for this embedding layer is saved.
<add> embeddings_layer_names: a list of names of layers to keep eye on. If
<add> None or empty list all the embedding layer will be watched.
<add> embeddings_metadata: a dictionary which maps layer name to a file name
<add> in which metadata for this embedding layer is saved.
<ide> [Here are details](
<ide> https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
<ide> about metadata files format. In case if the same metadata file is
<ide> used for all embedding layers, string can be passed.
<ide> embeddings_data: data to be embedded at layers specified in
<del> `embeddings_layer_names`. Numpy array (if the model has a single input)
<del> or list of Numpy arrays (if the model has multiple inputs). Learn more
<del> about embeddings [in this guide](
<del> https://www.tensorflow.org/programmers_guide/embedding).
<add> `embeddings_layer_names`. Numpy array (if the model has a single
<add> input) or list of Numpy arrays (if the model has multiple inputs).
<add> Learn more about embeddings [in this guide](
<add> https://www.tensorflow.org/programmers_guide/embedding).
<ide> update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
<del> writes the losses and metrics to TensorBoard after each batch. The same
<del> applies for `'epoch'`. If using an integer, let's say `1000`, the
<add> writes the losses and metrics to TensorBoard after each batch. The
<add> same applies for `'epoch'`. If using an integer, let's say `1000`, the
<ide> callback will write the metrics and losses to TensorBoard every 1000
<ide> samples. Note that writing too frequently to TensorBoard can slow down
<ide> your training.
<ide> def set_model(self, model):
<ide> self.embeddings_data, model.input_names
<ide> )
<ide>
<del> # If embedding_layer_names are not provided, get all of the embedding
<del> # layers from the model.
<add> # If embedding_layer_names are not provided, get all of the
<add> # embedding layers from the model.
<ide> embeddings_layer_names = self.embeddings_layer_names
<ide> if not embeddings_layer_names:
<ide> embeddings_layer_names = [
<ide> def set_model(self, model):
<ide> )
<ide>
<ide> # TODO(psv): Add integration tests to test embedding visualization
<del> # with TensorBoard callback. We are unable to write a unit test for this
<del> # because TensorBoard dependency assumes TensorFlow package is installed.
<add> # with TensorBoard callback. We are unable to write a unit test for
<add> # this because TensorBoard dependency assumes TensorFlow package is
<add> # installed.
<ide> config = projector.ProjectorConfig()
<ide> for layer_name, tensor in embeddings_vars.items():
<ide> embedding = config.embeddings.add()
<ide> def on_train_begin(self, logs=None):
<ide> pass
<ide>
<ide> def on_epoch_begin(self, epoch, logs=None):
<del> """Add histogram op to Model eval_function callbacks, reset batch count."""
<add> """Add histogram op to Model eval_function callbacks, reset batch
<add> count."""
<ide>
<ide> # check if histogram summary should be run for this epoch
<ide> if self.histogram_freq and epoch % self.histogram_freq == 0:
<ide> def on_epoch_begin(self, epoch, logs=None):
<ide> # pylint: enable=protected-access
<ide>
<ide> def on_epoch_end(self, epoch, logs=None):
<del> """Checks if summary ops should run next epoch, logs scalar summaries."""
<add> """Checks if summary ops should run next epoch, logs scalar
<add> summaries."""
<ide>
<ide> # don't output batch_size and
<ide> # batch number as TensorBoard summaries
<ide><path>keras/callbacks_v1_test.py
<ide> def data_generator(train):
<ide> metrics=["accuracy"],
<ide> )
<ide>
<del> # we must generate new callbacks for each test, as they aren't stateless
<add> # we must generate new callbacks for each test, as they aren't
<add> # stateless
<ide> def callbacks_factory(histogram_freq):
<ide> return [
<ide> callbacks_v1.TensorBoard(
<ide><path>keras/constraints.py
<ide> class should override the `__call__` method, which takes a single
<ide>
<ide> >>> weight = tf.constant((-1.0, 1.0))
<ide> >>> NonNegative()(weight)
<del> <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.], dtype=float32)>
<add> <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.],
<add> dtype=float32)>
<ide>
<ide> >>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
<ide> """
<ide> class MinMaxNorm(Constraint):
<ide> Constrains the weights incident to each hidden unit
<ide> to have the norm between a lower bound and an upper bound.
<ide>
<del> Also available via the shortcut function `tf.keras.constraints.min_max_norm`.
<add> Also available via the shortcut function
<add> `tf.keras.constraints.min_max_norm`.
<ide>
<ide> Args:
<ide> min_value: the minimum norm for the incoming weights.
<ide> class RadialConstraint(Constraint):
<ide> ```
<ide>
<ide> This constraint can be applied to any `Conv2D` layer version, including
<del> `Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"` or
<del> `"channels_first"` data format. The method assumes the weight tensor is of
<del> shape `(rows, cols, input_depth, output_depth)`.
<add> `Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"`
<add> or `"channels_first"` data format. The method assumes the weight tensor is
<add> of shape `(rows, cols, input_depth, output_depth)`.
<ide> """
<ide>
<ide> @doc_controls.do_not_generate_docs
<ide> def __call__(self, w):
<ide> )
<ide>
<ide> def _kernel_constraint(self, kernel):
<del> """Radially constraints a kernel with shape (height, width, channels)."""
<add> """Radially constraints a kernel with shape (height, width,
<add> channels)."""
<ide> padding = backend.constant([[1, 1], [1, 1]], dtype="int32")
<ide>
<ide> kernel_shape = backend.shape(kernel)[0]
<ide><path>keras/losses.py
<ide> class Loss:
<ide> """Loss base class.
<ide>
<ide> To be implemented by subclasses:
<del> * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
<add> * `call()`: Contains the logic for loss calculation using `y_true`,
<add> `y_pred`.
<ide>
<ide> Example subclass implementation:
<ide>
<ide> def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE`
<ide> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> https://www.tensorflow.org/tutorials/distribute/custom_training)
<add> for more details.
<ide> name: Optional name for the instance.
<ide> """
<ide> losses_utils.ReductionV2.validate(reduction)
<ide> def __call__(self, y_true, y_pred, sample_weight=None):
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
<ide> sample_weight: Optional `sample_weight` acts as a coefficient for the
<ide> loss. If a scalar is provided, then the loss is simply scaled by the
<del> given value. If `sample_weight` is a tensor of size `[batch_size]`, then
<del> the total loss for each sample of the batch is rescaled by the
<add> given value. If `sample_weight` is a tensor of size `[batch_size]`,
<add> then the total loss for each sample of the batch is rescaled by the
<ide> corresponding element in the `sample_weight` vector. If the shape of
<del> `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to
<del> this shape), then each loss element of `y_pred` is scaled
<del> by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
<del> functions reduce by 1 dimension, usually axis=-1.)
<add> `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
<add> broadcasted to this shape), then each loss element of `y_pred` is
<add> scaled by the corresponding value of `sample_weight`. (Note
<add> on`dN-1`: all loss functions reduce by 1 dimension, usually
<add> axis=-1.)
<ide>
<ide> Returns:
<ide> Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
<del> shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
<del> because all loss functions reduce by 1 dimension, usually axis=-1.)
<add> shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note
<add> `dN-1` because all loss functions reduce by 1 dimension, usually
<add> axis=-1.)
<ide>
<ide> Raises:
<ide> ValueError: If the shape of `sample_weight` is invalid.
<ide> """
<del> # If we are wrapping a lambda function strip '<>' from the name as it is not
<del> # accepted in scope name.
<add> # If we are wrapping a lambda function strip '<>' from the name as it is
<add> # not accepted in scope name.
<ide> graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
<ide> y_true, y_pred, sample_weight
<ide> )
<ide> def _get_reduction(self):
<ide> ):
<ide> raise ValueError(
<ide> "Please use `tf.keras.losses.Reduction.SUM` or "
<del> "`tf.keras.losses.Reduction.NONE` for loss reduction when losses are "
<del> "used with `tf.distribute.Strategy` outside of the built-in training "
<del> "loops. You can implement "
<del> "`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch "
<del> "size like:\n```\nwith strategy.scope():\n"
<add> "`tf.keras.losses.Reduction.NONE` for loss reduction when "
<add> "losses are used with `tf.distribute.Strategy` outside "
<add> "of the built-in training loops. You can implement "
<add> "`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using "
<add> "global batch size like:\n```\nwith strategy.scope():\n"
<ide> " loss_obj = tf.keras.losses.CategoricalCrossentropy("
<ide> "reduction=tf.keras.losses.Reduction.NONE)\n....\n"
<ide> " loss = tf.reduce_sum(loss_obj(labels, predictions)) * "
<ide> "(1. / global_batch_size)\n```\nPlease see "
<del> "https://www.tensorflow.org/tutorials/distribute/custom_training"
<add> "https://www.tensorflow.org/tutorials"
<add> "/distribute/custom_training"
<ide> " for more details."
<ide> )
<ide>
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance.
<ide> **kwargs: The keyword arguments that are passed on to `fn`.
<ide> """
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<del> name: Optional name for the instance. Defaults to 'mean_squared_error'.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<add> name: Optional name for the instance. Defaults to
<add> 'mean_squared_error'.
<ide> """
<ide> super().__init__(mean_squared_error, name=name, reduction=reduction)
<ide>
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<del> name: Optional name for the instance. Defaults to 'mean_absolute_error'.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<add> name: Optional name for the instance. Defaults to
<add> 'mean_absolute_error'.
<ide> """
<ide> super().__init__(mean_absolute_error, name=name, reduction=reduction)
<ide>
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to
<ide> 'mean_absolute_percentage_error'.
<ide> """
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to
<ide> 'mean_squared_logarithmic_error'.
<ide> """
<ide> def __init__(
<ide> Args:
<ide> from_logits: Whether to interpret `y_pred` as a tensor of
<ide> [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
<del> assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
<del> label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
<del> we compute the loss between the predicted labels and a smoothed version
<del> of the true labels, where the smoothing squeezes the labels towards 0.5.
<del> Larger values of `label_smoothing` correspond to heavier smoothing.
<del> axis: The axis along which to compute crossentropy (the features axis).
<del> Defaults to -1.
<add> assume that `y_pred` contains probabilities (i.e., values in [0,
<add> 1]).
<add> label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When >
<add> 0, we compute the loss between the predicted labels and a smoothed
<add> version of the true labels, where the smoothing squeezes the labels
<add> towards 0.5. Larger values of `label_smoothing` correspond to
<add> heavier smoothing.
<add> axis: The axis along which to compute crossentropy (the features
<add> axis). Defaults to -1.
<ide> reduction: Type of `tf.keras.losses.Reduction` to apply to
<ide> loss. Default value is `AUTO`. `AUTO` indicates that the reduction
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Name for the op. Defaults to 'binary_crossentropy'.
<ide> """
<ide> super().__init__(
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> `from_logits=False`).
<ide>
<ide> According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
<del> helps to apply a "focal factor" to down-weight easy examples and focus more on
<del> hard examples. By default, the focal tensor is computed as follows:
<add> helps to apply a "focal factor" to down-weight easy examples and focus more
<add> on hard examples. By default, the focal tensor is computed as follows:
<ide>
<ide> `focal_factor = (1 - output) ** gamma` for class 1
<ide> `focal_factor = output ** gamma` for class 0
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> >>> # Example 1: (batch_size = 1, number of samples = 4)
<ide> >>> y_true = [0, 1, 0, 0]
<ide> >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
<del> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=2, from_logits=True)
<add> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=2,
<add> ... from_logits=True)
<ide> >>> loss(y_true, y_pred).numpy()
<ide> 0.691
<ide>
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> >>> y_true = [[0, 1], [0, 0]]
<ide> >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
<ide> >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
<del> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3, from_logits=True)
<add> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3,
<add> ... from_logits=True)
<ide> >>> loss(y_true, y_pred).numpy()
<ide> 0.647
<ide>
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> 0.482
<ide>
<ide> >>> # Using 'sample_weight' attribute with focal effect
<del> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3, from_logits=True)
<add> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3,
<add> ... from_logits=True)
<ide> >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
<ide> 0.133
<ide>
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> 0.097
<ide>
<ide> >>> # Using 'sum' reduction` type.
<del> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=4, from_logits=True,
<add> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=4,
<add> ... from_logits=True,
<ide> ... reduction=tf.keras.losses.Reduction.SUM)
<ide> >>> loss(y_true, y_pred).numpy()
<ide> 1.222
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> 0.914
<ide>
<ide> >>> # Using 'none' reduction type.
<del> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=5, from_logits=True,
<add> >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
<add> ... gamma=5, from_logits=True,
<ide> ... reduction=tf.keras.losses.Reduction.NONE)
<ide> >>> loss(y_true, y_pred).numpy()
<ide> array([0.0017 1.1561], dtype=float32)
<ide> class BinaryFocalCrossentropy(LossFunctionWrapper):
<ide> Args:
<ide> apply_class_balancing: A bool, whether to apply weight balancing on the
<ide> binary classes 0 and 1.
<del> alpha: A weight balancing factor for class 1, default is `0.25` as mentioned
<del> in reference [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
<del> The weight for class 0 is `1.0 - alpha`.
<add> alpha: A weight balancing factor for class 1, default is `0.25` as
<add> mentioned in reference [Lin et al., 2018](
<add> https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
<add> `1.0 - alpha`.
<ide> gamma: A focusing parameter used to compute the focal factor, default is
<ide> `2.0` as mentioned in the reference
<ide> [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
<ide> def get_config(self):
<ide> class CategoricalCrossentropy(LossFunctionWrapper):
<ide> """Computes the crossentropy loss between the labels and predictions.
<ide>
<del> Use this crossentropy loss function when there are two or more label classes.
<del> We expect labels to be provided in a `one_hot` representation. If you want to
<del> provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
<del> There should be `# classes` floating point values per feature.
<add> Use this crossentropy loss function when there are two or more label
<add> classes. We expect labels to be provided in a `one_hot` representation. If
<add> you want to provide labels as integers, please use
<add> `SparseCategoricalCrossentropy` loss. There should be `# classes` floating
<add> point values per feature.
<ide>
<ide> In the snippet below, there is `# classes` floating pointing values per
<ide> example. The shape of both `y_pred` and `y_true` are
<ide> class CategoricalCrossentropy(LossFunctionWrapper):
<ide> Usage with the `compile()` API:
<ide>
<ide> ```python
<del> model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy())
<add> model.compile(optimizer='sgd',
<add> loss=tf.keras.losses.CategoricalCrossentropy())
<ide> ```
<ide> """
<ide>
<ide> def __init__(
<ide> meaning the confidence on label values are relaxed. For example, if
<ide> `0.1`, use `0.1 / num_classes` for non-target labels and
<ide> `0.9 + 0.1 / num_classes` for target labels.
<del> axis: The axis along which to compute crossentropy (the features axis).
<del> Defaults to -1.
<add> axis: The axis along which to compute crossentropy (the features
<add> axis). Defaults to -1.
<ide> reduction: Type of `tf.keras.losses.Reduction` to apply to
<ide> loss. Default value is `AUTO`. `AUTO` indicates that the reduction
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance.
<ide> Defaults to 'categorical_crossentropy'.
<ide> """
<ide> def __init__(
<ide> class SparseCategoricalCrossentropy(LossFunctionWrapper):
<ide> """Computes the crossentropy loss between the labels and predictions.
<ide>
<del> Use this crossentropy loss function when there are two or more label classes.
<del> We expect labels to be provided as integers. If you want to provide labels
<del> using `one-hot` representation, please use `CategoricalCrossentropy` loss.
<del> There should be `# classes` floating point values per feature for `y_pred`
<del> and a single floating point value per feature for `y_true`.
<add> Use this crossentropy loss function when there are two or more label
<add> classes. We expect labels to be provided as integers. If you want to
<add> provide labels using `one-hot` representation, please use
<add> `CategoricalCrossentropy` loss. There should be `# classes` floating point
<add> values per feature for `y_pred` and a single floating point value per
<add> feature for `y_true`.
<ide>
<ide> In the snippet below, there is a single floating point value per example for
<ide> `y_true` and `# classes` floating pointing values per example for `y_pred`.
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to
<ide> 'sparse_categorical_crossentropy'.
<ide> """
<ide> def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name="hinge"):
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'hinge'.
<ide> """
<ide> super().__init__(hinge, name=name, reduction=reduction)
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'squared_hinge'.
<ide> """
<ide> super().__init__(squared_hinge, name=name, reduction=reduction)
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'categorical_hinge'.
<ide> """
<ide> super().__init__(categorical_hinge, name=name, reduction=reduction)
<ide> def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name="poisson"):
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'poisson'.
<ide> """
<ide> super().__init__(poisson, name=name, reduction=reduction)
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'log_cosh'.
<ide> """
<ide> super().__init__(log_cosh, name=name, reduction=reduction)
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'kl_divergence'.
<ide> """
<ide> super().__init__(kl_divergence, name=name, reduction=reduction)
<ide> def __init__(
<ide> option will be determined by the usage context. For almost all cases
<ide> this defaults to `SUM_OVER_BATCH_SIZE`. When used with
<ide> `tf.distribute.Strategy`, outside of built-in training loops such as
<del> `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
<del> will raise an error. Please see this custom training [tutorial](
<del> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<del> more details.
<add> `tf.keras` `compile` and `fit`, using `AUTO` or
<add> `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
<add> training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance. Defaults to 'huber_loss'.
<ide> """
<ide> super().__init__(huber, name=name, reduction=reduction, delta=delta)
<ide> def mean_absolute_percentage_error(y_true, y_pred):
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
<ide>
<ide> Returns:
<del> Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
<add> Mean absolute percentage error values. shape = `[batch_size, d0, ..
<add> dN-1]`.
<ide> """
<ide> y_pred = tf.convert_to_tensor(y_pred)
<ide> y_true = tf.cast(y_true, y_pred.dtype)
<ide> def mean_squared_logarithmic_error(y_true, y_pred):
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
<ide>
<ide> Returns:
<del> Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
<add> Mean squared logarithmic error values. shape = `[batch_size, d0, ..
<add> dN-1]`.
<ide> """
<ide> y_pred = tf.convert_to_tensor(y_pred)
<ide> y_true = tf.cast(y_true, y_pred.dtype)
<ide> def squared_hinge(y_true, y_pred):
<ide> ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))
<ide>
<ide> Args:
<del> y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
<del> If binary (0 or 1) labels are provided we will convert them to -1 or 1.
<del> shape = `[batch_size, d0, .. dN]`.
<add> y_true: The ground truth values. `y_true` values are expected to be -1 or
<add> 1. If binary (0 or 1) labels are provided we will convert them to -1 or
<add> 1. shape = `[batch_size, d0, .. dN]`.
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
<ide>
<ide> Returns:
<ide> def hinge(y_true, y_pred):
<ide> ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))
<ide>
<ide> Args:
<del> y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
<del> If binary (0 or 1) labels are provided they will be converted to -1 or 1.
<del> shape = `[batch_size, d0, .. dN]`.
<add> y_true: The ground truth values. `y_true` values are expected to be -1 or
<add> 1. If binary (0 or 1) labels are provided they will be converted to -1
<add> or 1. shape = `[batch_size, d0, .. dN]`.
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
<ide>
<ide> Returns:
<ide> def log_cosh(y_true, y_pred):
<ide> >>> x = y_pred - y_true
<ide> >>> assert np.allclose(
<ide> ... loss.numpy(),
<del> ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), axis=-1),
<add> ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.),
<add> ... axis=-1),
<ide> ... atol=1e-5)
<ide>
<ide> Args:
<ide> def categorical_crossentropy(
<ide> Args:
<ide> y_true: Tensor of one-hot true targets.
<ide> y_pred: Tensor of predicted targets.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
<ide> example, if `0.1`, use `0.1 / num_classes` for non-target labels
<ide> and `0.9 + 0.1 / num_classes` for target labels.
<ide> def categorical_crossentropy(
<ide> """
<ide> if isinstance(axis, bool):
<ide> raise ValueError(
<del> f"`axis` must be of type `int`. Received: axis={axis} of type {type(axis)}"
<add> f"`axis` must be of type `int`. "
<add> f"Received: axis={axis} of type {type(axis)}"
<ide> )
<ide> y_pred = tf.convert_to_tensor(y_pred)
<ide> y_true = tf.cast(y_true, y_pred.dtype)
<ide> def _ragged_tensor_categorical_crossentropy(
<ide> Args:
<ide> y_true: Tensor of one-hot true targets.
<ide> y_pred: Tensor of predicted targets.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
<ide> example, if `0.1`, use `0.1 / num_classes` for non-target labels
<ide> and `0.9 + 0.1 / num_classes` for target labels.
<ide> def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
<ide> Args:
<ide> y_true: Ground truth values.
<ide> y_pred: The predicted values.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> axis: Defaults to -1. The dimension along which the entropy is
<ide> computed.
<ide>
<ide> def binary_crossentropy(
<ide> Args:
<ide> y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
<ide> y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
<ide> squeezing them towards 0.5. That is, using `1. - 0.5 * label_smoothing`
<del> for the target class and `0.5 * label_smoothing` for the non-target class.
<add> for the target class and `0.5 * label_smoothing` for the non-target
<add> class.
<ide> axis: The axis along which the mean is computed. Defaults to -1.
<ide>
<ide> Returns:
<ide> def _ragged_tensor_binary_crossentropy(
<ide> Args:
<ide> y_true: Tensor of one-hot true targets.
<ide> y_pred: Tensor of predicted targets.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
<ide> example, if `0.1`, use `0.1 / num_classes` for non-target labels
<ide> and `0.9 + 0.1 / num_classes` for target labels.
<ide> def binary_focal_crossentropy(
<ide>
<ide> >>> y_true = [[0, 1], [0, 0]]
<ide> >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
<del> >>> loss = tf.keras.losses.binary_focal_crossentropy(y_true, y_pred, gamma=2)
<add> >>> loss = tf.keras.losses.binary_focal_crossentropy(y_true, y_pred,
<add> ... gamma=2)
<ide> >>> assert loss.shape == (2,)
<ide> >>> loss.numpy()
<ide> array([0.330, 0.206], dtype=float32)
<ide> def binary_focal_crossentropy(
<ide> y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
<ide> apply_class_balancing: A bool, whether to apply weight balancing on the
<ide> binary classes 0 and 1.
<del> alpha: A weight balancing factor for class 1, default is `0.25` as mentioned
<del> in the reference. The weight for class 0 is `1.0 - alpha`.
<del> gamma: A focusing parameter, default is `2.0` as mentioned in the reference.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<del> label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the labels
<del> by squeezing them towards `0.5`, i.e., using `1. - 0.5 * label_smoothing`
<del> for the target class and `0.5 * label_smoothing` for the non-target class.
<add> alpha: A weight balancing factor for class 1, default is `0.25` as
<add> mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
<add> gamma: A focusing parameter, default is `2.0` as mentioned in the
<add> reference.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<add> label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the
<add> labels by squeezing them towards `0.5`, i.e., using `1. - 0.5 *
<add> label_smoothing` for the target class and `0.5 * label_smoothing` for
<add> the non-target class.
<ide> axis: The axis along which the mean is computed. Defaults to `-1`.
<ide>
<ide> Returns:
<ide> def _ragged_tensor_binary_focal_crossentropy(
<ide> y_pred: Tensor of predicted targets.
<ide> apply_class_balancing: A bool, whether to apply weight balancing on the
<ide> binary classes 0 and 1.
<del> alpha: A weight balancing factor for class 1, default is `0.25` as mentioned
<del> in the reference [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
<del> The weight for class 0 is `1.0 - alpha`.
<del> gamma: A focusing parameter, default is `2.0` as mentioned in the reference.
<del> from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
<del> we assume that `y_pred` encodes a probability distribution.
<add> alpha: A weight balancing factor for class 1, default is `0.25` as
<add> mentioned in the reference [Lin et al., 2018](
<add> https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
<add> `1.0 - alpha`.
<add> gamma: A focusing parameter, default is `2.0` as mentioned in the
<add> reference.
<add> from_logits: Whether `y_pred` is expected to be a logits tensor. By
<add> default, we assume that `y_pred` encodes a probability distribution.
<ide> label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels. For
<ide> example, if `0.1`, use `0.1 / num_classes` for non-target labels
<ide> and `0.9 + 0.1 / num_classes` for target labels.
<ide> class CosineSimilarity(LossFunctionWrapper):
<ide> Usage with the `compile()` API:
<ide>
<ide> ```python
<del> model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
<add> model.compile(optimizer='sgd',
<add> loss=tf.keras.losses.CosineSimilarity(axis=1))
<ide> ```
<ide>
<ide> Args:
<ide> axis: The axis along which the cosine similarity is computed
<ide> (the features axis). Defaults to -1.
<ide> reduction: Type of `tf.keras.losses.Reduction` to apply to loss.
<ide> Default value is `AUTO`. `AUTO` indicates that the reduction option will
<del> be determined by the usage context. For almost all cases this defaults to
<del> `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of
<del> built-in training loops such as `tf.keras` `compile` and `fit`, using
<del> `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
<del> custom training [tutorial]
<del> (https://www.tensorflow.org/tutorials/distribute/custom_training) for more
<del> details.
<add> be determined by the usage context. For almost all cases this defaults
<add> to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`,
<add> outside of built-in training loops such as `tf.keras` `compile` and
<add> `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please
<add> see this custom training [tutorial](
<add> https://www.tensorflow.org/tutorials/distribute/custom_training) for
<add> more details.
<ide> name: Optional name for the instance.
<ide> """
<ide>
<ide> def deserialize(name, custom_objects=None):
<ide> Args:
<ide> name: Loss configuration.
<ide> custom_objects: Optional dictionary mapping names (strings) to custom
<del> objects (classes and functions) to be considered during deserialization.
<add> objects (classes and functions) to be considered during
<add> deserialization.
<ide>
<ide> Returns:
<ide> A Keras `Loss` instance or a loss function.
<ide><path>keras/losses_test.py
<ide> def test_sparse_categorical_crossentropy_loss(self):
<ide> def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(
<ide> self,
<ide> ):
<del> # This test only runs in graph because the TF op layer is not supported yet
<del> # for sparse ops.
<add> # This test only runs in graph because the TF op layer is not supported
<add> # yet for sparse ops.
<ide> t = backend.placeholder()
<ide> p = backend.placeholder()
<ide> o = losses.sparse_categorical_crossentropy(t, p)
<ide> def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(
<ide> @test_combinations.generate(test_combinations.combine(mode=["eager"]))
<ide> def test_sparse_categorical_crossentropy_with_float16(self):
<ide> # See https://github.com/keras-team/keras/issues/15012 for more details.
<del> # we don't cast y_true to have same dtype as y_pred, since y_pred could be
<del> # float16 which has a small upbound, and the casting could cause an
<add> # we don't cast y_true to have the same dtype as y_pred, since y_pred
<add> # could be float16, which has a small upper bound, and the casting could cause an
<ide> # underflow. The y_true will be used as int64 anyway.
<ide>
<del> # create 2 observations with 2049 labels, since 2048 is the largest number
<del> # for float16
<add> # create 2 observations with 2049 labels, since 2048 is the largest
<add> # consecutive integer exactly representable in float16
<ide> y_true = [0, 2049]
<ide> # should result in a loss close to 0 since y_true is predicted perfectly
<ide> y_pred = np.zeros((2, 2050))
<ide> y_pred[0][0] = 1
<ide> y_pred[1][2049] = 1
<ide> y_pred_16 = tf.convert_to_tensor(y_pred, dtype=tf.float16)
<ide>
<del> # If we did a cast for y_true to float16 in SparseCategoricalCrossentropy,
<del> # then the loss will not be zero.
<add> # If we did a cast for y_true to float16 in
<add> # SparseCategoricalCrossentropy, then the loss will not be zero.
<ide> scce = losses.SparseCategoricalCrossentropy()
<ide> self.assertAllClose(scce(y_true, y_pred_16).numpy(), 0.0, atol=1e-3)
<ide>
<ide> def tf_functioned_loss_fn(y_true, y_pred, sample_weight=None):
<ide>
<ide> def test_loss_wrapper_dtype(self):
<ide> # Make sure the loss wrapper doesn't cause any numerical precision loss
<del> # during calculation. See https://github.com/keras-team/keras/issues/15791
<add> # during calculation. See
<add> # https://github.com/keras-team/keras/issues/15791
<ide> x = tf.convert_to_tensor([[2.1]], dtype=tf.float64)
<ide> y_true = tf.square(x)
<ide> y_pred = tf.convert_to_tensor([[3.68]], dtype=tf.float64)
<ide> def test_unweighted(self):
<ide> obj = losses.BinaryFocalCrossentropy(gamma=2.0)
<ide> loss = obj(y_true, y_pred)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2],
<add> # [0.7, 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
<ide> def test_scalar_weighted(self):
<ide> obj = losses.BinaryFocalCrossentropy(gamma=2.0)
<ide> loss = obj(y_true, y_pred, sample_weight=1.23)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2],
<add> # [0.7, 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
<ide> def test_scalar_weighted(self):
<ide> # = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<del> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] * sample_weight
<add> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
<add> # sample_weight
<ide>
<ide> # focalLoss = focal * bceLoss =
<del> # [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] * sample_weight
<add> # [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] *
<add> # sample_weight
<ide> # Reduced loss = 0.799 * 3.21 = 2.565
<ide>
<ide> self.assertAlmostEqual(self.evaluate(loss), 2.565, 3)
<ide> def test_sample_weighted(self):
<ide> obj = losses.BinaryFocalCrossentropy(gamma=2.0)
<ide> loss = obj(y_true, y_pred, sample_weight=sample_weight)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<ide> def test_sample_weighted(self):
<ide> # = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<del> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] * sample_weight
<add> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
<add> # sample_weight
<ide>
<ide> # focalLoss = focal * bceLoss =
<del> # [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] * sample_weight
<add> # [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] *
<add> # sample_weight
<ide> # focalLoss = [[0.00144, 2.72916, 3.0168], [6.8e-7, 0.01122, 3.4e-8]]
<ide> # Reduced loss = 0.799
<ide>
<ide> def test_no_reduction(self):
<ide> )
<ide> loss = obj(y_true, y_pred)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
<ide> def test_ragged_tensors(self):
<ide> obj = losses.BinaryFocalCrossentropy(gamma=2.0)
<ide> loss = obj(y_true, y_pred)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7], [0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7],
<add> # [0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64, 0.09], [0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609, 0.357], [0.223]]
<ide> def test_unweighted(self):
<ide> )
<ide> loss = obj(y_true, y_pred)
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
<ide> # = [[0.4, 0.6], [0.4, 0.6]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide> def test_scalar_weighted(self):
<ide>
<ide> # alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
<ide> # = [[0.6, 0.4], [0.6, 0.4]]
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
<ide> def test_scalar_weighted(self):
<ide> # = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<del> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] * sample_weight
<add> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
<add> # sample_weight
<ide>
<ide> # weightedfocalLoss = alpha_weight * focal * bceLoss =
<del> # [[0.00024, 0.45486, 2.0112], [0.00000016, 0.00066, 0.000000008]] * 3.21
<add> # [[0.00024, 0.45486, 2.0112], [0.00000016, 0.00066, 0.000000008]] *
<add> # 3.21
<ide> # Reduced loss = 0.41116 * 3.21 = 1.32
<ide>
<ide> self.assertAlmostEqual(self.evaluate(loss), 1.32, 3)
<ide> def test_sample_weighted(self):
<ide>
<ide> # alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
<ide> # = [[0.1, 0.9], [0.1, 0.9]]
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<ide> def test_sample_weighted(self):
<ide> # = [[0.2, 0.2, 0.8], [0.8, 0.2, 0.8]]
<ide>
<ide> # bceLoss = -log(p_t) * sample_weight
<del> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] * sample_weight
<add> # = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
<add> # sample_weight
<ide>
<ide> # focalLoss = alpha_weight * focal * bceLoss =
<ide> # [[0.00024, 0.45486, 2.0112], [1.6e-7, 6.6e-4, 8e-9]] * sample_weight
<del> # focalLoss = [[0.000288, 0.5458, 2.41344], [5.44e-7, 2.444e-3, 2.72e-8]]
<add> # focalLoss = [[0.000288, 0.5458, 2.41344], [5.44e-7, 2.444e-3,
<add> # 2.72e-8]]
<ide> # Reduced loss = 0.49366
<ide>
<ide> self.assertAlmostEqual(self.evaluate(loss), 0.49366, 3)
<ide> def test_no_reduction(self):
<ide> # alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
<ide> # = [[0.6, 0.4], [0.6, 0.4]]
<ide>
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7, 0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
<add> # 0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
<ide> def test_ragged_tensors(self):
<ide>
<ide> # alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
<ide> # = [[0.1, 0.9, 0.1], [0.9]]
<del> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7], [0.8]]
<add> # p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7],
<add> # [0.8]]
<ide> # focal = (1 - p_t) ** gamma = [[0.01, 0.64, 0.09], [0.04]]
<ide>
<ide> # bceLoss = -log(p_t) = [[0.105, 1.609, 0.357], [0.223]]
<ide> def test_unweighted(self):
<ide> # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
<ide> # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
<ide> # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
<del> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
<add> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
<add> # 0.4]]
<ide> # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
<ide> # [0.5625, 0, 0.25, 0.16]]
<ide> # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
<ide> def test_scalar_weighted(self):
<ide> # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
<ide> # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
<ide> # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
<del> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
<add> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
<add> # 0.4]]
<ide> # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
<ide> # [0.5625, 0, 0.25, 0.16]]
<ide> # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
<ide> def test_sample_weighted(self):
<ide> # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
<ide> # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
<ide> # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
<del> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
<add> # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
<add> # 0.4]]
<ide> # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
<ide> # [0.5625, 0, 0.25, 0.16]]
<ide> # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
<ide> def test_unweighted(self):
<ide> loss = cat_hinge_obj(y_true, y_pred)
<ide>
<ide> # pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
<del> # neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48]
<add> # neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0,
<add> # 48]
<ide> # cat_hinge = max(0., neg - pos + 1.) = [0, 65]
<ide> # reduced_loss = (0 + 65)/2 = 32.5
<ide> self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
<ide><path>keras/regularizers.py
<ide> class Regularizer:
<ide> activity during optimization. These penalties are summed into the loss
<ide> function that the network optimizes.
<ide>
<del> Regularization penalties are applied on a per-layer basis. The exact API will
<del> depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and
<del> `Conv3D`) have a unified API.
<add> Regularization penalties are applied on a per-layer basis. The exact API
<add> will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
<add> and `Conv3D`) have a unified API.
<ide>
<ide> These layers expose 3 keyword arguments:
<ide>
<ide> - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
<ide> - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
<del> - `activity_regularizer`: Regularizer to apply a penalty on the layer's output
<add> - `activity_regularizer`: Regularizer to apply a penalty on the layer's
<add> output
<ide>
<ide> All layers (including custom layers) expose `activity_regularizer` as a
<ide> settable property, whether or not it is in the constructor arguments.
<ide>
<ide> The value returned by the `activity_regularizer` is divided by the input
<del> batch size so that the relative weighting between the weight regularizers and
<del> the activity regularizers does not change with the batch size.
<add> batch size so that the relative weighting between the weight regularizers
<add> and the activity regularizers does not change with the batch size.
<ide>
<ide> You can access a layer's regularization penalties by calling `layer.losses`
<ide> after calling the layer on inputs.
<ide> class Regularizer:
<ide> >>> out = layer(tensor)
<ide>
<ide> >>> # The kernel regularization term is 0.25
<del> >>> # The activity regularization term (after dividing by the batch size) is 5
<add> >>> # The activity regularization term (after dividing by the batch size)
<add> >>> # is 5
<ide> >>> tf.math.reduce_sum(layer.losses)
<ide> <tf.Tensor: shape=(), dtype=float32, numpy=5.25>
<ide>
<ide> class Regularizer:
<ide>
<ide> Registration is required for saving and
<ide> loading models to HDF5 format, Keras model cloning, some visualization
<del> utilities, and exporting models to and from JSON. If using this functionality,
<del> you must make sure any python process running your model has also defined
<del> and registered your custom regularizer.
<add> utilities, and exporting models to and from JSON. If using this
<add> functionality, you must make sure any python process running your model has
<add> also defined and registered your custom regularizer.
<ide> """
<ide>
<ide> def __call__(self, x):
<ide> class OrthogonalRegularizer(Regularizer):
<ide> Arguments:
<ide> factor: Float. The regularization factor. The regularization penalty will
<ide> be proportional to `factor` times the mean of the dot products between
<del> the L2-normalized rows (if `mode="rows"`, or columns if `mode="columns"`)
<del> of the inputs, excluding the product of each row/column with itself.
<del> Defaults to 0.01.
<add> the L2-normalized rows (if `mode="rows"`, or columns if
<add> `mode="columns"`) of the inputs, excluding the product of each
<add> row/column with itself. Defaults to 0.01.
<ide> mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In rows
<ide> mode, the regularization effect seeks to make the rows of the input
<ide> orthogonal to each other. In columns mode, it seeks to make the columns
<ide><path>keras/regularizers_test.py
<ide> def test_regularization_shared_layer_in_different_models(self, regularizer):
<ide>
<ide> # We expect to see 9 losses on the model:
<ide> # - 2 from the 2 add_loss calls on the outer model.
<del> # - 3 from the weight regularizers on the shared_dense layer, unshared_dense
<del> # in inner model 1, unshared_dense in inner model 2.
<add> # - 3 from the weight regularizers on the shared_dense layer,
<add> # unshared_dense in inner model 1, unshared_dense in inner model 2.
<ide> # - 4 from activity regularizers on the shared_dense layer.
<ide> self.assertLen(model.losses, 9)
<ide>
<ide> def test_orthogonal_regularizer(self):
<ide> self.assertAllClose(
<ide> reg_rows(inputs), factor * sum(rows_pairs) / num_row_pairs
<ide> )
<del> # Expected: factor * sum(pairwise_dot_products_of_columns) / num_col_pairs
<add> # Expected: factor * sum(pairwise_dot_products_of_columns) /
<add> # num_col_pairs
<ide> self.assertAllClose(
<ide> reg_cols(inputs), factor * sum(col_pairs) / num_col_pairs
<ide> ) | 13 |
Javascript | Javascript | get $$observe listeners array as own property | a27d827c22b0b6b3ba6b7495cf4fc338c6934b37 | <ide><path>src/ng/compile.js
<ide> function $CompileProvider($provide, $$sanitizeUriProvider) {
<ide> */
<ide> $observe: function(key, fn) {
<ide> var attrs = this,
<del> $$observers = (attrs.$$observers || (attrs.$$observers = {})),
<add> $$observers = (attrs.$$observers || (attrs.$$observers = Object.create(null))),
<ide> listeners = ($$observers[key] || ($$observers[key] = []));
<ide>
<ide> listeners.push(fn);
<ide><path>test/ng/compileSpec.js
<ide> describe('$compile', function() {
<ide> }));
<ide>
<ide>
<add> it('should be able to bind attribute names which are present in Object.prototype', function() {
<add> module(function() {
<add> directive('inProtoAttr', valueFn({
<add> scope: {
<add> 'constructor': '@',
<add> 'toString': '&',
<add>
<add> // Spidermonkey extension, may be obsolete in the future
<add> 'watch': '=',
<add> }
<add> }));
<add> });
<add> inject(function($rootScope) {
<add> expect(function() {
<add> compile('<div in-proto-attr constructor="hello, world" watch="[]" ' +
<add> 'to-string="value = !value"></div>');
<add> }).not.toThrow();
<add> var isolateScope = element.isolateScope();
<add>
<add> expect(typeof isolateScope.constructor).toBe('string');
<add> expect(isArray(isolateScope.watch)).toBe(true);
<add> expect(typeof isolateScope.toString).toBe('function');
<add> expect($rootScope.value).toBeUndefined();
<add> isolateScope.toString();
<add> expect($rootScope.value).toBe(true);
<add> });
<add> });
<add>
<add>
<ide> describe('bind-once', function () {
<ide>
<ide> function countWatches(scope) { | 2 |
Javascript | Javascript | add test for object literal | ac57a25cd981fe3ffcfdfeea1e1ac3fccf787696 | <ide><path>test/ng/directive/ngStyleSpec.js
<ide> describe('ngStyle', function() {
<ide> }));
<ide>
<ide>
<add> it('should support binding for object literals', inject(function($rootScope, $compile) {
<add> element = $compile('<div ng-style="{height: heightStr}"></div>')($rootScope);
<add> $rootScope.$digest();
<add> expect(parseInt(element.css('height') + 0, 10)).toEqual(0); // height could be '' or '0px'
<add> $rootScope.$apply('heightStr = "40px"');
<add> expect(element.css('height')).toBe('40px');
<add>
<add> $rootScope.$apply('heightStr = "100px"');
<add> expect(element.css('height')).toBe('100px');
<add> }));
<add>
<add>
<ide> it('should support lazy one-time binding for object literals', inject(function($rootScope, $compile) {
<ide> element = $compile('<div ng-style="::{height: heightStr}"></div>')($rootScope);
<ide> $rootScope.$digest(); | 1 |
Text | Text | fix typos in changelog | 2e053a5be3d0b7693e624d21541b00ed7f262fa9 | <ide><path>CHANGELOG.md
<ide> * **Critical Bugfix:** Update the version of `prop-types` to fix critical bug. ([@gaearon](https://github.com/gaearon) in [#545c87f](https://github.com/facebook/react/commit/545c87fdc348f82eb0c3830bef715ed180785390))
<ide>
<ide> ### React Test Renderer
<del>* Fix compatibility with Enzyme by exposing `batchedUpdates` on shallow renderer. ([@gaearon](https://github.com/gaearon) in [#9382](https://github.com/facebook/react/commit/69933e25c37cf5453a9ef132177241203ee8d2fd).
<add>* Fix compatibility with Enzyme by exposing `batchedUpdates` on shallow renderer. ([@gaearon](https://github.com/gaearon) in [#9382](https://github.com/facebook/react/commit/69933e25c37cf5453a9ef132177241203ee8d2fd))
<ide>
<ide>
<ide> ## 15.5.3 (April 7, 2017)
<ide>
<ide> ## 15.5.2 (April 7, 2017)
<ide>
<add>**Note: this release has a critical issue and was deprecated. Please update to 15.5.4 or higher.**
<add>
<ide> ### React Addons
<ide> * Fix the production single-file builds to not include the development code. ([@gaearon](https://github.com/gaearon) in [#9385](https://github.com/facebook/react/pull/9383))
<ide> * Apply better minification to production single-file builds. ([@gaearon](https://github.com/gaearon) in [#9385](https://github.com/facebook/react/pull/9383)) | 1 |
Ruby | Ruby | convert magic number to constant | f3aba78ce8436fd09c5bade09092339a21880f7f | <ide><path>lib/active_storage/service/disk_service.rb
<ide> require "pathname"
<ide>
<ide> class ActiveStorage::Service::DiskService < ActiveStorage::Service
<add> CHUNK_SIZE = 65536
<add>
<ide> attr_reader :root
<ide>
<ide> def initialize(root:)
<ide> def initialize(root:)
<ide>
<ide> def upload(key, io)
<ide> File.open(make_path_for(key), "wb") do |file|
<del> while chunk = io.read(65536)
<add> while chunk = io.read(CHUNK_SIZE)
<ide> file.write(chunk)
<ide> end
<ide> end
<ide> def upload(key, io)
<ide> def download(key)
<ide> if block_given?
<ide> File.open(path_for(key)) do |file|
<del> while data = file.read(65536)
<add> while data = file.read(CHUNK_SIZE)
<ide> yield data
<ide> end
<ide> end | 1 |
Ruby | Ruby | handle non-existing $arconn | 4846eeee0f422c885e13325364f8e860a0c1595e | <ide><path>activerecord/test/support/connection.rb
<ide> def self.connection_name
<ide> end
<ide>
<ide> def self.connection_config
<del> config["connections"][connection_name]
<add> config.fetch("connections").fetch(connection_name) do
<add> puts "Connection #{connection_name.inspect} not found. Available connections: #{config['connections'].keys.join(', ')}"
<add> exit 1
<add> end
<ide> end
<ide>
<ide> def self.connect | 1 |
Python | Python | set seed for head_masking test | 7f006cdd875799d7d66a8bda964ab90b0f7ed3f4 | <ide><path>pytorch_transformers/tests/modeling_common_test.py
<ide> def test_headmasking(self):
<ide> if not self.test_head_masking:
<ide> return
<ide>
<add> torch.manual_seed(42)
<ide> config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<ide>
<ide> config.output_attentions = True | 1 |
Ruby | Ruby | fix macos version comparison | 282c0a30b61edc46c22b0b1d50ea59d275b64a63 | <ide><path>Library/Homebrew/os/mac/xcode.rb
<ide> def detect_version
<ide> return MacOS::Xcode.version if MacOS::Xcode.installed? && Xcode::Version.new(MacOS::Xcode.version) < "3.0"
<ide>
<ide> [MAVERICKS_PKG_ID, MAVERICKS_NEW_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID].find do |id|
<del> if Xcode::Version.new(MacOS.version) >= :mavericks.to_s
<add> if MacOS.version >= :mavericks
<ide> next unless File.exist?("#{MAVERICKS_PKG_PATH}/usr/bin/clang")
<ide> end
<ide> version = MacOS.pkgutil_info(id)[/version: (.+)$/, 1] | 1 |
Text | Text | add missing files | 560fae891223a3e7e4063ce41df5442648a38044 | <ide><path>research/object_detection/README.md
<ide> release includes:
<ide> * Region-Based Fully Convolutional Networks (R-FCN) with Resnet 101,
<ide> * Faster RCNN with Resnet 101,
<ide> * Faster RCNN with Inception Resnet v2
<del> * Mask R-CNN with Resnet 101.
<ide> * Frozen weights (trained on the COCO dataset) for each of the above models to
<ide> be used for out-of-the-box inference purposes.
<ide> * A [Jupyter notebook](object_detection_tutorial.ipynb) for performing
<ide><path>research/object_detection/g3doc/detection_model_zoo.md
<ide> # Tensorflow detection model zoo
<ide>
<ide> We provide a collection of detection models pre-trained on the [COCO
<del>dataset](http://mscoco.org) and the [Kitti dataset](http://www.cvlibs.net/datasets/kitti/).
<del>These models can be useful for
<add>dataset](http://mscoco.org), the [Kitti dataset](http://www.cvlibs.net/datasets/kitti/), and the
<add>[Open Images dataset](https://github.com/openimages/dataset). These models can
<add>be useful for
<ide> out-of-the-box inference if you are interested in categories already in COCO
<del>(e.g., humans, cars, etc). They are also useful for initializing your models when
<add>(e.g., humans, cars, etc) or in Open Images (e.g.,
<add>surfboard, jacuzzi, etc). They are also useful for initializing your models when
<ide> training on novel datasets.
<ide>
<ide> In the table below, we list each such pre-trained model including:
<ide> In the table below, we list each such pre-trained model including:
<ide> configuration (these timings were performed using an Nvidia
<ide> GeForce GTX TITAN X card) and should be treated more as relative timings in
<ide> many cases.
<del>* detector performance on subset of the COCO validation set.
<add>* detector performance on subset of the COCO validation set or Open Images test split as measured by the dataset-specific mAP measure.
<ide> Here, higher is better, and we only report bounding box mAP rounded to the
<ide> nearest integer.
<ide> * Output types (currently only `Boxes`)
<ide> Model name
<ide> ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | :---: | :-------------: | :-----:
<ide> [faster_rcnn_resnet101_kitti](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_kitti_2017_11_08.tar.gz) | 79 | 87 | Boxes
<ide>
<add>## Open Images-trained models {#open-images-models}
<add>
<add>Model name | Speed (ms) | Open Images [email protected][^2] | Outputs
<add>----------------------------------------------------------------------------------------------------------------------------------------------------------------- | :---: | :-------------: | :-----:
<add>[faster_rcnn_inception_resnet_v2_atrous_oid](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_2017_11_08.tar.gz) | 727 | 37 | Boxes
<add>[faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid_2017_11_08.tar.gz) | 347 | | Boxes
<add>
<add>
<ide> [^1]: See [MSCOCO evaluation protocol](http://cocodataset.org/#detections-eval).
<add>[^2]: This is PASCAL mAP with a slightly different way of true positives computation: see [Open Images evaluation protocol](evaluation_protocols.md#open-images).
<ide>
<ide><path>research/object_detection/g3doc/installation.md
<ide> Tensorflow Object Detection API depends on the following libraries:
<ide>
<ide> For detailed steps to install Tensorflow, follow the
<ide> [Tensorflow installation instructions](https://www.tensorflow.org/install/).
<del>A typical user can install Tensorflow using one of the following commands:
<add>A typically user can install Tensorflow using one of the following commands:
<ide>
<ide> ``` bash
<ide> # For CPU | 3 |
Go | Go | use len() > 0 instead of != nil | 3183af8b19d2adf29dd3fb80689d764353a3fe00 | <ide><path>builder/internals.go
<ide> func (b *Builder) create() (*daemon.Container, error) {
<ide> b.TmpContainers[c.ID] = struct{}{}
<ide> fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
<ide>
<del> if config.Cmd != nil {
<add> if len(config.Cmd) > 0 {
<ide> // override the entry point that may have been picked up from the base image
<ide> c.Path = config.Cmd[0]
<ide> c.Args = config.Cmd[1:] | 1 |
PHP | PHP | fix lint error | 971b34201747310ce0c8035f2071d9d0639f780f | <ide><path>Cake/Utility/RepositoryAwareTrait.php
<ide> protected function _setModelClass($name) {
<ide> $this->modelClass = Inflector::pluralize($name);
<ide> }
<ide> }
<add>
<ide> /**
<ide> * Loads and constructs repository objects required by this object
<ide> * | 1 |
Javascript | Javascript | keep chunks filenames in production mode | 2eeebacb4c99c5f1e4a22a721a7c8a3dc9538e60 | <ide><path>build/webpack.js
<ide> export default async function getBaseWebpackConfig (dir: string, {dev = false, i
<ide> hotUpdateChunkFilename: 'static/webpack/[id].[hash].hot-update.js',
<ide> hotUpdateMainFilename: 'static/webpack/[hash].hot-update.json',
<ide> // This saves chunks with the name given via `import()`
<del> chunkFilename: isServer ? `${dev ? '[name]' : '[contenthash]'}.js` : `static/chunks/${dev ? '[name]' : '[contenthash]'}.js`,
<add> chunkFilename: isServer ? `${dev ? '[name]' : '[name].[contenthash]'}.js` : `static/chunks/${dev ? '[name]' : '[name].[contenthash]'}.js`,
<ide> strictModuleExceptionHandling: true
<ide> },
<ide> performance: { hints: false },
<ide> export default async function getBaseWebpackConfig (dir: string, {dev = false, i
<ide> // Precompile react / react-dom for development, speeding up webpack
<ide> dev && !isServer && new AutoDllPlugin({
<ide> filename: '[name]_[hash].js',
<del> path: './static/dll',
<add> path: './static/development/dll',
<ide> context: dir,
<ide> entry: {
<ide> dll: [
<ide><path>build/webpack/plugins/build-manifest-plugin.js
<ide> export default class BuildManifestPlugin {
<ide>
<ide> for (const filePath of Object.keys(compilation.assets)) {
<ide> const path = filePath.replace(/\\/g, '/')
<del> if (/^static\/dll\//.test(path)) {
<add> if (/^static\/development\/dll\//.test(path)) {
<ide> assetMap.devFiles.push(path)
<ide> }
<ide> }
<ide><path>test/integration/basic/components/hello-chunkfilename.js
<add>export default () => <div>test chunkfilename</div>
<ide><path>test/integration/basic/pages/dynamic/chunkfilename.js
<add>import dynamic from 'next/dynamic'
<add>
<add>const Hello = dynamic(import(/* webpackChunkName: 'hello-world' */'../../components/hello-chunkfilename'))
<add>
<add>export default Hello
<ide><path>test/integration/basic/test/dynamic.js
<ide> export default (context, render) => {
<ide> })
<ide> })
<ide>
<add> describe('custom chunkfilename', () => {
<add> it('should render the correct filename', async () => {
<add> const $ = await get$('/dynamic/chunkfilename')
<add> expect($('body').text()).toMatch(/test chunkfilename/)
<add> expect($('html').html()).toMatch(/hello-world\.js/)
<add> })
<add>
<add> it('should render the component on client side', async () => {
<add> let browser
<add> try {
<add> browser = await webdriver(context.appPort, '/dynamic/chunkfilename')
<add> await check(() => browser.elementByCss('body').text(), /test chunkfilename/)
<add> } finally {
<add> if (browser) {
<add> browser.close()
<add> }
<add> }
<add> })
<add> })
<add>
<ide> describe('custom loading', () => {
<ide> it('should render custom loading on the server side when `ssr:false` and `loading` is provided', async () => {
<ide> const $ = await get$('/dynamic/no-ssr-custom-loading') | 5 |
Javascript | Javascript | fix deepequal inconsistencies | a8149c47993d8675d5085fc6617dd7f1a47a9ce1 | <ide><path>lib/assert.js
<ide> function innerDeepEqual(actual, expected, strict, memos) {
<ide> position: 0
<ide> };
<ide> } else {
<del> if (memos.actual.has(actual)) {
<del> return memos.actual.get(actual) === memos.expected.get(expected);
<add> // We prevent up to two map.has(x) calls by directly retrieving the value
<add> // and checking for undefined. The map can only contain numbers, so it is
<add> // safe to check for undefined only.
<add> const expectedMemoA = memos.actual.get(actual);
<add> if (expectedMemoA !== undefined) {
<add> const expectedMemoB = memos.expected.get(expected);
<add> if (expectedMemoB !== undefined) {
<add> return expectedMemoA === expectedMemoB;
<add> }
<ide> }
<ide> memos.position++;
<ide> }
<ide><path>test/parallel/test-assert-deep.js
<ide> assertOnlyDeepEqual(
<ide> new Set([undefined])
<ide> );
<ide>
<add>// Circular structures
<add>{
<add> const a = {};
<add> const b = {};
<add> a.a = a;
<add> b.a = {};
<add> b.a.a = a;
<add> assertDeepAndStrictEqual(a, b);
<add>}
<add>
<add>{
<add> const a = new Set();
<add> const b = new Set();
<add> const c = new Set();
<add> a.add(a);
<add> b.add(b);
<add> c.add(a);
<add> assertDeepAndStrictEqual(b, c);
<add>}
<add>
<ide> {
<ide> const values = [
<ide> 123,
<ide><path>test/parallel/test-assert.js
<ide> a.throws(makeBlock(thrower, TypeError), function(err) {
<ide>
<ide> const h = { ref: g };
<ide>
<del> a.throws(makeBlock(a.deepEqual, f, h), /AssertionError/);
<del> a.throws(makeBlock(a.deepStrictEqual, f, h), /AssertionError/);
<add> a.doesNotThrow(makeBlock(a.deepEqual, f, h));
<add> a.doesNotThrow(makeBlock(a.deepStrictEqual, f, h));
<ide> }
<ide> // GH-7178. Ensure reflexivity of deepEqual with `arguments` objects.
<ide> const args = (function() { return arguments; })(); | 3 |
PHP | PHP | update flash tests for options array | 00632a9c349cd04aab6617ce7b7f7e07fb71e126 | <ide><path>tests/TestCase/Controller/Component/FlashComponentTest.php
<ide> public function testSet() {
<ide> $this->assertNull($this->Session->read('Message.flash'));
<ide>
<ide> $this->Flash->set('This is a test message');
<del> $expected = ['message' => 'This is a test message', 'params' => ['element' => null], 'type' => 'info'];
<add> $expected = [
<add> 'message' => 'This is a test message',
<add> 'key' => 'flash',
<add> 'element' => null,
<add> 'class' => 'info',
<add> 'params' => []
<add> ];
<ide> $result = $this->Session->read('Message.flash');
<ide> $this->assertEquals($expected, $result);
<ide>
<del> $this->Flash->set('This is a test message', 'test', ['foo' => 'bar']);
<del> $expected = ['message' => 'This is a test message','params' => ['foo' => 'bar', 'element' => 'test'], 'type' => 'info'];
<add> $this->Flash->set('This is a test message', ['element' => 'test', 'params' => ['foo' => 'bar']]);
<add> $expected = [
<add> 'message' => 'This is a test message',
<add> 'key' => 'flash',
<add> 'element' => 'test',
<add> 'class' => 'info',
<add> 'params' => ['foo' => 'bar']
<add> ];
<ide> $result = $this->Session->read('Message.flash');
<ide> $this->assertEquals($expected, $result);
<ide>
<del> $this->Flash->set('This is a test message', 'MyPlugin.alert');
<del> $expected = ['message' => 'This is a test message', 'params' => ['element' => 'MyPlugin.alert'], 'type' => 'info'];
<add> $this->Flash->set('This is a test message', ['element' => 'MyPlugin.alert']);
<add> $expected = [
<add> 'message' => 'This is a test message',
<add> 'key' => 'flash',
<add> 'element' => 'MyPlugin.alert',
<add> 'class' => 'info',
<add> 'params' => []
<add> ];
<ide> $result = $this->Session->read('Message.flash');
<ide> $this->assertEquals($expected, $result);
<ide>
<del> $this->Flash->set('This is a test message', null, [], 'foobar');
<del> $expected = ['message' => 'This is a test message', 'params' => ['element' => null], 'type' => 'info'];
<add> $this->Flash->set('This is a test message', ['key' => 'foobar']);
<add> $expected = [
<add> 'message' => 'This is a test message',
<add> 'key' => 'foobar',
<add> 'element' => null,
<add> 'class' => 'info',
<add> 'params' => []
<add> ];
<ide> $result = $this->Session->read('Message.foobar');
<ide> $this->assertEquals($expected, $result);
<ide> }
<ide> public function testSetWithException() {
<ide> $this->assertNull($this->Session->read('Message.flash'));
<ide>
<ide> $this->Flash->set(new \Exception('This is a test message'));
<del> $expected = ['message' => 'This is a test message', 'params' => ['element' => null], 'type' => 'info'];
<add> $expected = [
<add> 'message' => 'This is a test message',
<add> 'key' => 'flash',
<add> 'element' => null,
<add> 'class' => 'info',
<add> 'params' => []
<add> ];
<ide> $result = $this->Session->read('Message.flash');
<ide> $this->assertEquals($expected, $result);
<ide> }
<ide><path>tests/TestCase/View/Helper/FlashHelperTest.php
<ide> public function setUp() {
<ide> $session->write(array(
<ide> 'Message' => array(
<ide> 'flash' => array(
<del> 'type' => 'info',
<del> 'params' => array(),
<del> 'message' => 'This is a calling'
<add> 'key' => 'flash',
<add> 'message' => 'This is a calling',
<add> 'element' => null,
<add> 'class' => 'info',
<add> 'params' => array()
<ide> ),
<ide> 'notification' => array(
<del> 'type' => 'info',
<add> 'key' => 'notification',
<add> 'message' => 'This is a test of the emergency broadcasting system',
<add> 'element' => 'flash_helper',
<add> 'class' => 'info',
<ide> 'params' => array(
<ide> 'title' => 'Notice!',
<del> 'name' => 'Alert!',
<del> 'element' => 'session_helper'
<del> ),
<del> 'message' => 'This is a test of the emergency broadcasting system',
<add> 'name' => 'Alert!'
<add> )
<ide> ),
<ide> 'classy' => array(
<del> 'type' => 'success',
<del> 'params' => array('class' => 'positive'),
<del> 'message' => 'Recorded'
<add> 'key' => 'classy',
<add> 'message' => 'Recorded',
<add> 'element' => null,
<add> 'class' => 'positive',
<add> 'params' => array()
<ide> )
<ide> )
<ide> ));
<ide> public function testFlash() {
<ide> $expected = '<div id="flash-message" class="message-info">This is a calling</div>';
<ide> $this->assertEquals($expected, $result);
<ide>
<del> $expected = '<div id="classy-message" class="message-success">Recorded</div>';
<add> $expected = '<div id="classy-message" class="message-positive">Recorded</div>';
<ide> $result = $this->Flash->out('classy');
<ide> $this->assertEquals($expected, $result);
<ide>
<ide> public function testFlashAttributes() {
<ide> * @return void
<ide> */
<ide> public function testFlashElementInAttrs() {
<del> $result = $this->Flash->out('flash', array(
<del> 'element' => 'session_helper',
<add> $result = $this->Flash->out('notification', array(
<add> 'element' => 'flash_helper',
<ide> 'params' => array('title' => 'Notice!', 'name' => 'Alert!')
<ide> ));
<del> $expected = "<div id=\"notificationLayout\">\n\t<h1>Alert!</h1>\n\t<h3>Notice!</h3>\n\t<p>This is a calling</p>\n</div>";
<add> $expected = "<div id=\"notificationLayout\">\n\t<h1>Alert!</h1>\n\t<h3>Notice!</h3>\n\t<p>This is a test of the emergency broadcasting system</p>\n</div>";
<ide> $this->assertTextEquals($expected, $result);
<ide> }
<ide> | 2 |
Javascript | Javascript | return this on copy | 991b8fa7c3a1303df69fa8c447e4d021f06751a8 | <ide><path>src/lights/LightShadow.js
<ide> THREE.LightShadow.prototype = {
<ide>
<ide> this.mapSize.copy( source.mapSize );
<ide>
<add> return this;
<add>
<ide> },
<ide>
<ide> clone: function () { | 1 |
Ruby | Ruby | remove unused method alias | ff2d8fece3ff9645d4a26ca285604e597d15db7e | <ide><path>Library/Homebrew/compilers.rb
<ide> class Compiler < Struct.new(:name, :version, :priority)
<del> # This is exposed under the `build` name for compatibility, since
<del> # `fails_with` continues to use `build` in the public API.
<del> # `build` indicates the build number of an Apple compiler.
<del> # This is preferred over version numbers since there are often
<del> # significant differences within the same version,
<del> # e.g. GCC 4.2 build 5553 vs 5666.
<del> # Non-Apple compilers don't have build numbers.
<del> alias_method :build, :version
<del>
<ide> # The major version for non-Apple compilers. Used to indicate a compiler
<ide> # series; for instance, if the version is 4.8.2, it would return "4.8".
<ide> def major_version | 1 |
PHP | PHP | update model dirty check | f1cfa859e3826b2d996ffc8af30848bc537f06f9 | <ide><path>src/Illuminate/Database/Eloquent/Concerns/HasAttributes.php
<ide> public function originalIsEquivalent($key, $current)
<ide> } elseif ($this->isDateAttribute($key)) {
<ide> return $this->fromDateTime($current) ===
<ide> $this->fromDateTime($original);
<add> } elseif ($this->hasCast($key, ['object', 'collection'])) {
<add> return $this->castAttribute($key, $current) ==
<add> $this->castAttribute($key, $original);
<ide> } elseif ($this->hasCast($key)) {
<ide> return $this->castAttribute($key, $current) ===
<ide> $this->castAttribute($key, $original);
<ide><path>tests/Database/DatabaseEloquentModelTest.php
<ide> public function testDirtyOnCastOrDateAttributes()
<ide> $this->assertTrue($model->isDirty('datetimeAttribute'));
<ide> }
<ide>
<add> public function testDirtyOnCastedObjects()
<add> {
<add> $model = new EloquentModelCastingStub;
<add> $model->setRawAttributes([
<add> 'objectAttribute' => '["one", "two", "three"]',
<add> 'collectionAttribute' => '["one", "two", "three"]',
<add> ]);
<add> $model->syncOriginal();
<add>
<add> $model->objectAttribute = ['one', 'two', 'three'];
<add> $model->collectionAttribute = ['one', 'two', 'three'];
<add>
<add> $this->assertFalse($model->isDirty());
<add> $this->assertFalse($model->isDirty('objectAttribute'));
<add> $this->assertFalse($model->isDirty('collectionAttribute'));
<add> }
<add>
<ide> public function testCleanAttributes()
<ide> {
<ide> $model = new EloquentModelStub(['foo' => '1', 'bar' => 2, 'baz' => 3]); | 2 |
Javascript | Javascript | prefix process.env to avoid inconsistency | cf130c70f0b6a002d437fc175ad513ed30a6244a | <ide><path>examples/with-universal-configuration/env-config.js
<ide> const prod = process.env.NODE_ENV === 'production'
<ide>
<ide> module.exports = {
<del> 'BACKEND_URL': prod ? 'https://api.example.com' : 'https://localhost:8080'
<add> 'process.env.BACKEND_URL': prod ? 'https://api.example.com' : 'https://localhost:8080'
<ide> }
<ide><path>examples/with-universal-configuration/pages/index.js
<del>/* global BACKEND_URL */
<del>
<ide> export default () => (
<del> <div>Loading data from { BACKEND_URL }</div>
<add> <div>Loading data from { process.env.BACKEND_URL }</div>
<ide> ) | 2 |
Javascript | Javascript | issue a warning instead of an error for long names | 456ad438d8eabdd3b5bfa7568a600323a2b0278e | <ide><path>src/core/parser.js
<ide> var Lexer = (function LexerClosure() {
<ide> strBuf.push(String.fromCharCode(ch));
<ide> }
<ide> }
<del> if (strBuf.length > 128) {
<del> error('Warning: name token is longer than allowed by the spec: ' +
<del> strBuf.length);
<add> if (strBuf.length > 127) {
<add> warn('name token is longer than allowed by the spec: ' + strBuf.length);
<ide> }
<ide> return Name.get(strBuf.join(''));
<ide> }, | 1 |
PHP | PHP | fix return type | 9ddc23401389dc5f316591114540f607176491c7 | <ide><path>src/Illuminate/Console/Command.php
<ide> public function secret($question, $fallback = true)
<ide> * @param string $default
<ide> * @param mixed $attempts
<ide> * @param bool $multiple
<del> * @return bool
<add> * @return string
<ide> */
<ide> public function choice($question, array $choices, $default = null, $attempts = null, $multiple = null)
<ide> { | 1 |
Python | Python | avoid race condition in elasticsearch backend | ada19d215fd1289ad8a22eb44b1ac3afe9787d46 | <ide><path>celery/backends/elasticsearch.py
<ide>
<ide> from datetime import datetime
<ide>
<add>from celery import states
<ide> from kombu.utils.encoding import bytes_to_str
<ide> from kombu.utils.url import _parse_url
<ide>
<ide> def __init__(self, url=None, *args, **kwargs):
<ide>
<ide> def get(self, key):
<ide> try:
<del> res = self.server.get(
<del> index=self.index,
<del> doc_type=self.doc_type,
<del> id=key,
<del> )
<add> res = self._get(key)
<ide> try:
<ide> if res['found']:
<ide> return res['_source']['result']
<ide> def get(self, key):
<ide> except elasticsearch.exceptions.NotFoundError:
<ide> pass
<ide>
<add> def _get(self, key):
<add> return self.server.get(
<add> index=self.index,
<add> doc_type=self.doc_type,
<add> id=key,
<add> )
<add>
<ide> def set(self, key, value):
<add> body = {
<add> 'result': value,
<add> '@timestamp': '{0}Z'.format(
<add> datetime.utcnow().isoformat()[:-3]
<add> ),
<add> }
<ide> try:
<ide> self._index(
<ide> id=key,
<del> body={
<del> 'result': value,
<del> '@timestamp': '{0}Z'.format(
<del> datetime.utcnow().isoformat()[:-3]
<del> ),
<del> },
<add> body=body,
<ide> )
<ide> except elasticsearch.exceptions.ConflictError:
<ide> # document already exists, update it
<del> data = self.get(key)
<del> data[key] = value
<del> self._index(key, data, refresh=True)
<add> self._update(id=key, body=body)
<ide>
<ide> def _index(self, id, body, **kwargs):
<ide> body = {bytes_to_str(k): v for k, v in items(body)}
<ide> def _index(self, id, body, **kwargs):
<ide> index=self.index,
<ide> doc_type=self.doc_type,
<ide> body=body,
<add> params={'op_type': 'create'},
<ide> **kwargs
<ide> )
<ide>
<add> def _update(self, id, body, **kwargs):
<add> body = {bytes_to_str(k): v for k, v in items(body)}
<add> retries = 3
<add> while retries > 0:
<add> retries -= 1
<add> try:
<add> res_get = self._get(key=id)
<add> if not res_get['found']:
<add> return self._index(id, body, **kwargs)
<add> parsed_result = self.decode_result(res_get['_source']['result'])
<add> if parsed_result['status'] in states.READY_STATES:
<add> # if stored state is already in ready state, do nothing
<add> return {'result': 'noop'}
<add>
<add> # get current sequence number and primary term
<add> # https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html
<add> seq_no = res_get.get('_seq_no', 1)
<add> prim_term = res_get.get('_primary_term', 1)
<add>
<add> # try to update document with current seq_no and primary_term
<add> res = self.server.update(
<add> id=bytes_to_str(id),
<add> index=self.index,
<add> body=body,
<add> params={'if_primary_term': prim_term, 'if_seq_no': seq_no},
<add> **kwargs
<add> )
<add> # result is elastic search update query result
<add> # noop = query did not update any document
<add> # updated = at least one document got updated
<add> if res['result'] != 'noop':
<add> return res
<add> except Exception:
<add> if retries == 0:
<add> raise
<add> raise Exception('too many retries to update backend')
<add>
<ide> def mget(self, keys):
<ide> return [self.get(key) for key in keys]
<ide>
<ide><path>t/unit/backends/test_elasticsearch.py
<ide> def test_index(self):
<ide> doc_type=x.doc_type,
<ide> index=x.index,
<ide> body=body,
<add> params={'op_type': 'create'},
<ide> kwarg1='test1'
<ide> )
<ide>
<ide> def test_index_bytes_key(self):
<ide> doc_type=x.doc_type,
<ide> index=x.index,
<ide> body={"field1": "value1"},
<add> params={'op_type': 'create'},
<ide> kwarg1='test1'
<ide> )
<ide> | 2 |
Go | Go | resolve full path to docker binary | 46578a2359714e32d61af4efaeea86b7ff770359 | <ide><path>integration-cli/docker_test_vars.go
<ide> package main
<ide>
<ide> import (
<add> "fmt"
<ide> "os"
<add> "os/exec"
<ide> )
<ide>
<ide> // the docker binary to use
<ide> var workingDirectory string
<ide> func init() {
<ide> if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
<ide> dockerBinary = dockerBin
<add> } else {
<add> whichCmd := exec.Command("which", "docker")
<add> out, _, err := runCommandWithOutput(whichCmd)
<add> if err == nil {
<add> dockerBinary = stripTrailingCharacters(out)
<add> } else {
<add> fmt.Printf("ERROR: couldn't resolve full path to the Docker binary")
<add> os.Exit(1)
<add> }
<ide> }
<ide> if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
<ide> registryImageName = registryImage | 1 |
Text | Text | fix example code syntax [ci skip] | e06e53e28527a7f90f7e22117174f6376bf6eaf1 | <ide><path>guides/source/active_storage_overview.md
<ide> input.addEventListener('change', (event) => {
<ide> input.value = null
<ide> })
<ide>
<del>const uploadFile = (file) {
<add>const uploadFile = (file) => {
<ide> // your form needs the file_field direct_upload: true, which
<ide> // provides data-direct-upload-url
<ide> const url = input.dataset.directUploadUrl | 1 |
Text | Text | update guide to mention code linters available | cdabcc2bd530af4184c2592619ea3fe0ddb2d9af | <ide><path>guides/source/contributing_to_ruby_on_rails.md
<ide> Now get busy and add/edit code. You're on your branch now, so you can write what
<ide> * Include tests that fail without your code, and pass with it.
<ide> * Update the (surrounding) documentation, examples elsewhere, and the guides: whatever is affected by your contribution.
<ide>
<del>
<ide> TIP: Changes that are cosmetic in nature and do not add anything substantial to the stability, functionality, or testability of Rails will generally not be accepted (read more about [our rationales behind this decision](https://github.com/rails/rails/pull/13771#issuecomment-32746700)).
<ide>
<ide> #### Follow the Coding Conventions
<ide> Rails follows a simple set of coding style conventions:
<ide> * Prefer class << self over self.method for class methods.
<ide> * Use `my_method(my_arg)` not `my_method( my_arg )` or `my_method my_arg`.
<ide> * Use `a = b` and not `a=b`.
<del>* Use assert_not methods instead of refute.
<add>* Use assert\_not methods instead of refute.
<ide> * Prefer `method { do_stuff }` instead of `method{do_stuff}` for single-line blocks.
<ide> * Follow the conventions in the source you see used already.
<ide>
<ide> The above are guidelines - please use your best judgment in using them.
<ide>
<add>Additionally, we have [RuboCop](https://www.rubocop.org/) rules defined to codify some of our coding conventions. You can run RuboCop locally against the file that you have modified before submitting a pull request:
<add>
<add>```bash
<add>$ rubocop actionpack/lib/action_controller/metal/strong_parameters.rb
<add>Inspecting 1 file
<add>.
<add>
<add>1 file inspected, no offenses detected
<add>```
<add>
<add>For `rails-ujs` CoffeeScript and JavaScript files, you can run `npm run lint` in `actionview` folder.
<add>
<ide> ### Benchmark Your Code
<ide>
<ide> For changes that might have an impact on performance, please benchmark your | 1 |
Java | Java | simplify logic in mergedsqlconfig | a48851509735d85db4632a6eff2de2248299db40 | <ide><path>spring-test/src/main/java/org/springframework/test/context/jdbc/MergedSqlConfig.java
<ide> private static ErrorMode retrieveErrorMode(AnnotationAttributes attributes) {
<ide>
<ide> private static String retrieveSeparator(AnnotationAttributes attributes) {
<ide> String separator = attributes.getString("separator");
<del> if (separator != null && separator.equals("")) {
<add> if ("".equals(separator)) {
<ide> separator = ScriptUtils.DEFAULT_STATEMENT_SEPARATOR;
<ide> }
<ide> return separator;
<ide> }
<ide>
<ide> private static String retrieveCommentPrefix(AnnotationAttributes attributes) {
<ide> String commentPrefix = attributes.getString("commentPrefix");
<del> if (commentPrefix != null && commentPrefix.equals("")) {
<add> if ("".equals(commentPrefix)) {
<ide> commentPrefix = ScriptUtils.DEFAULT_COMMENT_PREFIX;
<ide> }
<ide> return commentPrefix;
<ide> }
<ide>
<ide> private static String retrieveBlockCommentStartDelimiter(AnnotationAttributes attributes) {
<ide> String blockCommentStartDelimiter = attributes.getString("blockCommentStartDelimiter");
<del> if (blockCommentStartDelimiter != null && blockCommentStartDelimiter.equals("")) {
<add> if ("".equals(blockCommentStartDelimiter)) {
<ide> blockCommentStartDelimiter = ScriptUtils.DEFAULT_BLOCK_COMMENT_START_DELIMITER;
<ide> }
<ide> return blockCommentStartDelimiter;
<ide> }
<ide>
<ide> private static String retrieveBlockCommentEndDelimiter(AnnotationAttributes attributes) {
<ide> String blockCommentEndDelimiter = attributes.getString("blockCommentEndDelimiter");
<del> if (blockCommentEndDelimiter != null && blockCommentEndDelimiter.equals("")) {
<add> if ("".equals(blockCommentEndDelimiter)) {
<ide> blockCommentEndDelimiter = ScriptUtils.DEFAULT_BLOCK_COMMENT_END_DELIMITER;
<ide> }
<ide> return blockCommentEndDelimiter; | 1 |
Text | Text | move estliberitas to collaborator emeriti list | 6982dc7198c92757f54c26f71ca4d1ab2b381118 | <ide><path>README.md
<ide> For information about the governance of the Node.js project, see
<ide> **Adrian Estrada** <[email protected]> (he/him)
<ide> * [eljefedelrodeodeljefe](https://github.com/eljefedelrodeodeljefe) -
<ide> **Robert Jefe Lindstaedt** <[email protected]>
<del>* [estliberitas](https://github.com/estliberitas) -
<del>**Alexander Makarenko** <[email protected]>
<ide> * [eugeneo](https://github.com/eugeneo) -
<ide> **Eugene Ostroukhov** <[email protected]>
<ide> * [evanlucas](https://github.com/evanlucas) -
<ide> For information about the governance of the Node.js project, see
<ide> **Andras** <[email protected]>
<ide> * [AnnaMag](https://github.com/AnnaMag) -
<ide> **Anna M. Kedzierska** <[email protected]>
<add>* [estliberitas](https://github.com/estliberitas) -
<add>**Alexander Makarenko** <[email protected]>
<ide> * [firedfox](https://github.com/firedfox) -
<ide> **Daniel Wang** <[email protected]>
<ide> * [imran-iq](https://github.com/imran-iq) - | 1 |
Ruby | Ruby | set the remote config manually | f40650ecd0ff54b6a710791db42884a3b982d0c1 | <ide><path>Library/Homebrew/cmd/update.rb
<ide> def git_init_if_necessary
<ide> if Dir[".git/*"].empty?
<ide> safe_system "git", "init"
<ide> safe_system "git", "config", "core.autocrlf", "false"
<del> safe_system "git", "remote", "add", "origin", "https://github.com/Homebrew/homebrew.git"
<add> safe_system "git", "config", "remote.origin.url", "https://github.com/Homebrew/homebrew.git"
<add> safe_system "git", "config", "remote.origin.fetch", "+refs/heads/*:refs/remotes/origin/*"
<ide> safe_system "git", "fetch", "origin"
<ide> safe_system "git", "reset", "--hard", "origin/master"
<ide> end | 1 |
PHP | PHP | add language line | b084aacc5ad105e39c2b058e9523e73655be8d1f | <ide><path>lang/en/validation.php
<ide> 'string' => 'The :attribute must be :size characters.',
<ide> ],
<ide> 'starts_with' => 'The :attribute must start with one of the following: :values.',
<add> 'doesnt_start_with' => 'The :attribute may not start with one of the following: :values.',
<ide> 'string' => 'The :attribute must be a string.',
<ide> 'timezone' => 'The :attribute must be a valid timezone.',
<ide> 'unique' => 'The :attribute has already been taken.', | 1 |
Java | Java | fix failing test in cancelwithoutdemandcodectests | 02f61e3aaeebf3beb08317a0d3aa934a6d8fc747 | <ide><path>spring-web/src/main/java/org/springframework/http/codec/json/AbstractJackson2Encoder.java
<ide> import com.fasterxml.jackson.databind.SequenceWriter;
<ide> import com.fasterxml.jackson.databind.exc.InvalidDefinitionException;
<ide> import com.fasterxml.jackson.databind.ser.FilterProvider;
<del>import org.apache.commons.logging.Log;
<ide> import org.reactivestreams.Publisher;
<ide> import reactor.core.publisher.Flux;
<ide> import reactor.core.publisher.Mono;
<ide> public Flux<DataBuffer> encode(Publisher<?> inputStream, DataBufferFactory buffe
<ide> // Do not prepend JSON array prefix until first signal is known, onNext vs onError
<ide> // Keeps response not committed for error handling
<ide>
<del> Flux<DataBuffer> flux1 = helper.getPrefix(bufferFactory, hints, logger)
<del> .concatWith(Flux.just(EMPTY_BUFFER).repeat());
<add> dataBufferFlux = Flux.from(inputStream)
<add> .map(value -> {
<add> byte[] prefix = helper.getPrefix();
<add> byte[] delimiter = helper.getDelimiter();
<ide>
<del> Flux<DataBuffer> flux2 = Flux.from(inputStream).map(value -> encodeStreamingValue(
<del> value, bufferFactory, hints, sequenceWriter, byteBuilder, helper.getDelimiter(), EMPTY_BYTES));
<add> DataBuffer dataBuffer = encodeStreamingValue(
<add> value, bufferFactory, hints, sequenceWriter, byteBuilder, delimiter, EMPTY_BYTES);
<ide>
<del> dataBufferFlux = Flux.zip(flux1, flux2, (buffer1, buffer2) ->
<del> (buffer1 != EMPTY_BUFFER ?
<del> bufferFactory.join(Arrays.asList(buffer1, buffer2)) :
<del> buffer2))
<del> .concatWith(helper.getSuffix(bufferFactory, hints, logger));
<add> return (prefix.length > 0 ?
<add> bufferFactory.join(Arrays.asList(bufferFactory.wrap(prefix), dataBuffer)) :
<add> dataBuffer);
<add> })
<add> .concatWith(Mono.fromCallable(() -> bufferFactory.wrap(helper.getSuffix())));
<ide> }
<ide>
<ide> return dataBufferFlux
<add> .doOnNext(dataBuffer -> Hints.touchDataBuffer(dataBuffer, hints, logger))
<ide> .doAfterTerminate(() -> {
<ide> try {
<ide> byteBuilder.release();
<ide> private static class JsonArrayJoinHelper {
<ide>
<ide> private static final byte[] CLOSE_BRACKET = {']'};
<ide>
<del>
<del> private boolean afterFirstItem = false;
<add> private boolean firstItemEmitted;
<ide>
<ide> public byte[] getDelimiter() {
<del> if (this.afterFirstItem) {
<add> if (this.firstItemEmitted) {
<ide> return COMMA_SEPARATOR;
<ide> }
<del> this.afterFirstItem = true;
<add> this.firstItemEmitted = true;
<ide> return EMPTY_BYTES;
<ide> }
<ide>
<del> public Mono<DataBuffer> getPrefix(DataBufferFactory factory, @Nullable Map<String, Object> hints, Log logger) {
<del> return wrapBytes(OPEN_BRACKET, factory, hints, logger);
<del> }
<del>
<del> public Mono<DataBuffer> getSuffix(DataBufferFactory factory, @Nullable Map<String, Object> hints, Log logger) {
<del> return wrapBytes(CLOSE_BRACKET, factory, hints, logger);
<add> public byte[] getPrefix() {
<add> return (this.firstItemEmitted ? EMPTY_BYTES : OPEN_BRACKET);
<ide> }
<ide>
<del> private Mono<DataBuffer> wrapBytes(
<del> byte[] bytes, DataBufferFactory bufferFactory, @Nullable Map<String, Object> hints, Log logger) {
<del>
<del> return Mono.fromCallable(() -> {
<del> DataBuffer buffer = bufferFactory.wrap(bytes);
<del> Hints.touchDataBuffer(buffer, hints, logger);
<del> return buffer;
<del> });
<add> public byte[] getSuffix() {
<add> return CLOSE_BRACKET;
<ide> }
<ide> }
<ide> | 1 |
Python | Python | update transfo xl example | 6cd769957e5eadd59963bb02efd3b746e8d120af | <ide><path>examples/run_transfo_xl.py
<ide>
<ide> import torch
<ide>
<del>from pytorch_pretrained_bert import TransfoXLModel, TransfoXLCorpus
<add>from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus
<ide>
<ide> logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
<ide> datefmt = '%m/%d/%Y %H:%M:%S',
<ide> def main():
<ide> device=device, ext_len=args.ext_len)
<ide>
<ide> # Load a pre-trained model
<del> model = TransfoXLModel.from_pretrained(args.model_name)
<add> model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
<ide> model = model.to(device)
<ide>
<ide> logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(
<ide> def format_log(loss, split):
<ide> logger.info('=' * 100)
<ide>
<ide> if __name__ == '__main__':
<del> main()
<ide>\ No newline at end of file
<add> main()
<ide><path>pytorch_pretrained_bert/modeling_transfo_xl_utilities.py
<ide> def forward(self, hidden, target=None, keep_order=False):
<ide>
<ide> if i == 0:
<ide> if target is not None:
<del> logprob_i = head_logprob_i.gather(1, target_i[:,None]).squeeze(1)
<add> logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
<ide> else:
<ide> out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
<ide> else: | 2 |
Ruby | Ruby | add blob#type as a stringinquirer | 801b4eb465cd48435abddac881b92c93470b6933 | <ide><path>app/models/active_storage/blob.rb
<ide> def filename
<ide> ActiveStorage::Filename.new(self[:filename])
<ide> end
<ide>
<add> # Returns a `StringInquirer` based on the content_type that is broken into text, image, audio, video, pdf, or,
<add> # the catch-all, file. Example: `messages.attachments.select(&:image?)`.
<add> def type
<add> @type ||=
<add> case content_type
<add> when /^text/ then 'text'
<add> when /^image/ then 'image'
<add> when /^audio/ then 'audio'
<add> when /^video/ then 'video'
<add> when /pdf/ then 'pdf'
<add> else 'file'
<add> end.inquiry
<add> end
<add>
<ide> # Returns a `ActiveStorage::Variant` instance with the set of `transformations` passed in. This is only relevant
<ide> # for image files, and it allows any image to be transformed for size, colors, and the like. Example:
<ide> #
<ide><path>test/models/blob_test.rb
<ide> class ActiveStorage::BlobTest < ActiveSupport::TestCase
<ide> assert_equal Digest::MD5.base64digest(data), blob.checksum
<ide> end
<ide>
<add> test "inquery type" do
<add> blob = create_blob data: "Hello world!"
<add> assert blob.type.text?
<add> assert_not blob.type.audio?
<add> end
<add>
<ide> test "download yields chunks" do
<ide> blob = create_blob data: "a" * 75.kilobytes
<ide> chunks = [] | 2 |
Javascript | Javascript | remove old unused grunt tasks | 8a4857e85d9eb0b394f10948a26c2a719b7c207d | <ide><path>tasks/embed_locales.js
<del>
<del>module.exports = function (grunt) {
<del> grunt.registerTask('embedLocales', function () {
<del> var config = grunt.config('embedLocales');
<del>
<del> var files = grunt.file.expand(config.targetLocales);
<del> var embeddedContents = determineEmbeddedContent(files);
<del>
<del> var momentContents = grunt.file.read(config.moment);
<del> var modifiedContents = momentContents.replace('/* EMBED_LOCALES */', function () {
<del> // If we don't do this, $ symbols in locale files may get interpreted in
<del> // the regex replacement
<del> return embeddedContents;
<del> });
<del>
<del> grunt.file.write(config.dest, modifiedContents);
<del> });
<del>
<del> var languageReset = 'moment.locale(\'en\');';
<del>
<del> function determineEmbeddedContent(files) {
<del> var embeddedContent = '';
<del> files.forEach(function (file) {
<del> embeddedContent += transformFile(file);
<del> });
<del> embeddedContent += '\n ' + languageReset + '\n';
<del> return embeddedContent;
<del> }
<del>
<del> var reTransform = /function \(factory\) \{[^]*\}(?=\(function \(moment\) \{)/gm;
<del> var replaceWith =
<del> 'function (factory) {\n' +
<del> ' factory(moment);\n' +
<del> '}';
<del>
<del> function transformFile(file) {
<del> var fileContents = grunt.file.read(file);
<del>
<del> if (!fileContents.match(reTransform)) {
<del> grunt.warn('Warning: all locale files must use the common UMD wrapper pattern. Failed locale file: ' + file);
<del> return '';
<del> }
<del>
<del> return fileContents.replace(reTransform, replaceWith);
<del> }
<del>};
<del>
<ide><path>tasks/history.js
<del>var https = require('https'),
<del> zlib = require('zlib'),
<del> path = require('path'),
<del> fs = require('fs');
<del>
<del>var count = 0;
<del>var resolved = 0;
<del>
<del>var outputs = [];
<del>
<del>var done;
<del>
<del>function check() {
<del> if (resolved === count) {
<del> normalize();
<del> display();
<del> }
<del>}
<del>
<del>function makeBar(length) {
<del> var i = '';
<del> while (i.length < length) {
<del> i += '=';
<del> }
<del> return i;
<del>}
<del>
<del>function normalize() {
<del> var i,
<del> max = 0,
<del> max2 = 0;
<del> for (i = 0; i < count; i++) {
<del> max = Math.max(max, outputs[i].gzip);
<del> max2 = Math.max(max2, outputs[i].original);
<del> }
<del> for (i = 0; i < count; i++) {
<del> outputs[i].bargraph = makeBar((outputs[i].gzip / max) * 80);
<del> outputs[i].bargraph2 = makeBar((outputs[i].original / max2) * 80);
<del> }
<del>}
<del>
<del>function display() {
<del> var i;
<del> for (i = 0; i < count; i++) {
<del> console.log(outputs[i].version + ' ' + outputs[i].gzip + ' ' + outputs[i].original);
<del> console.log('gzip ' + outputs[i].bargraph);
<del> console.log('orig ' + outputs[i].bargraph2);
<del> }
<del> done();
<del>}
<del>
<del>function getSizeAtVersion(version, path) {
<del> var data = '',
<del> op = {},
<del>
<del> req = https.request({
<del> host: 'raw.github.com',
<del> port: 443,
<del> path: '/timrwood/moment/' + version + path
<del> }, function (res) {
<del> res.setEncoding('utf8');
<del> res.on('data', function (chunk) {
<del> data += chunk;
<del> });
<del> res.on('end', function (e) {
<del> zlib.gzip(data, function (error, result) {
<del> op.version = version;
<del> op.gzip = result.length;
<del> op.original = data.length;
<del> resolved++;
<del> check();
<del> });
<del> });
<del> });
<del>
<del> req.on('error', function (e) {
<del> console.log('problem with request: ' + e.message);
<del> });
<del> req.end();
<del> count++;
<del> outputs.push(op);
<del>}
<del>
<del>function getRemote() {
<del> var oldVersions = '1.0.1 1.1.0 1.1.1 1.1.2 1.2.0 1.3.0 1.4.0'.split(' '),
<del> newVersions = '1.5.0 1.5.1 1.6.0 1.6.1 1.7.0 1.7.1'.split(' '),
<del> i;
<del>
<del> for (i = 0; i < oldVersions.length; i++) {
<del> getSizeAtVersion(oldVersions[i], '/moment.min.js');
<del> }
<del> for (i = 0; i < newVersions.length; i++) {
<del> getSizeAtVersion(newVersions[i], '/min/moment.min.js');
<del> }
<del>}
<del>
<del>function getLocal() {
<del> count++;
<del> var op = {};
<del> outputs.push(op);
<del> fs.readFile(path.normalize(__dirname + '/../min/moment.min.js'), 'utf8', function (err, data) {
<del> if (err) {
<del> throw err;
<del> }
<del> zlib.gzip(data, function (error, result) {
<del> op.version = '.next';
<del> op.gzip = result.length;
<del> op.original = data.length;
<del> resolved++;
<del> check();
<del> });
<del> });
<del>}
<del>
<del>
<del>
<del>module.exports = function (grunt) {
<del> grunt.registerTask('history', 'Check the codebase filesize over different releases.', function () {
<del> done = this.async();
<del> getRemote();
<del> getLocal();
<del> });
<del>};
<ide><path>tasks/size.js
<del>var https = require('https'),
<del> zlib = require('zlib'),
<del> path = require('path'),
<del> fs = require('fs');
<del>
<del>var stable = '1.7.1',
<del> done;
<del>
<del>function getVersion(path, cb) {
<del> var data = '',
<del> req = https.request({
<del> host: 'raw.github.com',
<del> port: 443,
<del> path: '/timrwood/moment/' + path
<del> }, function (res) {
<del> res.setEncoding('utf8');
<del> res.on('data', function (chunk) {
<del> data += chunk;
<del> });
<del> res.on('end', function (e) {
<del> zlib.gzip(data, function (error, result) {
<del> cb(data.length, result.length);
<del> });
<del> });
<del> });
<del> req.on('error', function (e) {
<del> console.log('problem with request: ' + e.message);
<del> });
<del> req.end();
<del>}
<del>
<del>function printDiffs(stableLen, stableGzip, currentLen, currentGzip) {
<del> var diff = currentLen - stableLen,
<del> gzipDiff = currentGzip - stableGzip;
<del>
<del> console.log('Filesize difference from current branch to ' + stable);
<del> console.log(stable + ' ' + stableLen + ' / ' + stableGzip);
<del> console.log('curr ' + currentLen + ' / ' + currentGzip);
<del> console.log('diff ' + (diff > 0 ? '+' : '') + diff);
<del> console.log('gzip ' + (gzipDiff > 0 ? '+' : '') + gzipDiff);
<del>}
<del>
<del>
<del>module.exports = function (grunt) {
<del> grunt.registerTask('size', 'Check the codebase filesize against the latest stable version.', function () {
<del> done = this.async();
<del> fs.readFile(path.normalize(__dirname + '/../min/moment.min.js'), 'utf8', function (err, data) {
<del> if (err) {
<del> throw err;
<del> }
<del> zlib.gzip(data, function (error, result) {
<del> getVersion(stable + '/min/moment.min.js', function (stableLength, stableGzipLength) {
<del> printDiffs(stableLength, stableGzipLength, data.length, result.length);
<del> done();
<del> });
<del> });
<del> });
<del> });
<del>};
<ide><path>tasks/zones.js
<del>var fs = require('fs');
<del>
<del>
<del>module.exports = function (grunt) {
<del> var ZONE_TAB = '/usr/share/zoneinfo/zone.tab';
<del>
<del> grunt.registerTask('zones', 'Run the unit tests in different timezones.', function () {
<del> var done = this.async();
<del>
<del> getAllTimezones(function (err, zones) {
<del> if (err != null) {
<del> throw err;
<del> }
<del> (function iterator(i) {
<del> if (i >= zones.length) {
<del> return done();
<del> }
<del> runTestsInZone(zones[i], function (err) {
<del> if (err != null) {
<del> throw err;
<del> }
<del> iterator(i + 1);
<del> });
<del> }(0));
<del> });
<del> });
<del>
<del> function getAllTimezones (callback) {
<del> fs.readFile(ZONE_TAB, 'ascii', function (err, content) {
<del> if (err != null) {
<del> callback(err);
<del> }
<del> callback(null, content.split(/\r\n|\r|\n/)
<del> // remove empty and commented lines
<del> .filter(function (line) {
<del> return line && !/^#/.test(line);
<del> })
<del> // country code TAB coordinates TAB timezone
<del> .map(function (line) {
<del> return line.split('\t')[2];
<del> }));
<del> });
<del> }
<del>
<del> function runTestsInZone (zone, next) {
<del> grunt.log.ok('Running tests in zone ' + zone);
<del> grunt.util.spawn({
<del> cmd: 'grunt',
<del> opts: {
<del> env: {
<del> 'PATH': process.env.PATH,
<del> 'TZ': zone
<del> }
<del> },
<del> args: ['--no-color', 'nodeunit']
<del> }, function (err, result, code) {
<del> if (code !== 0) {
<del> grunt.log.error(result.stdout.split(/\r\n|\r|\n/)
<del> .filter(function (line) {
<del> return /^(>>|Warning:|$)/.test(line);
<del> })
<del> .map(function (line) {
<del> return (line.substr(0, 3) === '>> ' ? line.substr(3) : line);
<del> })
<del> .join('\n'));
<del> }
<del> next();
<del> });
<del> }
<del>}; | 4 |
Text | Text | clarify data flow and counter example | 51af568ba748afb55b09b84f6ff8db4aef9efaec | <ide><path>docs/tutorials/fundamentals/part-1-overview.md
<ide> Let's look at a minimal working example of a Redux app - a small counter applica
<ide>
<ide> Because Redux is a standalone JS library with no dependencies, this example is written by only loading a single script tag for the Redux library, and uses basic JS and HTML for the UI. In practice, Redux is normally used by [installing the Redux packages from NPM](../../introduction/Installation.md), and the UI is created using a library like [React](https://reactjs.org).
<ide>
<add>:::info
<add>
<add>[Part 5: UI and React](./part-5-ui-and-react.md) shows how to use Redux and React together.
<add>
<add>:::
<add>
<ide> Let's break this example down into its separate parts to see what's happening.
<ide>
<ide> #### State, Actions, and Reducers
<ide> after a delay.
<ide>
<ide> ### Data Flow
<ide>
<del>We can summarize the flow of data through a Redux app with this diagram:
<add>We can summarize the flow of data through a Redux app with this diagram. It represents how:
<add>
<add>- actions are dispatched in response to a user interaction like a click
<add>- the store runs the reducer function to calculate a new state
<add>- the UI reads the new state to display the new values
<add>
<add>(Don't worry if these pieces aren't quite clear yet! Keep this picture in your mind as you go through the rest of this tutorial, and you'll see how the pieces fit together.)
<ide>
<ide> 
<ide> | 1 |
Javascript | Javascript | move module exports proxy into a separate method | c3b674cc116e6656a0eedd9e8f01b1c01c955f2c | <ide><path>lib/internal/bootstrap/loaders.js
<ide> undefined;
<ide> };
<ide>
<add> // Provide named exports for all builtin libraries so that the libraries
<add> // may be imported in a nicer way for esm users. The default export is left
<add> // as the entire namespace (module.exports) and wrapped in a proxy such
<add> // that APMs and other behavior are still left intact.
<add> NativeModule.prototype.proxifyExports = function() {
<add> this.exportKeys = ObjectKeys(this.exports);
<add>
<add> const update = (property, value) => {
<add> if (this.reflect !== undefined &&
<add> ReflectApply(ObjectHasOwnProperty,
<add> this.reflect.exports, [property]))
<add> this.reflect.exports[property].set(value);
<add> };
<add>
<add> const handler = {
<add> __proto__: null,
<add> defineProperty: (target, prop, descriptor) => {
<add> // Use `Object.defineProperty` instead of `Reflect.defineProperty`
<add> // to throw the appropriate error if something goes wrong.
<add> ObjectDefineProperty(target, prop, descriptor);
<add> if (typeof descriptor.get === 'function' &&
<add> !ReflectHas(handler, 'get')) {
<add> handler.get = (target, prop, receiver) => {
<add> const value = ReflectGet(target, prop, receiver);
<add> if (ReflectApply(ObjectHasOwnProperty, target, [prop]))
<add> update(prop, value);
<add> return value;
<add> };
<add> }
<add> update(prop, getOwn(target, prop));
<add> return true;
<add> },
<add> deleteProperty: (target, prop) => {
<add> if (ReflectDeleteProperty(target, prop)) {
<add> update(prop, undefined);
<add> return true;
<add> }
<add> return false;
<add> },
<add> set: (target, prop, value, receiver) => {
<add> const descriptor = ReflectGetOwnPropertyDescriptor(target, prop);
<add> if (ReflectSet(target, prop, value, receiver)) {
<add> if (descriptor && typeof descriptor.set === 'function') {
<add> for (const key of this.exportKeys) {
<add> update(key, getOwn(target, key, receiver));
<add> }
<add> } else {
<add> update(prop, getOwn(target, prop, receiver));
<add> }
<add> return true;
<add> }
<add> return false;
<add> }
<add> };
<add>
<add> this.exports = new Proxy(this.exports, handler);
<add> };
<add>
<ide> NativeModule.prototype.compile = function() {
<ide> const id = this.id;
<ide> let source = NativeModule.getSource(id);
<ide> fn(this.exports, requireFn, this, process, internalBinding);
<ide>
<ide> if (config.experimentalModules && !NativeModule.isInternal(this.id)) {
<del> this.exportKeys = ObjectKeys(this.exports);
<del>
<del> const update = (property, value) => {
<del> if (this.reflect !== undefined &&
<del> ReflectApply(ObjectHasOwnProperty,
<del> this.reflect.exports, [property]))
<del> this.reflect.exports[property].set(value);
<del> };
<del>
<del> const handler = {
<del> __proto__: null,
<del> defineProperty: (target, prop, descriptor) => {
<del> // Use `Object.defineProperty` instead of `Reflect.defineProperty`
<del> // to throw the appropriate error if something goes wrong.
<del> ObjectDefineProperty(target, prop, descriptor);
<del> if (typeof descriptor.get === 'function' &&
<del> !ReflectHas(handler, 'get')) {
<del> handler.get = (target, prop, receiver) => {
<del> const value = ReflectGet(target, prop, receiver);
<del> if (ReflectApply(ObjectHasOwnProperty, target, [prop]))
<del> update(prop, value);
<del> return value;
<del> };
<del> }
<del> update(prop, getOwn(target, prop));
<del> return true;
<del> },
<del> deleteProperty: (target, prop) => {
<del> if (ReflectDeleteProperty(target, prop)) {
<del> update(prop, undefined);
<del> return true;
<del> }
<del> return false;
<del> },
<del> set: (target, prop, value, receiver) => {
<del> const descriptor = ReflectGetOwnPropertyDescriptor(target, prop);
<del> if (ReflectSet(target, prop, value, receiver)) {
<del> if (descriptor && typeof descriptor.set === 'function') {
<del> for (const key of this.exportKeys) {
<del> update(key, getOwn(target, key, receiver));
<del> }
<del> } else {
<del> update(prop, getOwn(target, prop, receiver));
<del> }
<del> return true;
<del> }
<del> return false;
<del> }
<del> };
<del>
<del> this.exports = new Proxy(this.exports, handler);
<add> this.proxifyExports();
<ide> }
<ide>
<ide> this.loaded = true; | 1 |
Python | Python | improve swin for visionencoderdecoder | a7aca42fc48c9fe32002c905705516238b387d5e | <ide><path>src/transformers/models/swin/configuration_swin.py
<ide> class SwinConfig(PretrainedConfig):
<ide>
<ide> attribute_map = {
<ide> "num_attention_heads": "num_heads",
<add> "num_hidden_layers": "num_layers",
<ide> }
<ide>
<ide> def __init__(
<ide> def __init__(
<ide> self.encoder_stride = encoder_stride
<ide> # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
<ide> # this indicates the channel dimension after the last stage of the model
<del> self.hidden_size = embed_dim * 8
<add> self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
<ide><path>tests/swin/test_modeling_swin.py
<ide> def __init__(
<ide> patch_size=2,
<ide> num_channels=3,
<ide> embed_dim=16,
<del> depths=[1],
<del> num_heads=[2],
<add> depths=[1, 2, 1],
<add> num_heads=[2, 2, 4],
<ide> window_size=2,
<ide> mlp_ratio=2.0,
<ide> qkv_bias=True,
<ide> def __init__(
<ide> scope=None,
<ide> use_labels=True,
<ide> type_sequence_label_size=10,
<del> encoder_stride=2,
<add> encoder_stride=8,
<ide> ):
<ide> self.parent = parent
<ide> self.batch_size = batch_size
<ide> def create_and_check_model(self, config, pixel_values, labels):
<ide> model.eval()
<ide> result = model(pixel_values)
<ide>
<del> # since the model we're testing only consists of a single layer, expected_seq_len = number of patches
<del> expected_seq_len = (config.image_size // config.patch_size) ** 2
<add> expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
<ide> expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
<ide>
<ide> self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
<ide><path>tests/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
<ide> from transformers.file_utils import cached_property, is_torch_available, is_vision_available
<ide> from transformers.testing_utils import require_torch, require_vision, slow, torch_device
<ide>
<add>from ..bart.test_modeling_bart import BartModelTester
<ide> from ..bert.test_modeling_bert import BertModelTester
<ide> from ..deit.test_modeling_deit import DeiTModelTester
<add>from ..swin.test_modeling_swin import SwinModelTester
<ide> from ..test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
<ide> from ..trocr.test_modeling_trocr import TrOCRStandaloneDecoderModelTester
<ide> from ..vit.test_modeling_vit import ViTModelTester
<ide>
<ide> from transformers import (
<ide> AutoTokenizer,
<add> BartForCausalLM,
<ide> BertLMHeadModel,
<ide> DeiTModel,
<add> SwinModel,
<ide> TrOCRForCausalLM,
<ide> VisionEncoderDecoderConfig,
<ide> VisionEncoderDecoderModel,
<ide> def prepare_config_and_inputs(self):
<ide> }
<ide>
<ide>
<add>@require_torch
<add>class Swin2BartModelTest(EncoderDecoderMixin, unittest.TestCase):
<add> def get_encoder_decoder_model(self, config, decoder_config):
<add> encoder_model = SwinModel(config).eval()
<add> decoder_model = BartForCausalLM(decoder_config).eval()
<add> return encoder_model, decoder_model
<add>
<add> def prepare_config_and_inputs(self):
<add> model_tester_encoder = SwinModelTester(self, batch_size=13, embed_dim=32)
<add> model_tester_decoder = BartModelTester(self, batch_size=13, hidden_size=32, max_position_embeddings=512)
<add> encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs()
<add> decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs()
<add> config, pixel_values, _ = encoder_config_and_inputs
<add> decoder_config, decoder_inputs_dict = decoder_config_and_inputs
<add>
<add> # make sure that cross attention layers are added
<add> decoder_config.add_cross_attention = True
<add> # disable cache for now
<add> decoder_config.use_cache = False
<add> return {
<add> "config": config,
<add> "pixel_values": pixel_values,
<add> "decoder_config": decoder_config,
<add> **decoder_inputs_dict,
<add> }
<add>
<add> def check_encoder_decoder_model_output_attentions(
<add> self,
<add> config,
<add> decoder_config,
<add> decoder_input_ids,
<add> decoder_attention_mask,
<add> labels=None,
<add> pixel_values=None,
<add> **kwargs
<add> ):
<add> # make the decoder inputs a different shape from the encoder inputs to harden the test
<add> decoder_input_ids = decoder_input_ids[:, :-1]
<add> decoder_attention_mask = decoder_attention_mask[:, :-1]
<add> encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
<add> enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
<add> enc_dec_model.to(torch_device)
<add> outputs_encoder_decoder = enc_dec_model(
<add> pixel_values=pixel_values,
<add> decoder_input_ids=decoder_input_ids,
<add> decoder_attention_mask=decoder_attention_mask,
<add> output_attentions=True,
<add> )
<add>
<add> encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
<add> self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
<add>
<add> # in Swin, the seq_len equals:
<add> seq_len = encoder_model.config.window_size**2
<add> self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads[0], seq_len, seq_len))
<add>
<add> decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
<add> num_decoder_layers = (
<add> decoder_config.num_decoder_layers
<add> if hasattr(decoder_config, "num_decoder_layers")
<add> else decoder_config.num_hidden_layers
<add> )
<add> self.assertEqual(len(decoder_attentions), num_decoder_layers)
<add>
<add> self.assertEqual(
<add> decoder_attentions[0].shape[-3:],
<add> (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
<add> )
<add>
<add> cross_attentions = outputs_encoder_decoder["cross_attentions"]
<add> self.assertEqual(len(cross_attentions), num_decoder_layers)
<add>
<add> encoder_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
<add> cross_attention_input_seq_len = decoder_input_ids.shape[-1]
<add> self.assertEqual(
<add> cross_attentions[0].shape[-3:],
<add> (decoder_config.num_attention_heads, cross_attention_input_seq_len, encoder_seq_len),
<add> )
<add>
<add> # there are no published pretrained BART-causal checkpoints for now
<add> def test_real_model_save_load_from_pretrained(self):
<add> pass
<add>
<add>
<ide> @require_torch
<ide> class ViT2TrOCR(EncoderDecoderMixin, unittest.TestCase):
<ide> def get_encoder_decoder_model(self, config, decoder_config): | 3 |
PHP | PHP | fix docblock typo. | f2c1253fc2aae9df160f6cc8686196066083c0d4 | <ide><path>src/Illuminate/Database/Query/Grammars/Grammar.php
<ide> protected function removeLeadingBoolean($value)
<ide> }
<ide>
<ide> /**
<del> * Get the gramar specific operators.
<add> * Get the grammar specific operators.
<ide> *
<ide> * @return array
<ide> */ | 1 |
Javascript | Javascript | add support for `promise.allsettled` | 2942233c9c319d644485ec1f6d89d2118061e77a | <ide><path>src/shared/compatibility.js
<ide> if (
<ide> // need to be polyfilled for the IMAGE_DECODERS build target.
<ide> return;
<ide> }
<del> if (
<del> globalThis.Promise &&
<del> globalThis.Promise.prototype &&
<del> globalThis.Promise.prototype.finally
<del> ) {
<add> if (globalThis.Promise && globalThis.Promise.allSettled) {
<ide> return;
<ide> }
<ide> globalThis.Promise = require("core-js/es/promise/index.js");
<ide><path>src/shared/message_handler.js
<ide> class MessageHandler {
<ide> async _deleteStreamController(streamId) {
<ide> // Delete the `streamController` only when the start, pull, and cancel
<ide> // capabilities have settled, to prevent `TypeError`s.
<del> await Promise.all(
<add> await Promise.allSettled(
<ide> [
<ide> this.streamControllers[streamId].startCall,
<ide> this.streamControllers[streamId].pullCall,
<ide> this.streamControllers[streamId].cancelCall,
<ide> ].map(function(capability) {
<del> return capability && capability.promise.catch(function() {});
<add> return capability && capability.promise;
<ide> })
<ide> );
<ide> delete this.streamControllers[streamId]; | 2 |
Ruby | Ruby | add depends_on spacing checks to brew_audit | bd51ae33431d8aa2b30d150da775097ccd603c9c | <ide><path>Library/Contributions/examples/brew-audit.rb
<ide> def ff
<ide> problems << " * Remove 'use_mirror' from url."
<ide> end
<ide>
<add> # 2 (or more, if in an if block) spaces before depends_on, please
<add> if /^\ ?depends_on/ =~ text
<add> problems << " * Check indentation of 'depends_on'."
<add> end
<add>
<ide> if /(#\{\w+\s*\+\s*['"][^}]+\})/ =~ text
<ide> problems << " * Try not to concatenate paths in string interpolation:\n #{$1}"
<ide> end | 1 |
PHP | PHP | fix variable name typo | 4c40f2fdada85cf2fe6c75ab5bb467536410d59b | <ide><path>tests/TestCase/Error/ExceptionRendererTest.php
<ide> public function testError400AsJson()
<ide> Router::setRequestInfo($request);
<ide>
<ide> $exception = new NotFoundException('Custom message');
<del> $execptionLine = __LINE__ - 1;
<add> $exceptionLine = __LINE__ - 1;
<ide> $ExceptionRenderer = new ExceptionRenderer($exception);
<ide> $ExceptionRenderer->controller->response = $this->getMockBuilder('Cake\Network\Response')
<ide> ->setMethods(['statusCode', '_sendHeader'])
<ide> public function testError400AsJson()
<ide> 'url' => '/posts/view/1000?sort=title&direction=desc',
<ide> 'code' => 404,
<ide> 'file' => __FILE__,
<del> 'line' => $execptionLine
<add> 'line' => $exceptionLine
<ide> ];
<ide>
<ide> $this->assertEquals($expected, json_decode($result, true)); | 1 |
PHP | PHP | add support for request bodies in get requests | 3921015bcdfde986b5dcba04242ca5229336d5d1 | <ide><path>lib/Cake/Network/Http/Client.php
<ide> public function config($config = null) {
<ide> /**
<ide> * Do a GET request.
<ide> *
<add> * The $data argument supports a special `_content` key
<add> * for providing a request body in a GET request. This is
<add> * generally not used but services like ElasticSearch use
<add> * this feature.
<add> *
<ide> * @param string $url The url or path you want to request.
<ide> * @param array $data The query data you want to send.
<ide> * @param array $options Additional options for the request.
<ide> * @return Cake\Network\Http\Response
<ide> */
<ide> public function get($url, $data = [], $options = []) {
<ide> $options = $this->_mergeOptions($options);
<add> $body = [];
<add> if (isset($data['_content'])) {
<add> $body = $data['_content'];
<add> unset($data['_content']);
<add> }
<ide> $url = $this->buildUrl($url, $data, $options);
<ide> $request = $this->_createRequest(
<ide> Request::METHOD_GET,
<ide> $url,
<del> [],
<add> $body,
<ide> $options
<ide> );
<ide> return $this->send($request, $options);
<ide><path>lib/Cake/Test/TestCase/Network/Http/ClientTest.php
<ide> public function testGetQuerystring() {
<ide> ]);
<ide> $this->assertSame($result, $response);
<ide> }
<add>
<add>/**
<add> * Test a GET with a request body. Services like
<add> * elasticsearch use this feature.
<add> *
<add> * @return void
<add> */
<add> public function testGetWithContent() {
<add> $response = new Response();
<add>
<add> $mock = $this->getMock('Cake\Network\Http\Adapter\Stream', ['send']);
<add> $mock->expects($this->once())
<add> ->method('send')
<add> ->with($this->logicalAnd(
<add> $this->isInstanceOf('Cake\Network\Http\Request'),
<add> $this->attributeEqualTo('_url', 'http://cakephp.org/search'),
<add> $this->attributeEqualTo('_content', 'some data')
<add> ))
<add> ->will($this->returnValue($response));
<add>
<add> $http = new Client([
<add> 'host' => 'cakephp.org',
<add> 'adapter' => $mock
<add> ]);
<add> $result = $http->get('/search', [
<add> '_content' => 'some data'
<add> ]);
<add> $this->assertSame($result, $response);
<add> }
<ide> } | 2 |
Javascript | Javascript | fix style and some docs | 17251372b1dedb42d27b01375125cee4f5edcce7 | <ide><path>src/widgets.js
<ide> * elements.
<ide> * * {@link angular.widget.@ng:required ng:required} - Verifies presence of user input.
<ide> * * {@link angular.widget.@ng:validate ng:validate} - Validates content of user input.
<del> * * {@link angular.widget.HTML HTML} - Standard HTML processed by angular.
<add> * * {@link angular.widget.HTML HTML input elements} - Standard HTML input elements data-bound by
<add> * angular.
<ide> * * {@link angular.widget.ng:view ng:view} - Works with $route to "include" partial templates
<ide> * * {@link angular.widget.ng:switch ng:switch} - Conditionally changes DOM structure
<ide> * * {@link angular.widget.ng:include ng:include} - Includes an external HTML fragment
<ide> angularWidget('button', inputWidgetSelector);
<ide> * @name angular.directive.ng:options
<ide> *
<ide> * @description
<del> * Dynamically generate a list of `<option>` elements for a `<select>` element using the array
<del> * obtained by evaluating the `ng:options` expression.
<add> * Dynamically generate a list of `<option>` elements for a `<select>` element using an array or
<add> * an object obtained by evaluating the `ng:options` expression.
<ide> *
<del> * When an item in the select menu is select, the array element represented by the selected option
<del> * will be bound to the model identified by the `name` attribute of the parent select element.
<add> * When an item in the select menu is select, the value of array element or object property
<add> * represented by the selected option will be bound to the model identified by the `name` attribute
<add> * of the parent select element.
<ide> *
<ide> * Optionally, a single hard-coded `<option>` element, with the value set to an empty string, can
<ide> * be nested into the `<select>` element. This element will then represent `null` or "not selected"
<ide> angularWidget('button', inputWidgetSelector);
<ide> * * binding to a value not in list confuses most browsers.
<ide> *
<ide> * @element select
<del> * @param {comprehension_expression} comprehension in following form
<del> *
<del> * * _label_ `for` _value_ `in` _array_
<del> * * _select_ `as` _label_ `for` _value_ `in` _array_
<del> * * _select_ `as` _label_ `group by` _group_ `for` _value_ `in` _array_
<del> * * _select_ `group by` _group_ `for` _value_ `in` _array_
<del> * * _label_ `for` `(`_key_`,` _value_`)` `in` _object_
<del> * * _select_ `as` _label_ `for` `(`_key_`,` _value_`)` `in` _object_
<del> * * _select_ `as` _label_ `group by` _group_ `for` `(`_key_`,` _value_`)` `in` _object_
<del> * * _select_ `group by` _group_ `for` `(`_key_`,` _value_`)` `in` _object_
<add> * @param {comprehension_expression} comprehension in one of the following forms:
<add> *
<add> * * for array data sources:
<add> * * `label` **`for`** `value` **`in`** `array`
<add> * * `select` **`as`** `label` **`for`** `value` **`in`** `array`
<add> * * `label` **`group by`** `group` **`for`** `value` **`in`** `array`
<add> * * `select` **`as`** `label` **`group by`** `group` **`for`** `value` **`in`** `array`
<add> * * for object data sources:
<add> * * `label` **`for (`**`key` **`,`** `value`**`) in`** `object`
<add> * * `select` **`as`** `label` **`for (`**`key` **`,`** `value`**`) in`** `object`
<add> * * `label` **`group by`** `group` **`for (`**`key`**`,`** `value`**`) in`** `object`
<add> * * `select` **`as`** `label` **`group by`** `group`
<add> * **`for` `(`**`key`**`,`** `value`**`) in`** `object`
<ide> *
<ide> * Where:
<ide> *
<del> * * _array_ / _object_: an expression which evaluates to an array / object to iterate over.
<del> * * _value_: local variable which will refer to each item in the _array_ or each value of
<del> * _object_ during itteration.
<del> * * _key_: local variable which will refer to the key in the _object_ during the iteration.
<del> * * _label_: The result of this expression will be the `option` label. The
<del> * `expression` will most likely refer to the _value_ variable.
<del> * * _select_: The result of this expression will be bound to the scope. If not specified,
<del> * _select_ expression will default to _value_.
<del> * * _group_: The result of this expression will be used to group options using the `optgroup`
<del> * DOM element.
<add> * * `array` / `object`: an expression which evaluates to an array / object to iterate over.
<add> * * `value`: local variable which will refer to each item in the `array` or each property value
<add> * of `object` during iteration.
<add> * * `key`: local variable which will refer to a property name in `object` during iteration.
<add> * * `label`: The result of this expression will be the label for `<option>` element. The
<add> * `expression` will most likely refer to the `value` variable (e.g. `value.propertyName`).
<add> * * `select`: The result of this expression will be bound to the model of the parent `<select>`
<add> * element. If not specified, `select` expression will default to `value`.
<add> * * `group`: The result of this expression will be used to group options using the `<optgroup>`
<add> * DOM element.
<ide> *
<ide> * @example
<ide> <doc:example>
<ide> <doc:source>
<ide> <script>
<ide> function MyCntrl(){
<ide> this.colors = [
<del> {name:'black'},
<del> {name:'white'},
<del> {name:'red'},
<del> {name:'blue'},
<del> {name:'green'}
<add> {name:'black', shade:'dark'},
<add> {name:'white', shade:'light'},
<add> {name:'red', shade:'dark'},
<add> {name:'blue', shade:'dark'},
<add> {name:'yellow', shade:'light'}
<ide> ];
<ide> this.color = this.colors[2]; // red
<ide> }
<ide> </script>
<ide> <div ng:controller="MyCntrl">
<ide> <ul>
<ide> <li ng:repeat="color in colors">
<del> Name: <input name="color.name"/> [<a href ng:click="colors.$remove(color)">X</a>]
<add> Name: <input name="color.name">
<add> [<a href ng:click="colors.$remove(color)">X</a>]
<ide> </li>
<ide> <li>
<ide> [<a href ng:click="colors.push({})">add</a>]
<ide> </li>
<ide> </ul>
<ide> <hr/>
<ide> Color (null not allowed):
<del> <select name="color" ng:options="c.name for c in colors"></select><br/>
<add> <select name="color" ng:options="c.name for c in colors"></select><br>
<ide>
<ide> Color (null allowed):
<del> <select name="color" ng:options="c.name for c in colors">
<del> <option value="">-- chose color --</option>
<add> <div class="nullable">
<add> <select name="color" ng:options="c.name for c in colors">
<add> <option value="">-- chose color --</option>
<add> </select>
<add> </div><br/>
<add>
<add> Color grouped by shade:
<add> <select name="color" ng:options="c.name group by c.shade for c in colors">
<ide> </select><br/>
<ide>
<del> Select <a href ng:click="color={name:'not in list'}">bogus</a>. <br/>
<add>
<add> Select <a href ng:click="color={name:'not in list'}">bogus</a>.<br>
<ide> <hr/>
<ide> Currently selected: {{ {selected_color:color} }}
<del> <div style="border:solid 1px black;"
<add> <div style="border:solid 1px black; height:20px"
<ide> ng:style="{'background-color':color.name}">
<del>
<ide> </div>
<ide> </div>
<ide> </doc:source>
<ide> angularWidget('button', inputWidgetSelector);
<ide> expect(binding('color')).toMatch('red');
<ide> select('color').option('0');
<ide> expect(binding('color')).toMatch('black');
<del> select('color').option('');
<add> using('.nullable').select('color').option('');
<ide> expect(binding('color')).toMatch('null');
<ide> });
<ide> </doc:scenario>
<ide> var NG_OPTIONS_REGEXP = /^\s*(.*?)(?:\s+as\s+(.*?))?(?:\s+group\s+by\s+(.*))?\s+
<ide> angularWidget('select', function(element){
<ide> this.descend(true);
<ide> this.directives(true);
<del> var isMultiselect = element.attr('multiple');
<del> var expression = element.attr('ng:options');
<del> var onChange = expressionCompile(element.attr('ng:change') || "").fnSelf;
<del> var match;
<add>
<add> var isMultiselect = element.attr('multiple'),
<add> expression = element.attr('ng:options'),
<add> onChange = expressionCompile(element.attr('ng:change') || "").fnSelf,
<add> match;
<add>
<ide> if (!expression) {
<ide> return inputWidgetSelector.call(this, element);
<ide> }
<ide> if (! (match = expression.match(NG_OPTIONS_REGEXP))) {
<ide> throw Error(
<del> "Expected ng:options in form of '_select_ (as _label_)? for (_key_,)?_value_ in _collection_' but got '" +
<del> expression + "'.");
<add> "Expected ng:options in form of '_select_ (as _label_)? for (_key_,)?_value_ in _collection_'" +
<add> " but got '" + expression + "'.");
<ide> }
<del> var displayFn = expressionCompile(match[2] || match[1]).fnSelf;
<del> var valueName = match[4] || match[6];
<del> var keyName = match[5];
<del> var groupByFn = expressionCompile(match[3] || '').fnSelf;
<del> var valueFn = expressionCompile(match[2] ? match[1] : valueName).fnSelf;
<del> var valuesFn = expressionCompile(match[7]).fnSelf;
<del> // we can't just jqLite('<option>') since jqLite is not smart enough
<del> // to create it in <select> and IE barfs otherwise.
<del> var optionTemplate = jqLite(document.createElement('option'));
<del> var optGroupTemplate = jqLite(document.createElement('optgroup'));
<del> var nullOption = false; // if false then user will not be able to select it
<add>
<add> var displayFn = expressionCompile(match[2] || match[1]).fnSelf,
<add> valueName = match[4] || match[6],
<add> keyName = match[5],
<add> groupByFn = expressionCompile(match[3] || '').fnSelf,
<add> valueFn = expressionCompile(match[2] ? match[1] : valueName).fnSelf,
<add> valuesFn = expressionCompile(match[7]).fnSelf,
<add> // we can't just jqLite('<option>') since jqLite is not smart enough
<add> // to create it in <select> and IE barfs otherwise.
<add> optionTemplate = jqLite(document.createElement('option')),
<add> optGroupTemplate = jqLite(document.createElement('optgroup')),
<add> nullOption = false; // if false then user will not be able to select it
<add>
<ide> return function(selectElement){
<del> var scope = this;
<ide>
<ide> // This is an array of array of existing option groups in DOM. We try to reuse these if possible
<ide> // optionGroupsCache[0] is the options with no option group
<ide> // optionGroupsCache[?][0] is the parent: either the SELECT or OPTGROUP element
<del> var optionGroupsCache = [[{element: selectElement, label:''}]];
<del> var model = modelAccessor(scope, element);
<add> var optionGroupsCache = [[{element: selectElement, label:''}]],
<add> scope = this,
<add> model = modelAccessor(scope, element);
<ide>
<ide> // find existing special options
<ide> forEach(selectElement.children(), function(option){
<ide> angularWidget('select', function(element){
<ide> selectElement.html(''); // clear contents
<ide>
<ide> selectElement.bind('change', function(){
<del> var optionGroup;
<del> var collection = valuesFn(scope) || [];
<del> var key = selectElement.val();
<del> var value;
<del> var optionElement;
<del> var index, groupIndex, length, groupLength;
<del> var tempScope = scope.$new();
<add> var optionGroup,
<add> collection = valuesFn(scope) || [],
<add> key = selectElement.val(),
<add> tempScope = scope.$new(),
<add> value, optionElement, index, groupIndex, length, groupLength;
<add>
<ide> try {
<ide> if (isMultiselect) {
<ide> value = [];
<ide> angularWidget('select', function(element){
<ide> });
<ide>
<ide> scope.$onEval(function(){
<del> var scope = this;
<del>
<del> // Temporary location for the option groups before we render them
<del> var optionGroups = {
<del> '':[]
<del> };
<del> var optionGroupNames = [''];
<del> var optionGroupName;
<del> var optionGroup;
<del> var option;
<del> var existingParent, existingOptions, existingOption;
<del> var values = valuesFn(scope) || [];
<del> var keys = values;
<del> var key;
<del> var groupLength, length;
<del> var fragment;
<del> var groupIndex, index;
<del> var optionElement;
<del> var optionScope = scope.$new();
<del> var modelValue = model.get();
<del> var selected;
<del> var selectedSet = false; // nothing is selected yet
<del> var isMulti = isMultiselect;
<del> var lastElement;
<del> var element;
<add> var scope = this,
<add> optionGroups = {'':[]}, // Temporary location for the option groups before we render them
<add> optionGroupNames = [''],
<add> optionGroupName,
<add> optionGroup,
<add> option,
<add> existingParent, existingOptions, existingOption,
<add> values = valuesFn(scope) || [],
<add> keys = values,
<add> key,
<add> groupLength, length,
<add> fragment,
<add> groupIndex, index,
<add> optionElement,
<add> optionScope = scope.$new(),
<add> modelValue = model.get(),
<add> selected,
<add> selectedSet = false, // nothing is selected yet
<add> isMulti = isMultiselect,
<add> lastElement,
<add> element;
<ide>
<ide> try {
<ide> if (isMulti) {
<ide> angularWidget('select', function(element){
<ide> selectedSet = true;
<ide> }
<ide>
<del> // If we have a keyName then we are iterating over on object. We
<del> // grab the keys and sort them.
<add> // If we have a keyName then we are iterating over on object. Grab the keys and sort them.
<ide> if(keyName) {
<ide> keys = [];
<ide> for (key in values) {
<ide> angularWidget('select', function(element){
<ide> };
<ide> });
<ide>
<add>
<ide> /**
<ide> * @workInProgress
<ide> * @ngdoc widget
<ide><path>test/widgetsSpec.js
<ide> describe("widget", function(){
<ide> };
<ide>
<ide> function createSingleSelect(blank, unknown){
<del> createSelect({name:'selected', 'ng:options':'value.name for value in values'},
<del> blank, unknown);
<add> createSelect({
<add> 'name':'selected',
<add> 'ng:options':'value.name for value in values'
<add> }, blank, unknown);
<ide> };
<ide>
<ide> function createMultiSelect(blank, unknown){
<del> createSelect({name:'selected', multiple:true, 'ng:options':'value.name for value in values'},
<del> blank, unknown);
<add> createSelect({
<add> 'name':'selected',
<add> 'multiple':true,
<add> 'ng:options':'value.name for value in values'
<add> }, blank, unknown);
<ide> };
<ide>
<ide> afterEach(function(){
<ide> describe("widget", function(){
<ide> it('should throw when not formated "? for ? in ?"', function(){
<ide> expect(function(){
<ide> compile('<select name="selected" ng:options="i dont parse"></select>');
<del> }).toThrow("Expected ng:options in form of '_select_ (as _label_)? for (_key_,)?_value_ in _collection_' but got 'i dont parse'.");
<del>
<del> $logMock.error.logs.shift();
<add> }).toThrow("Expected ng:options in form of '_select_ (as _label_)? for (_key_,)?_value_ in" +
<add> " _collection_' but got 'i dont parse'.");
<ide> });
<ide>
<ide> it('should render a list', function(){
<ide> describe("widget", function(){
<ide>
<ide> it('should render an object', function(){
<ide> createSelect({
<del> name:'selected',
<add> 'name':'selected',
<ide> 'ng:options': 'value as key for (key, value) in object'
<ide> });
<ide> scope.object = {'red':'FF0000', 'green':'00FF00', 'blue':'0000FF'};
<ide> describe("widget", function(){
<ide>
<ide> it('should bind to scope value and group', function(){
<ide> createSelect({
<del> name:'selected',
<del> 'ng:options':'item.name group by item.group for item in values'});
<add> 'name':'selected',
<add> 'ng:options':'item.name group by item.group for item in values'
<add> });
<ide> scope.values = [{name:'A'},
<ide> {name:'B', group:'first'},
<ide> {name:'C', group:'second'},
<ide> describe("widget", function(){
<ide> });
<ide>
<ide> it('should bind to scope value through experession', function(){
<del> createSelect({name:'selected', 'ng:options':'item.id as item.name for item in values'});
<add> createSelect({'name':'selected', 'ng:options':'item.id as item.name for item in values'});
<ide> scope.values = [{id:10, name:'A'}, {id:20, name:'B'}];
<ide> scope.selected = scope.values[0].id;
<ide> scope.$eval();
<ide> describe("widget", function(){
<ide>
<ide> it('should bind to object key', function(){
<ide> createSelect({
<del> name:'selected',
<del> 'ng:options':'key as value for (key, value) in object'});
<add> 'name':'selected',
<add> 'ng:options':'key as value for (key, value) in object'
<add> });
<ide> scope.object = {'red':'FF0000', 'green':'00FF00', 'blue':'0000FF'};
<ide> scope.selected = 'green';
<ide> scope.$eval();
<ide> describe("widget", function(){
<ide> it('should bind to object value', function(){
<ide> createSelect({
<ide> name:'selected',
<del> 'ng:options':'value as key for (key, value) in object'});
<add> 'ng:options':'value as key for (key, value) in object'
<add> });
<ide> scope.object = {'red':'FF0000', 'green':'00FF00', 'blue':'0000FF'};
<ide> scope.selected = '00FF00';
<ide> scope.$eval();
<ide> describe("widget", function(){
<ide> expect(select.find('option').length).toEqual(2);
<ide> });
<ide>
<del> it('should insert a unknown option if bound to not in list', function(){
<add> it('should insert a unknown option if bound to something not in the list', function(){
<ide> createSingleSelect();
<ide> scope.values = [{name:'A'}];
<ide> scope.selected = {};
<ide> describe("widget", function(){
<ide> createSelect({
<ide> name:'selected',
<ide> 'ng:options':'value for value in values',
<del> 'ng:change':'count = count + 1'});
<add> 'ng:change':'count = count + 1'
<add> });
<ide> scope.values = [{name:'A'}, {name:'B'}];
<ide> scope.selected = scope.values[0];
<ide> scope.count = 0; | 2 |
Go | Go | remove tp5 support from volume | 6ceec828bff1663f4193ca4ffc308e4ccb962ec5 | <ide><path>volume/volume_windows.go
<ide> import (
<ide> "path/filepath"
<ide> "regexp"
<ide> "strings"
<del>
<del> "github.com/docker/docker/pkg/system"
<ide> )
<ide>
<ide> // read-write modes
<ide> const (
<ide> // - Variation on hostdir but can be a drive followed by colon as well
<ide> // - If a path, must be absolute. Can include spaces
<ide> // - Drive cannot be c: (explicitly checked in code, not RegEx)
<del>)
<del>
<del>// RXMode is the regex expression for the mode of the mount
<del>var RXMode string
<ide>
<del>func init() {
<del> osv := system.GetOSVersion()
<del> // Read-only volumes supported from 14350 onwards (post Windows Server 2016 TP5)
<add> // RXMode is the regex expression for the mode of the mount
<ide> // Mode (optional):
<ide> // - Hopefully self explanatory in comparison to above regex's.
<ide> // - Colon is not in the capture group
<del> if osv.Build >= 14350 {
<del> RXMode = `(:(?P<mode>(?i)ro|rw))?`
<del> } else {
<del> RXMode = `(:(?P<mode>(?i)rw))?`
<del> }
<del>}
<add> RXMode = `(:(?P<mode>(?i)ro|rw))?`
<add>)
<ide>
<ide> // BackwardsCompatible decides whether this mount point can be
<ide> // used in old versions of Docker or not. | 1 |
Python | Python | fix url-prefixes for generated pages | 5b203bcb7878794e43464304aad7f72f56cae89c | <ide><path>official/utils/docs/build_nlp_api_docs.py
<ide> flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.')
<ide> flags.DEFINE_string(
<ide> 'code_url_prefix',
<del> 'https://github.com/tensorflow/models/blob/master/tensorflow_models/',
<add> 'https://github.com/tensorflow/models/blob/master/tensorflow_models/nlp',
<ide> 'The url prefix for links to code.')
<ide>
<ide> flags.DEFINE_bool('search_hints', True,
<ide> def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name,
<ide> del tfm.nlp.layers.MultiHeadAttention
<ide> del tfm.nlp.layers.EinsumDense
<ide>
<del> branch = code_url_prefix.strip('/').split('/')[-2]
<del> official_url_prefix = (
<del> f'https://github.com/tensorflow/models/blob/{branch}/official/')
<add> url_parts = code_url_prefix.strip('/').split('/')
<add> url_parts = url_parts[:url_parts.index('tensorflow_models')]
<add> url_parts.append('official')
<add>
<add> official_url_prefix = '/'.join(url_parts)
<ide>
<ide> nlp_base_dir = pathlib.Path(tfm.nlp.__file__).parent
<ide>
<ide><path>official/utils/docs/build_vision_api_docs.py
<ide> flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.')
<ide> flags.DEFINE_string(
<ide> 'code_url_prefix',
<del> 'https://github.com/tensorflow/models/blob/master/tensorflow_models/',
<add> 'https://github.com/tensorflow/models/blob/master/tensorflow_models/vision',
<ide> 'The url prefix for links to code.')
<ide>
<ide> flags.DEFINE_bool('search_hints', True,
<ide> def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name,
<ide> """Generates api docs for the tensorflow docs package."""
<ide> build_api_docs_lib.hide_module_model_and_layer_methods()
<ide>
<del> branch = code_url_prefix.strip('/').split('/')[-2]
<del> official_url_prefix = (
<del> f'https://github.com/tensorflow/models/blob/{branch}/official/')
<add> url_parts = code_url_prefix.strip('/').split('/')
<add> url_parts = url_parts[:url_parts.index('tensorflow_models')]
<add> url_parts.append('official')
<add>
<add> official_url_prefix = '/'.join(url_parts)
<ide>
<ide> vision_base_dir = pathlib.Path(tfm.vision.__file__).parent
<ide> | 2 |
Java | Java | add methodcall option to handlerresultmatchers | 4b1183582a693d3c7bdf3596d1190a700e171ed4 | <ide><path>spring-test/src/main/java/org/springframework/test/web/servlet/result/HandlerResultMatchers.java
<ide> import org.springframework.test.web.servlet.ResultMatcher;
<ide> import org.springframework.util.ClassUtils;
<ide> import org.springframework.web.method.HandlerMethod;
<add>import org.springframework.web.servlet.mvc.method.annotation.MvcUriComponentsBuilder;
<ide> import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter;
<ide> import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping;
<ide>
<ide> public void match(MvcResult result) throws Exception {
<ide> };
<ide> }
<ide>
<add> /**
<add> * Assert the controller method used to process the request. The expected
<add> * method is specified through a "mock" controller method invocation
<add> * similar to {@link MvcUriComponentsBuilder#fromMethodCall(Object)}.
<add> * <p>For example given this controller:
<add> * <pre class="code">
<add> * @RestController
<add> * static class SimpleController {
<add> *
<add> * @RequestMapping("/")
<add> * public ResponseEntity<Void> handle() {
<add> * return ResponseEntity.ok().build();
<add> * }
<add> * }
<add> * </pre>
<add> * <p>A test can be performed:
<add> * <pre class="code">
<add> * mockMvc.perform(get("/"))
<add> * .andExpect(handler().methodCall(on(SimpleController.class).handle()));
<add> * </pre>
<add> */
<add> public ResultMatcher methodCall(final Object info) {
<add> return new ResultMatcher() {
<add> @Override
<add> public void match(MvcResult result) throws Exception {
<add> HandlerMethod handlerMethod = getHandlerMethod(result);
<add> Method method = ((MvcUriComponentsBuilder.MethodInvocationInfo) info).getControllerMethod();
<add> assertEquals("HandlerMethod", method, handlerMethod.getMethod());
<add> }
<add> };
<add> }
<add>
<ide> /**
<ide> * Assert the name of the controller method used to process the request
<ide> * using the given Hamcrest {@link Matcher}.
<ide><path>spring-test/src/test/java/org/springframework/test/web/servlet/samples/standalone/resultmatchers/HandlerAssertionTests.java
<ide> import org.junit.Before;
<ide> import org.junit.Test;
<ide>
<del>import org.springframework.http.HttpEntity;
<ide> import org.springframework.http.ResponseEntity;
<ide> import org.springframework.stereotype.Controller;
<ide> import org.springframework.test.web.servlet.MockMvc;
<ide> import org.springframework.web.bind.annotation.RequestMapping;
<ide> import org.springframework.web.bind.annotation.ResponseBody;
<ide>
<del>import static org.hamcrest.Matchers.*;
<del>import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;
<del>import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
<del>import static org.springframework.test.web.servlet.setup.MockMvcBuilders.*;
<add>import static org.hamcrest.Matchers.equalTo;
<add>import static org.hamcrest.Matchers.is;
<add>import static org.hamcrest.Matchers.not;
<add>import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
<add>import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.handler;
<add>import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
<add>import static org.springframework.test.web.servlet.setup.MockMvcBuilders.standaloneSetup;
<ide> import static org.springframework.web.servlet.mvc.method.annotation.MvcUriComponentsBuilder.on;
<ide>
<ide> /**
<ide> public void testHandlerType() throws Exception {
<ide> this.mockMvc.perform(get("/")).andExpect(handler().handlerType(SimpleController.class));
<ide> }
<ide>
<add> @Test
<add> public void testMethodCall() throws Exception {
<add> this.mockMvc.perform(get("/")).andExpect(handler().methodCall(on(SimpleController.class).handle()));
<add> }
<add>
<ide> @Test
<ide> public void testHandlerMethodNameEqualTo() throws Exception {
<ide> this.mockMvc.perform(get("/")).andExpect(handler().methodName("handle")); | 2 |
Python | Python | improve train_ud script | 4ff92184f145a776a29653f2adf29d2bcd0083fe | <ide><path>bin/parser/train_ud.py
<ide> from spacy.syntax.parser import get_templates
<ide> from spacy.syntax.arc_eager import ArcEager
<ide> from spacy.scorer import Scorer
<add>from spacy.language_data.tag_map import TAG_MAP as DEFAULT_TAG_MAP
<ide> import spacy.attrs
<ide> import io
<ide>
<ide> def score_model(vocab, tagger, parser, gold_docs, verbose=False):
<ide> return scorer
<ide>
<ide>
<del>def main(train_loc, dev_loc, model_dir, tag_map_loc):
<del> with open(tag_map_loc) as file_:
<del> tag_map = json.loads(file_.read())
<add>def main(train_loc, dev_loc, model_dir, tag_map_loc=None):
<add> if tag_map_loc:
<add> with open(tag_map_loc) as file_:
<add> tag_map = json.loads(file_.read())
<add> else:
<add> tag_map = DEFAULT_TAG_MAP
<ide> train_sents = list(read_conllx(train_loc))
<ide> train_sents = PseudoProjectivity.preprocess_training_data(train_sents)
<ide>
<ide> def main(train_loc, dev_loc, model_dir, tag_map_loc):
<ide> model_dir = pathlib.Path(model_dir)
<ide> if not (model_dir / 'deps').exists():
<ide> (model_dir / 'deps').mkdir()
<del> with (model_dir / 'deps' / 'config.json').open('w') as file_:
<del> json.dump({'pseudoprojective': True, 'labels': actions, 'features': features}, file_)
<del>
<add> with (model_dir / 'deps' / 'config.json').open('wb') as file_:
<add> file_.write(
<add> json.dumps(
<add> {'pseudoprojective': True, 'labels': actions, 'features': features}).encode('utf8'))
<ide> vocab = Vocab(lex_attr_getters=Language.Defaults.lex_attr_getters, tag_map=tag_map)
<ide> # Populate vocab
<ide> for _, doc_sents in train_sents:
<ide> def main(train_loc, dev_loc, model_dir, tag_map_loc):
<ide> _ = vocab[dep]
<ide> for tag in tags:
<ide> _ = vocab[tag]
<del> for tag in tags:
<del> assert tag in tag_map, repr(tag)
<add> if tag_map:
<add> for tag in tags:
<add> assert tag in tag_map, repr(tag)
<ide> tagger = Tagger(vocab, tag_map=tag_map)
<ide> parser = DependencyParser(vocab, actions=actions, features=features)
<ide> | 1 |
Javascript | Javascript | increase coverage of console | 74bd3144d6b0745e0b1a8201d820da9e685e232b | <ide><path>test/parallel/test-console-instance.js
<ide> out.write = common.mustCall((d) => {
<ide> assert.doesNotThrow(() => {
<ide> Console(out, err);
<ide> });
<add>
<add>// Instance that does not ignore the stream errors.
<add>const c2 = new Console(out, err, false);
<add>
<add>out.write = () => { throw new Error('out'); };
<add>err.write = () => { throw new Error('err'); };
<add>
<add>assert.throws(() => c2.log('foo'), /^Error: out$/);
<add>assert.throws(() => c2.warn('foo'), /^Error: err$/);
<add>assert.throws(() => c2.dir('foo'), /^Error: out$/); | 1 |
Javascript | Javascript | remove unnecessary variables | d28159f0fc488dd58ae64a03a91480870eed5390 | <ide><path>lib/net.js
<ide> exports.createServer = function(options, connectionListener) {
<ide> // connect(path, [cb]);
<ide> //
<ide> exports.connect = exports.createConnection = function() {
<del> const argsLen = arguments.length;
<del> var args = new Array(argsLen);
<del> for (var i = 0; i < argsLen; i++)
<add> var args = new Array(arguments.length);
<add> for (var i = 0; i < arguments.length; i++)
<ide> args[i] = arguments[i];
<ide> args = normalizeConnectArgs(args);
<ide> debug('createConnection', args);
<ide> Socket.prototype.connect = function(options, cb) {
<ide> // Old API:
<ide> // connect(port, [host], [cb])
<ide> // connect(path, [cb]);
<del> const argsLen = arguments.length;
<del> var args = new Array(argsLen);
<del> for (var i = 0; i < argsLen; i++)
<add> var args = new Array(arguments.length);
<add> for (var i = 0; i < arguments.length; i++)
<ide> args[i] = arguments[i];
<ide> args = normalizeConnectArgs(args);
<ide> return Socket.prototype.connect.apply(this, args); | 1 |
Go | Go | fix merge issue and gofmt | 89fb51f6063467124c8883399fc293faae8d8ef5 | <ide><path>network.go
<ide> import (
<ide> "errors"
<ide> "fmt"
<ide> "github.com/dotcloud/docker/iptables"
<del> "github.com/dotcloud/docker/proxy"
<ide> "github.com/dotcloud/docker/netlink"
<add> "github.com/dotcloud/docker/proxy"
<ide> "github.com/dotcloud/docker/utils"
<ide> "log"
<ide> "net"
<ide><path>runtime_test.go
<ide> func init() {
<ide> startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
<ide> }
<ide>
<del>
<ide> func setupBaseImage() {
<ide> config := &DaemonConfig{
<ide> GraphPath: unitTestStoreBase,
<ide> func setupBaseImage() {
<ide> }
<ide> }
<ide>
<del>
<ide> func spawnGlobalDaemon() {
<ide> if globalRuntime != nil {
<ide> utils.Debugf("Global runtime already exists. Skipping.")
<ide><path>server.go
<ide> func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
<ide> }
<ide>
<ide> childName := parts["name"]
<add> if childName[0] != '/' {
<add> childName = "/" + childName
<add> }
<ide> if err := runtime.Link(fmt.Sprintf("/%s", container.ID), childName, parts["alias"]); err != nil {
<ide> return err
<ide> }
<ide><path>state.go
<ide> import (
<ide>
<ide> type State struct {
<ide> sync.Mutex
<del> Running bool
<del> Pid int
<del> ExitCode int
<del> StartedAt time.Time
<add> Running bool
<add> Pid int
<add> ExitCode int
<add> StartedAt time.Time
<ide> FinishedAt time.Time
<del> Ghost bool
<add> Ghost bool
<ide> }
<ide>
<ide> // String returns a human-readable description of the state
<ide><path>utils_test.go
<ide> package docker
<ide>
<ide> import (
<del> "github.com/dotcloud/docker/utils"
<ide> "fmt"
<add> "github.com/dotcloud/docker/utils"
<ide> "io"
<ide> "io/ioutil"
<ide> "os"
<ide> "path"
<add> "runtime"
<ide> "strings"
<ide> "testing"
<del> "runtime"
<ide> )
<ide>
<ide> // This file contains utility functions for docker's unit test suite.
<ide> func mkRuntime(f Fataler) *Runtime {
<ide> pc, _, _, _ := runtime.Caller(1)
<ide> callerLongName := runtime.FuncForPC(pc).Name()
<ide> parts := strings.Split(callerLongName, ".")
<del> callerShortName := parts[len(parts) - 1]
<add> callerShortName := parts[len(parts)-1]
<ide> if globalTestID == "" {
<ide> globalTestID = GenerateID()[:4]
<ide> } | 5 |
Javascript | Javascript | convert existing buffer when calling .setencoding | afb84744c6071e77f0738a7ff1a52aaa81db8b77 | <ide><path>lib/_stream_readable.js
<ide> Readable.prototype.isPaused = function() {
<ide> Readable.prototype.setEncoding = function(enc) {
<ide> if (!StringDecoder)
<ide> StringDecoder = require('string_decoder').StringDecoder;
<del> this._readableState.decoder = new StringDecoder(enc);
<add> const decoder = new StringDecoder(enc);
<add> this._readableState.decoder = decoder;
<ide> // If setEncoding(null), decoder.encoding equals utf8
<ide> this._readableState.encoding = this._readableState.decoder.encoding;
<add>
<add> // Iterate over current buffer to convert already stored Buffers:
<add> let p = this._readableState.buffer.head;
<add> let content = '';
<add> while (p !== null) {
<add> content += decoder.write(p.data);
<add> p = p.next;
<add> }
<add> this._readableState.buffer.clear();
<add> if (content !== '')
<add> this._readableState.buffer.push(content);
<add> this._readableState.length = content.length;
<ide> return this;
<ide> };
<ide>
<ide><path>test/parallel/test-stream-readable-setEncoding-existing-buffers.js
<add>'use strict';
<add>require('../common');
<add>const { Readable } = require('stream');
<add>const assert = require('assert');
<add>
<add>{
<add> // Call .setEncoding() while there are bytes already in the buffer.
<add> const r = new Readable({ read() {} });
<add>
<add> r.push(Buffer.from('a'));
<add> r.push(Buffer.from('b'));
<add>
<add> r.setEncoding('utf8');
<add> const chunks = [];
<add> r.on('data', (chunk) => chunks.push(chunk));
<add>
<add> process.nextTick(() => {
<add> assert.deepStrictEqual(chunks, ['ab']);
<add> });
<add>}
<add>
<add>{
<add> // Call .setEncoding() while the buffer contains a complete,
<add> // but chunked character.
<add> const r = new Readable({ read() {} });
<add>
<add> r.push(Buffer.from([0xf0]));
<add> r.push(Buffer.from([0x9f]));
<add> r.push(Buffer.from([0x8e]));
<add> r.push(Buffer.from([0x89]));
<add>
<add> r.setEncoding('utf8');
<add> const chunks = [];
<add> r.on('data', (chunk) => chunks.push(chunk));
<add>
<add> process.nextTick(() => {
<add> assert.deepStrictEqual(chunks, ['🎉']);
<add> });
<add>}
<add>
<add>{
<add> // Call .setEncoding() while the buffer contains an incomplete character,
<add> // and finish the character later.
<add> const r = new Readable({ read() {} });
<add>
<add> r.push(Buffer.from([0xf0]));
<add> r.push(Buffer.from([0x9f]));
<add>
<add> r.setEncoding('utf8');
<add>
<add> r.push(Buffer.from([0x8e]));
<add> r.push(Buffer.from([0x89]));
<add>
<add> const chunks = [];
<add> r.on('data', (chunk) => chunks.push(chunk));
<add>
<add> process.nextTick(() => {
<add> assert.deepStrictEqual(chunks, ['🎉']);
<add> });
<add>} | 2 |
Python | Python | fix broken test for models with batchnorm | 1a7ef3349fd9acfc8ee7bc718f9864ac4ea9d064 | <ide><path>tests/test_modeling_tf_common.py
<ide> def test_keras_fit(self):
<ide> else:
<ide> metrics = []
<ide>
<add> model(model.dummy_inputs) # Build the model so we can get some constant weights
<add> model_weights = model.get_weights()
<add>
<add> # Run eagerly to save some expensive compilation times
<ide> model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics)
<ide> # Make sure the model fits without crashing regardless of where we pass the labels
<ide> history1 = model.fit(
<ide> def test_keras_fit(self):
<ide> )
<ide> val_loss1 = history1.history["val_loss"][0]
<ide> accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")}
<add>
<add> # We reinitialize the model here even though our learning rate was zero
<add> # because BatchNorm updates weights by means other than gradient descent.
<add> model.set_weights(model_weights)
<add>
<ide> history2 = model.fit(
<ide> inputs_minus_labels,
<ide> labels,
<ide> def test_keras_fit(self):
<ide> shuffle=False,
<ide> )
<ide> val_loss2 = history2.history["val_loss"][0]
<del> accuracy2 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")}
<add> accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")}
<ide> self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3))
<ide> self.assertEqual(history1.history.keys(), history2.history.keys())
<ide> for key in history1.history.keys():
<ide> def test_keras_fit(self):
<ide> dataset = tf.data.Dataset.from_tensor_slices(prepared_for_class)
<ide> # Pass in all samples as a batch to match other `fit` calls
<ide> dataset = dataset.batch(len(dataset))
<add>
<add> # Reinitialize to fix batchnorm again
<add> model.set_weights(model_weights)
<add>
<ide> history3 = model.fit(
<ide> dataset,
<ide> validation_data=dataset, | 1 |
PHP | PHP | add tests for remaining consoleio methods | fa1f4f4b5a4a54ea243803f64e56dc8c8d2d81e4 | <ide><path>src/Console/ConsoleIo.php
<ide> public function __construct(ConsoleOutput $out = null, ConsoleOutput $err = null
<ide> $this->_in = $in ? $in : new ConsoleInput('php://stdin');
<ide> }
<ide>
<add>/**
<add> * Get/set the current output level.
<add> *
<add> * @param null|int $level The current output level.
<add> * @return int The current output level.
<add> */
<add> public function level($level = null) {
<add> if ($level !== null) {
<add> $this->_level = $level;
<add> }
<add> return $this->_level;
<add> }
<add>
<ide> /**
<ide> * Output only at the verbose level.
<ide> *
<ide> protected function _getInput($prompt, $options, $default) {
<ide> $this->_out->write('<question>' . $prompt . "</question>$optionsText\n$defaultText> ", 0);
<ide> $result = $this->_in->read();
<ide>
<del> if ($result === false) {
<del> return false;
<del> }
<ide> $result = trim($result);
<del>
<ide> if ($default !== null && ($result === '' || $result === null)) {
<ide> return $default;
<ide> }
<ide><path>tests/TestCase/Console/ConsoleIoTest.php
<ide> public function testAskDefaultValue() {
<ide> $this->assertEquals('n', $result);
<ide> }
<ide>
<add>/**
<add> * testOut method
<add> *
<add> * @return void
<add> */
<add> public function testOut() {
<add> $this->out->expects($this->at(0))
<add> ->method('write')
<add> ->with("Just a test", 1);
<add>
<add> $this->out->expects($this->at(1))
<add> ->method('write')
<add> ->with(array('Just', 'a', 'test'), 1);
<add>
<add> $this->out->expects($this->at(2))
<add> ->method('write')
<add> ->with(array('Just', 'a', 'test'), 2);
<add>
<add> $this->out->expects($this->at(3))
<add> ->method('write')
<add> ->with('', 1);
<add>
<add> $this->io->out('Just a test');
<add> $this->io->out(array('Just', 'a', 'test'));
<add> $this->io->out(array('Just', 'a', 'test'), 2);
<add> $this->io->out();
<add> }
<add>
<add>/**
<add> * test that verbose and quiet output levels work
<add> *
<add> * @return void
<add> */
<add> public function testVerboseOut() {
<add> $this->out->expects($this->at(0))
<add> ->method('write')
<add> ->with('Verbose', 1);
<add> $this->out->expects($this->at(1))
<add> ->method('write')
<add> ->with('Normal', 1);
<add> $this->out->expects($this->at(2))
<add> ->method('write')
<add> ->with('Quiet', 1);
<add>
<add> $this->io->level(ConsoleIo::VERBOSE);
<add>
<add> $this->io->out('Verbose', 1, ConsoleIo::VERBOSE);
<add> $this->io->out('Normal', 1, ConsoleIo::NORMAL);
<add> $this->io->out('Quiet', 1, ConsoleIo::QUIET);
<add> }
<add>
<add>/**
<add> * test that verbose and quiet output levels work
<add> *
<add> * @return void
<add> */
<add> public function testVerboseOutput() {
<add> $this->out->expects($this->at(0))
<add> ->method('write')
<add> ->with('Verbose', 1);
<add> $this->out->expects($this->at(1))
<add> ->method('write')
<add> ->with('Normal', 1);
<add> $this->out->expects($this->at(2))
<add> ->method('write')
<add> ->with('Quiet', 1);
<add>
<add> $this->io->level(ConsoleIo::VERBOSE);
<add>
<add> $this->io->verbose('Verbose');
<add> $this->io->out('Normal');
<add> $this->io->quiet('Quiet');
<add> }
<add>
<add>
<add>/**
<add> * test that verbose and quiet output levels work
<add> *
<add> * @return void
<add> */
<add> public function testQuietOutput() {
<add> $this->out->expects($this->exactly(2))
<add> ->method('write')
<add> ->with('Quiet', 1);
<add>
<add> $this->io->level(ConsoleIo::QUIET);
<add>
<add> $this->io->out('Verbose', 1, ConsoleIo::VERBOSE);
<add> $this->io->out('Normal', 1, ConsoleIo::NORMAL);
<add> $this->io->out('Quiet', 1, ConsoleIo::QUIET);
<add> $this->io->verbose('Verbose');
<add> $this->io->quiet('Quiet');
<add> }
<add>
<add>/**
<add> * testErr method
<add> *
<add> * @return void
<add> */
<add> public function testErr() {
<add> $this->err->expects($this->at(0))
<add> ->method('write')
<add> ->with("Just a test", 1);
<add>
<add> $this->err->expects($this->at(1))
<add> ->method('write')
<add> ->with(array('Just', 'a', 'test'), 1);
<add>
<add> $this->err->expects($this->at(2))
<add> ->method('write')
<add> ->with(array('Just', 'a', 'test'), 2);
<add>
<add> $this->err->expects($this->at(3))
<add> ->method('write')
<add> ->with('', 1);
<add>
<add> $this->io->err('Just a test');
<add> $this->io->err(array('Just', 'a', 'test'));
<add> $this->io->err(array('Just', 'a', 'test'), 2);
<add> $this->io->err();
<add> }
<add>
<add>/**
<add> * testNl
<add> *
<add> * @return void
<add> */
<add> public function testNl() {
<add> $newLine = "\n";
<add> if (DS === '\\') {
<add> $newLine = "\r\n";
<add> }
<add> $this->assertEquals($this->io->nl(), $newLine);
<add> $this->assertEquals($this->io->nl(true), $newLine);
<add> $this->assertEquals("", $this->io->nl(false));
<add> $this->assertEquals($this->io->nl(2), $newLine . $newLine);
<add> $this->assertEquals($this->io->nl(1), $newLine);
<add> }
<add>
<add>/**
<add> * testHr
<add> *
<add> * @return void
<add> */
<add> public function testHr() {
<add> $bar = '---------------------------------------------------------------';
<add>
<add> $this->out->expects($this->at(0))->method('write')->with('', 0);
<add> $this->out->expects($this->at(1))->method('write')->with($bar, 1);
<add> $this->out->expects($this->at(2))->method('write')->with('', 0);
<add>
<add> $this->out->expects($this->at(3))->method('write')->with("", true);
<add> $this->out->expects($this->at(4))->method('write')->with($bar, 1);
<add> $this->out->expects($this->at(5))->method('write')->with("", true);
<add>
<add> $this->out->expects($this->at(6))->method('write')->with("", 2);
<add> $this->out->expects($this->at(7))->method('write')->with($bar, 1);
<add> $this->out->expects($this->at(8))->method('write')->with("", 2);
<add>
<add> $this->io->hr();
<add> $this->io->hr(true);
<add> $this->io->hr(2);
<add> }
<add>
<ide> } | 2 |
Text | Text | update new version identifier [ci skip] | 493be8e9db85d203fbfaa6365c448dc8fba12b8f | <ide><path>website/docs/api/entityruler.md
<ide> All labels present in the match patterns.
<ide> | ----------- | ----- | ------------------ |
<ide> | **RETURNS** | tuple | The string labels. |
<ide>
<del>## EntityRuler.ent_ids {#labels tag="property" new="2.2"}
<add>## EntityRuler.ent_ids {#labels tag="property" new="2.2.2"}
<ide>
<ide> All entity ids present in the match patterns `id` properties.
<ide>
<ide><path>website/docs/usage/rule-based-matching.md
<ide> doc = nlp("Apple is opening its first big office in San Francisco.")
<ide> print([(ent.text, ent.label_) for ent in doc.ents])
<ide> ```
<ide>
<del>### Adding IDs to patterns {#entityruler-ent-ids new="2.2"}
<add>### Adding IDs to patterns {#entityruler-ent-ids new="2.2.2"}
<ide>
<ide> The [`EntityRuler`](/api/entityruler) can also accept an `id` attribute for each
<ide> pattern. Using the `id` attribute allows multiple patterns to be associated with | 2 |
PHP | PHP | use proper assertions | 7b1a0732e8e3b70c811f31ce1ed85166e7a0b772 | <ide><path>tests/Database/DatabaseEloquentModelTest.php
<ide> public function testToArray()
<ide> $model->setRelation('multi', new Illuminate\Database\Eloquent\Collection);
<ide> $array = $model->toArray();
<ide>
<del> $this->assertTrue(is_array($array));
<add> $this->assertInternalType('array', $array);
<ide> $this->assertEquals('foo', $array['name']);
<ide> $this->assertEquals('baz', $array['names'][0]['bar']);
<ide> $this->assertEquals('boom', $array['names'][1]['bam']);
<ide><path>tests/Filesystem/FilesystemTest.php
<ide> public function testPutStoresFiles()
<ide> {
<ide> $files = new Filesystem;
<ide> $files->put(__DIR__.'/file.txt', 'Hello World');
<del> $this->assertEquals('Hello World', file_get_contents(__DIR__.'/file.txt'));
<add> $this->assertStringEqualsFile(__DIR__.'/file.txt', 'Hello World');
<ide> @unlink(__DIR__.'/file.txt');
<ide> }
<ide>
<ide> public function testPrependExistingFiles()
<ide> $files = new Filesystem;
<ide> $files->put(__DIR__.'/file.txt', 'World');
<ide> $files->prepend(__DIR__.'/file.txt', 'Hello ');
<del> $this->assertEquals('Hello World', file_get_contents(__DIR__.'/file.txt'));
<add> $this->assertStringEqualsFile(__DIR__.'/file.txt', 'Hello World');
<ide> @unlink(__DIR__.'/file.txt');
<ide> }
<ide>
<ide> public function testPrependNewFiles()
<ide> {
<ide> $files = new Filesystem;
<ide> $files->prepend(__DIR__.'/file.txt', 'Hello World');
<del> $this->assertEquals('Hello World', file_get_contents(__DIR__.'/file.txt'));
<add> $this->assertStringEqualsFile(__DIR__.'/file.txt', 'Hello World');
<ide> @unlink(__DIR__.'/file.txt');
<ide> }
<ide>
<ide><path>tests/Queue/QueueDatabaseQueueTest.php
<ide> public function testPushProperlyPushesJobOntoDatabase()
<ide> $this->assertEquals(0, $array['attempts']);
<ide> $this->assertEquals(0, $array['reserved']);
<ide> $this->assertNull($array['reserved_at']);
<del> $this->assertTrue(is_int($array['available_at']));
<add> $this->assertInternalType('int', $array['available_at']);
<ide> });
<ide>
<ide> $queue->push('foo', ['data']);
<ide> public function testDelayedPushProperlyPushesJobOntoDatabase()
<ide> $this->assertEquals(0, $array['attempts']);
<ide> $this->assertEquals(0, $array['reserved']);
<ide> $this->assertNull($array['reserved_at']);
<del> $this->assertTrue(is_int($array['available_at']));
<add> $this->assertInternalType('int', $array['available_at']);
<ide> });
<ide>
<ide> $queue->later(10, 'foo', ['data']);
<ide><path>tests/Support/SupportCollectionTest.php
<ide> public function testToJsonEncodesTheToArrayResult()
<ide> $c->expects($this->once())->method('toArray')->will($this->returnValue('foo'));
<ide> $results = $c->toJson();
<ide>
<del> $this->assertEquals(json_encode('foo'), $results);
<add> $this->assertJsonStringEqualsJsonString(json_encode('foo'), $results);
<ide> }
<ide>
<ide> public function testCastingToStringJsonEncodesTheToArrayResult()
<ide> {
<ide> $c = $this->getMock('Illuminate\Database\Eloquent\Collection', ['toArray']);
<ide> $c->expects($this->once())->method('toArray')->will($this->returnValue('foo'));
<ide>
<del> $this->assertEquals(json_encode('foo'), (string) $c);
<add> $this->assertJsonStringEqualsJsonString(json_encode('foo'), (string) $c);
<ide> }
<ide>
<ide> public function testOffsetAccess()
<ide><path>tests/Support/SupportFluentTest.php
<ide> public function testToJsonEncodesTheToArrayResult()
<ide> $fluent->expects($this->once())->method('toArray')->will($this->returnValue('foo'));
<ide> $results = $fluent->toJson();
<ide>
<del> $this->assertEquals(json_encode('foo'), $results);
<add> $this->assertJsonStringEqualsJsonString(json_encode('foo'), $results);
<ide> }
<ide> }
<ide>
<ide><path>tests/Support/SupportHelpersTest.php
<ide> public function testStrIs()
<ide> public function testStrRandom()
<ide> {
<ide> $result = Str::random(20);
<del> $this->assertTrue(is_string($result));
<add> $this->assertInternalType('string', $result);
<ide> $this->assertEquals(20, strlen($result));
<ide> }
<ide> | 6 |
PHP | PHP | remove unused variable | e9e797c8fae530980695958ac726cbf5fba88da3 | <ide><path>src/Cache/CacheEngine.php
<ide> public function setMultiple($values, $ttl = null): bool
<ide> $this->setConfig('duration', $ttl);
<ide> }
<ide> try {
<del> $return = [];
<ide> foreach ($values as $key => $value) {
<ide> $success = $this->set($key, $value);
<ide> if ($success === false) { | 1 |
Javascript | Javascript | remove unused variable | 7c38b292f8ad7057b62f7ee4a2c651d0213d5e5f | <ide><path>src/ng/compile.js
<ide> function $CompileProvider($provide) {
<ide> j = 0, jj = nAttrs && nAttrs.length; j < jj; j++) {
<ide> var attrStartName = false;
<ide> var attrEndName = false;
<del> var index;
<ide>
<ide> attr = nAttrs[j];
<ide> if (!msie || msie >= 8 || attr.specified) { | 1 |
PHP | PHP | remove password rule | d6fd27cdb37b41f58135a8148e37e523f5a7c632 | <ide><path>src/Illuminate/Validation/Concerns/ValidatesAttributes.php
<ide> public function validateNumeric($attribute, $value)
<ide> return is_numeric($value);
<ide> }
<ide>
<del> /**
<del> * Validate that the password of the currently authenticated user matches the given value.
<del> *
<del> * @param string $attribute
<del> * @param mixed $value
<del> * @param array $parameters
<del> * @return bool
<del> */
<del> protected function validatePassword($attribute, $value, $parameters)
<del> {
<del> return $this->validateCurrentPassword($attribute, $value, $parameters);
<del> }
<del>
<ide> /**
<ide> * Validate that an attribute exists even if not filled.
<ide> *
<ide><path>tests/Validation/ValidationValidatorTest.php
<ide> public function testValidationStopsAtFailedPresenceCheck()
<ide> $this->assertEquals(['validation.present'], $v->errors()->get('name'));
<ide> }
<ide>
<del> public function testValidatePassword()
<del> {
<del> // Fails when user is not logged in.
<del> $auth = m::mock(Guard::class);
<del> $auth->shouldReceive('guard')->andReturn($auth);
<del> $auth->shouldReceive('guest')->andReturn(true);
<del>
<del> $hasher = m::mock(Hasher::class);
<del>
<del> $container = m::mock(Container::class);
<del> $container->shouldReceive('make')->with('auth')->andReturn($auth);
<del> $container->shouldReceive('make')->with('hash')->andReturn($hasher);
<del>
<del> $trans = $this->getTranslator();
<del> $trans->shouldReceive('get')->andReturnArg(0);
<del>
<del> $v = new Validator($trans, ['password' => 'foo'], ['password' => 'password']);
<del> $v->setContainer($container);
<del>
<del> $this->assertFalse($v->passes());
<del>
<del> // Fails when password is incorrect.
<del> $user = m::mock(Authenticatable::class);
<del> $user->shouldReceive('getAuthPassword');
<del>
<del> $auth = m::mock(Guard::class);
<del> $auth->shouldReceive('guard')->andReturn($auth);
<del> $auth->shouldReceive('guest')->andReturn(false);
<del> $auth->shouldReceive('user')->andReturn($user);
<del>
<del> $hasher = m::mock(Hasher::class);
<del> $hasher->shouldReceive('check')->andReturn(false);
<del>
<del> $container = m::mock(Container::class);
<del> $container->shouldReceive('make')->with('auth')->andReturn($auth);
<del> $container->shouldReceive('make')->with('hash')->andReturn($hasher);
<del>
<del> $trans = $this->getTranslator();
<del> $trans->shouldReceive('get')->andReturnArg(0);
<del>
<del> $v = new Validator($trans, ['password' => 'foo'], ['password' => 'password']);
<del> $v->setContainer($container);
<del>
<del> $this->assertFalse($v->passes());
<del>
<del> // Succeeds when password is correct.
<del> $user = m::mock(Authenticatable::class);
<del> $user->shouldReceive('getAuthPassword');
<del>
<del> $auth = m::mock(Guard::class);
<del> $auth->shouldReceive('guard')->andReturn($auth);
<del> $auth->shouldReceive('guest')->andReturn(false);
<del> $auth->shouldReceive('user')->andReturn($user);
<del>
<del> $hasher = m::mock(Hasher::class);
<del> $hasher->shouldReceive('check')->andReturn(true);
<del>
<del> $container = m::mock(Container::class);
<del> $container->shouldReceive('make')->with('auth')->andReturn($auth);
<del> $container->shouldReceive('make')->with('hash')->andReturn($hasher);
<del>
<del> $trans = $this->getTranslator();
<del> $trans->shouldReceive('get')->andReturnArg(0);
<del>
<del> $v = new Validator($trans, ['password' => 'foo'], ['password' => 'password']);
<del> $v->setContainer($container);
<del>
<del> $this->assertTrue($v->passes());
<del>
<del> // We can use a specific guard.
<del> $user = m::mock(Authenticatable::class);
<del> $user->shouldReceive('getAuthPassword');
<del>
<del> $auth = m::mock(Guard::class);
<del> $auth->shouldReceive('guard')->with('custom')->andReturn($auth);
<del> $auth->shouldReceive('guest')->andReturn(false);
<del> $auth->shouldReceive('user')->andReturn($user);
<del>
<del> $hasher = m::mock(Hasher::class);
<del> $hasher->shouldReceive('check')->andReturn(true);
<del>
<del> $container = m::mock(Container::class);
<del> $container->shouldReceive('make')->with('auth')->andReturn($auth);
<del> $container->shouldReceive('make')->with('hash')->andReturn($hasher);
<del>
<del> $trans = $this->getTranslator();
<del> $trans->shouldReceive('get')->andReturnArg(0);
<del>
<del> $v = new Validator($trans, ['password' => 'foo'], ['password' => 'password:custom']);
<del> $v->setContainer($container);
<del>
<del> $this->assertTrue($v->passes());
<del> }
<del>
<ide> public function testValidatePresent()
<ide> {
<ide> $trans = $this->getIlluminateArrayTranslator(); | 2 |
PHP | PHP | remove dead code | 088833e0754fd837fa031b5e77b0dda04e55d5da | <ide><path>tests/test_app/TestApp/Controller/TestsAppsController.php
<ide>
<ide> class TestsAppsController extends AppController
<ide> {
<del>
<del> public $uses = [];
<del>
<ide> public $components = ['RequestHandler'];
<ide>
<ide> public function index() | 1 |
Javascript | Javascript | change the hostname to an invalid name | 64cf71195c83a2ff06319cf8719ff31058c9ff80 | <ide><path>test/parallel/test-net-better-error-messages-port-hostname.js
<ide> var common = require('../common');
<ide> var net = require('net');
<ide> var assert = require('assert');
<ide>
<del>var c = net.createConnection(common.PORT, 'blah.blah');
<add>var c = net.createConnection(common.PORT, '...');
<ide>
<ide> c.on('connect', assert.fail);
<ide>
<ide> c.on('error', common.mustCall(function(e) {
<ide> assert.equal(e.code, 'ENOTFOUND');
<ide> assert.equal(e.port, common.PORT);
<del> assert.equal(e.hostname, 'blah.blah');
<add> assert.equal(e.hostname, '...');
<ide> })); | 1 |
PHP | PHP | add test for element | 4f12f254f463f77279b2dac7fece6c01b8a109cf | <ide><path>lib/Cake/Test/Case/View/Helper/TimeHelperTest.php
<ide> public function testTimeHelperProxyMethodCalls() {
<ide> 'isToday', 'isThisMonth', 'isThisYear', 'wasYesterday',
<ide> 'isTomorrow', 'toQuarter', 'toUnix', 'toAtom', 'toRSS',
<ide> 'timeAgoInWords', 'wasWithinLast', 'gmt', 'format', 'i18nFormat',
<del> );
<add> );
<ide> $CakeTime = $this->getMock('CakeTimeMock', $methods);
<ide> $Time = new TimeHelperTestObject($this->View, array('engine' => 'CakeTimeMock'));
<ide> $Time->attach($CakeTime);
<ide> public function testEngineOverride() {
<ide> CakePlugin::unload('TestPlugin');
<ide> }
<ide>
<add>/**
<add> * Test element wrapping in timeAgoInWords
<add> *
<add> * @return void
<add> */
<add> public function testTimeAgoInWords() {
<add> $Time = new TimeHelper($this->View);
<add> $timestamp = strtotime('+8 years, +4 months +2 weeks +3 days');
<add> $result = $Time->timeAgoInWords(
<add> $timestamp,
<add> array('end' => '1 years', 'element' => 'span')
<add> );
<add> $expected = array(
<add> 'span' => array(
<add> 'title' => $timestamp,
<add> 'class' => 'time-ago-in-words'
<add> ),
<add> 'on ' . date('j/n/y', $timestamp),
<add> '/span'
<add> );
<add> $this->assertTags($result, $expected);
<add>
<add> $timestamp = strtotime('+2 weeks');
<add> $result = $Time->timeAgoInWords(
<add> $timestamp,
<add> array('end' => '1 years', 'element' => 'div')
<add> );
<add> $expected = array(
<add> 'div' => array(
<add> 'title' => $timestamp,
<add> 'class' => 'time-ago-in-words'
<add> ),
<add> '2 weeks',
<add> '/div'
<add> );
<add> $this->assertTags($result, $expected);
<add> }
<add>
<ide> } | 1 |
Ruby | Ruby | fix outdated comment [ci skip] | e5926a3c44a2666bd09bdae21a273e899074d8f1 | <ide><path>actionview/lib/action_view/template.rb
<ide> def locals_code
<ide> locals = @locals - Module::RUBY_RESERVED_KEYWORDS
<ide> locals = locals.grep(/\A@?(?![A-Z0-9])(?:[[:alnum:]_]|[^\0-\177])+\z/)
<ide>
<del> # Double assign to suppress the dreaded 'assigned but unused variable' warning
<add> # Assign for the same variable is to suppress unused variable warning
<ide> locals.each_with_object("".dup) { |key, code| code << "#{key} = local_assigns[:#{key}]; #{key} = #{key};" }
<ide> end
<ide> | 1 |
PHP | PHP | fix deprecation warnings in controllertest | ca9b8c40a31f15bc7673c1cf70bbe8fb628a9570 | <ide><path>src/Controller/Controller.php
<ide> public function redirect($url, $status = 302)
<ide> {
<ide> $this->autoRender = false;
<ide>
<del> $response = $this->response;
<ide> if ($status) {
<del> $response = $response->withStatus($status);
<add> $this->response = $this->response->withStatus($status);
<ide> }
<ide>
<del> $event = $this->dispatchEvent('Controller.beforeRedirect', [$url, $response]);
<add> $event = $this->dispatchEvent('Controller.beforeRedirect', [$url, $this->response]);
<ide> if ($event->getResult() instanceof Response) {
<ide> return $this->response = $event->getResult();
<ide> }
<ide> if ($event->isStopped()) {
<ide> return null;
<ide> }
<add> $response = $this->response;
<ide>
<ide> if (!$response->getHeaderLine('Location')) {
<ide> $response = $response->withLocation(Router::url($url, true));
<ide><path>tests/TestCase/Controller/ControllerTest.php
<ide> public function testBeforeRenderCallbackChangingViewClass()
<ide> $debug = Configure::read('debug');
<ide> Configure::write('debug', false);
<ide> $result = $Controller->render('index');
<del> $this->assertEquals('{"test":"value"}', $result->body());
<add> $this->assertEquals('{"test":"value"}', (string)$result->getBody());
<ide> Configure::write('debug', $debug);
<ide> }
<ide>
<ide> public function testRedirectByCode($code, $msg)
<ide>
<ide> $response = $Controller->redirect('http://cakephp.org', (int)$code);
<ide> $this->assertSame($response, $Controller->response);
<del> $this->assertEquals($code, $response->statusCode());
<del> $this->assertEquals('http://cakephp.org', $response->header()['Location']);
<add> $this->assertEquals($code, $response->getStatusCode());
<add> $this->assertEquals('http://cakephp.org', $response->getHeaderLine('Location'));
<ide> $this->assertFalse($Controller->autoRender);
<ide> }
<ide>
<ide> public function testRedirectBeforeRedirectModifyingUrl()
<ide> $Controller = new Controller(null, new Response());
<ide>
<ide> $Controller->getEventManager()->on('Controller.beforeRedirect', function (Event $event, $url, Response $response) {
<del> $response = $response->withLocation('https://book.cakephp.org');
<add> $controller = $event->getSubject();
<add> $controller->response = $response->withLocation('https://book.cakephp.org');
<ide> });
<ide>
<ide> $response = $Controller->redirect('http://cakephp.org', 301);
<del> $this->assertEquals('https://book.cakephp.org', $response->header()['Location']);
<del> $this->assertEquals(301, $response->statusCode());
<add> $this->assertEquals('https://book.cakephp.org', $response->getHeaderLine('Location'));
<add> $this->assertEquals(301, $response->getStatusCode());
<ide> }
<ide>
<ide> /**
<ide> public function testRedirectBeforeRedirectModifyingUrl()
<ide> */
<ide> public function testRedirectBeforeRedirectModifyingStatusCode()
<ide> {
<del> $Response = $this->getMockBuilder('Cake\Http\Response')
<del> ->setMethods(['stop'])
<del> ->getMock();
<del> $Controller = new Controller(null, $Response);
<add> $response = new Response();
<add> $Controller = new Controller(null, $response);
<ide>
<ide> $Controller->getEventManager()->on('Controller.beforeRedirect', function (Event $event, $url, Response $response) {
<del> $response = $response->withStatus(302);
<add> $controller = $event->getSubject();
<add> $controller->response = $response->withStatus(302);
<ide> });
<ide>
<ide> $response = $Controller->redirect('http://cakephp.org', 301); | 2 |
Python | Python | improve error message | 1e46a5d3ec173b3f5f264a080bd29901c3839bd7 | <ide><path>keras/layers/core.py
<ide> def set_weights(self, weights):
<ide> str(len(weights)) + ' provided weights)')
<ide> for p, w in zip(params, weights):
<ide> if K.get_value(p).shape != w.shape:
<del> raise Exception('Layer shape %s not compatible with weight shape %s.' % (K.get_value(p).shape, w.shape))
<add> raise Exception('Layer weight shape %s not compatible with provided weight shape %s.' % (K.get_value(p).shape, w.shape))
<ide> K.set_value(p, w)
<ide>
<ide> def get_weights(self): | 1 |
Javascript | Javascript | add --watch mode to "yarn build" | ed36df46c60ea1654d403aaad899cb73f997bdfb | <ide><path>scripts/rollup/build.js
<ide> 'use strict';
<ide>
<del>const {rollup} = require('rollup');
<add>const rollup = require('rollup');
<ide> const babel = require('rollup-plugin-babel');
<ide> const closure = require('./plugins/closure-plugin');
<ide> const commonjs = require('rollup-plugin-commonjs');
<ide> const requestedBundleTypes = argv.type
<ide> : [];
<ide> const requestedBundleNames = parseRequestedNames(argv._, 'lowercase');
<ide> const forcePrettyOutput = argv.pretty;
<add>const isWatchMode = argv.watch;
<ide> const syncFBSourcePath = argv['sync-fbsource'];
<ide> const syncWWWPath = argv['sync-www'];
<ide> const shouldExtractErrors = argv['extract-errors'];
<ide> async function createBundle(bundle, bundleType) {
<ide> bundleType
<ide> );
<ide>
<del> console.log(`${chalk.bgYellow.black(' BUILDING ')} ${logKey}`);
<del> try {
<del> const result = await rollup(rollupConfig);
<del> await result.write(rollupOutputOptions);
<del> } catch (error) {
<del> console.log(`${chalk.bgRed.black(' OH NOES! ')} ${logKey}\n`);
<del> handleRollupError(error);
<del> throw error;
<del> }
<del> for (let i = 0; i < otherOutputPaths.length; i++) {
<del> await asyncCopyTo(mainOutputPath, otherOutputPaths[i]);
<add> if (isWatchMode) {
<add> rollupConfig.output = [rollupOutputOptions];
<add> const watcher = rollup.watch(rollupConfig);
<add> watcher.on('event', async event => {
<add> switch (event.code) {
<add> case 'BUNDLE_START':
<add> console.log(`${chalk.bgYellow.black(' BUILDING ')} ${logKey}`);
<add> break;
<add> case 'BUNDLE_END':
<add> for (let i = 0; i < otherOutputPaths.length; i++) {
<add> await asyncCopyTo(mainOutputPath, otherOutputPaths[i]);
<add> }
<add> console.log(`${chalk.bgGreen.black(' COMPLETE ')} ${logKey}\n`);
<add> break;
<add> case 'ERROR':
<add> case 'FATAL':
<add> console.log(`${chalk.bgRed.black(' OH NOES! ')} ${logKey}\n`);
<add> handleRollupError(event.error);
<add> break;
<add> }
<add> });
<add> } else {
<add> console.log(`${chalk.bgYellow.black(' BUILDING ')} ${logKey}`);
<add> try {
<add> const result = await rollup.rollup(rollupConfig);
<add> await result.write(rollupOutputOptions);
<add> } catch (error) {
<add> console.log(`${chalk.bgRed.black(' OH NOES! ')} ${logKey}\n`);
<add> handleRollupError(error);
<add> throw error;
<add> }
<add> for (let i = 0; i < otherOutputPaths.length; i++) {
<add> await asyncCopyTo(mainOutputPath, otherOutputPaths[i]);
<add> }
<add> console.log(`${chalk.bgGreen.black(' COMPLETE ')} ${logKey}\n`);
<ide> }
<del> console.log(`${chalk.bgGreen.black(' COMPLETE ')} ${logKey}\n`);
<ide> }
<ide>
<ide> function handleRollupWarning(warning) { | 1 |
Ruby | Ruby | escape any spaces in path during strip step | 1ead7d380ab85946f156ef64a0bc6324691b1704 | <ide><path>Library/Homebrew/brew.h.rb
<ide> def strip path, args=''
<ide> puts "strip #{path}" if ARGV.verbose?
<ide> path.chmod 0644 # so we can strip
<ide> unless path.stat.nlink > 1
<del> `strip #{args} #{path}`
<add> system "strip", *(args+path)
<ide> else
<add> path = path.to_s.gsub ' ', '\\ '
<add>
<ide> # strip unlinks the file and recreates it, thus breaking hard links!
<ide> # is this expected behaviour? patch does it too… still, this fixes it
<del> tmp = `/usr/bin/mktemp -t #{path.basename}`.chomp
<add> tmp = `/usr/bin/mktemp -t homebrew_strip`.chomp
<ide> `/usr/bin/strip #{args} -o #{tmp} #{path}`
<ide> `/bin/cat #{tmp} > #{path}`
<ide> File.unlink tmp | 1 |
Java | Java | fix typo in observableretrytest.java | 1d7e8acb3e1fa035a30248a9195a35c7537200b7 | <ide><path>src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableRetryTest.java
<ide> public void run() {
<ide> }
<ide> }
<ide>
<del> /** Observer for listener on seperate thread. */
<add> /** Observer for listener on separate thread. */
<ide> static final class AsyncObserver<T> extends DefaultObserver<T> {
<ide>
<ide> protected CountDownLatch latch = new CountDownLatch(1); | 1 |
Python | Python | add additional test case, use better method name | 0616c2e8b99736a63d1bee9c5bb4cbd602373712 | <ide><path>libcloud/test/compute/test_gce.py
<ide> def setUp(self):
<ide> def test_default_scopes(self):
<ide> self.assertIsNone(self.driver.scopes)
<ide>
<del> def test_default_service_account(self):
<add> def test_build_service_account_gce_struct_default_service_account(self):
<ide> result = self.driver._build_service_accounts_gce_list(service_accounts=None)
<ide> self.assertEqual(result, [
<ide> {'email': 'default',
<ide> 'scopes': ['https://www.googleapis.com/auth/devstorage.read_only']
<ide> }
<ide> ])
<ide>
<del> def test_no_service_account(self):
<add> def test_build_service_account_gce_struct_no_service_account(self):
<ide> result = self.driver._build_service_accounts_gce_list(service_accounts=[])
<ide> self.assertEqual(result, [])
<ide>
<add> def test_build_service_account_gce_struct_custom_service_account(self):
<add> data = [
<add> {'email': '1', 'scopes': ['a']},
<add> {'email': '2', 'scopes': ['b']}
<add> ]
<add> expected_result = [
<add> {'email': '1', 'scopes': ['https://www.googleapis.com/auth/a']},
<add> {'email': '2', 'scopes': ['https://www.googleapis.com/auth/b']}
<add> ]
<add>
<add> result = self.driver._build_service_accounts_gce_list(service_accounts=data)
<add> self.assertEqual(result, expected_result)
<add>
<ide> def test_timestamp_to_datetime(self):
<ide> timestamp1 = '2013-06-26T10:05:19.340-07:00'
<ide> datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19) | 1 |
Text | Text | remove unnecessary comments | 591a534701663af6a79293fb17cbb1ff23a2650d | <ide><path>curriculum/challenges/english/02-javascript-algorithms-and-data-structures/basic-javascript/access-multi-dimensional-arrays-with-indexes.md
<ide> if(typeof myArray !== "undefined"){(function(){return "myData: " + myData + " my
<ide> ## --seed-contents--
<ide>
<ide> ```js
<del>// Setup
<ide> var myArray = [[1,2,3], [4,5,6], [7,8,9], [[10,11,12], 13, 14]];
<ide>
<del>// Only change code below this line
<ide> var myData = myArray[0][0];
<ide> ```
<ide> | 1 |
Ruby | Ruby | create unified test helper | 6d726e1bd6676d97bf41bc800942881cf8c3e78f | <ide><path>Library/Homebrew/gpg.rb
<ide> def self.cleanup_test_processes!
<ide> system gpgconf, "--homedir", "keyrings/live", "--kill",
<ide> "gpg-agent"
<ide> end
<add>
<add> def self.test(path)
<add> create_test_key(path)
<add> begin
<add> yield
<add> ensure
<add> cleanup_test_processes!
<add> end
<add> end
<ide> end | 1 |
Python | Python | update percentile/quantile docs | 7d8a8e746fc841a99f71242f60559b1be2e7340c | <ide><path>numpy/lib/function_base.py
<ide> def percentile(a,
<ide> method : str, optional
<ide> This parameter specifies the method to use for estimating the
<ide> percentile. There are many different methods, some unique to NumPy.
<del> See the notes for explanation. The options aligning with the R types
<del> and the H&F paper are:
<del>
<del> * (H&F 1): 'inverted_cdf'
<del> * (H&F 2): 'averaged_inverted_cdf'
<del> * (H&F 3): 'closest_observation'
<del> * (H&F 4): 'interpolated_inverted_cdf'
<del> * (H&F 5): 'hazen'
<del> * (H&F 6): 'weibull'
<del> * (H&F 7): 'linear' (default)
<del> * (H&F 8): 'median_unbiased'
<del> * (H&F 9): 'normal_unbiased'
<del>
<del> Mainly for compatibility reasons, NumPy also supports the following
<del> options which appear to be unique to NumPy:
<add> See the notes for explanation. The options sorted by their R type
<add> as summarized in the H&F paper [1]_ are:
<add>
<add> 1. 'inverted_cdf'
<add> 2. 'averaged_inverted_cdf'
<add> 3. 'closest_observation'
<add> 4. 'interpolated_inverted_cdf'
<add> 5. 'hazen'
<add> 6. 'weibull'
<add> 7. 'linear' (default)
<add> 8. 'median_unbiased'
<add> 9. 'normal_unbiased'
<add>
<add> The first three methods are discontiuous. NumPy further defines the
<add> following discontinuous variations of the default 'linear' (7.) option:
<ide>
<ide> * 'lower'
<ide> * 'higher',
<ide> def quantile(a,
<ide> method : str, optional
<ide> This parameter specifies the method to use for estimating the
<ide> quantile. There are many different methods, some unique to NumPy.
<del> See the notes for explanation. The options aligning with the R types
<del> and the H&F paper are:
<del>
<del> * (H&F 1): 'inverted_cdf'
<del> * (H&F 2): 'averaged_inverted_cdf'
<del> * (H&F 3): 'closest_observation'
<del> * (H&F 4): 'interpolated_inverted_cdf'
<del> * (H&F 5): 'hazen'
<del> * (H&F 6): 'weibull'
<del> * (H&F 7): 'linear' (default)
<del> * (H&F 8): 'median_unbiased'
<del> * (H&F 9): 'normal_unbiased'
<del>
<del> Mainly for compatibility reasons, NumPy also supports the following
<del> options which appear to be unique to NumPy:
<add> See the notes for explanation. The options sorted by their R type
<add> as summarized in the H&F paper [1]_ are:
<add>
<add> 1. 'inverted_cdf'
<add> 2. 'averaged_inverted_cdf'
<add> 3. 'closest_observation'
<add> 4. 'interpolated_inverted_cdf'
<add> 5. 'hazen'
<add> 6. 'weibull'
<add> 7. 'linear' (default)
<add> 8. 'median_unbiased'
<add> 9. 'normal_unbiased'
<add>
<add> The first three methods are discontiuous. NumPy further defines the
<add> following discontinuous variations of the default 'linear' (7.) option:
<ide>
<ide> * 'lower'
<ide> * 'higher',
<ide><path>numpy/lib/nanfunctions.py
<ide> def nanpercentile(
<ide> method : str, optional
<ide> This parameter specifies the method to use for estimating the
<ide> percentile. There are many different methods, some unique to NumPy.
<del> See the notes for explanation. The options aligning with the R types
<del> and the H&F paper are:
<del>
<del> * (H&F 1): 'inverted_cdf'
<del> * (H&F 2): 'averaged_inverted_cdf'
<del> * (H&F 3): 'closest_observation'
<del> * (H&F 4): 'interpolated_inverted_cdf'
<del> * (H&F 5): 'hazen'
<del> * (H&F 6): 'weibull'
<del> * (H&F 7): 'linear' (default)
<del> * (H&F 8): 'median_unbiased'
<del> * (H&F 9): 'normal_unbiased'
<del>
<del> Mainly for compatibility reasons, NumPy also supports the following
<del> options which appear to be unique to NumPy:
<add> See the notes for explanation. The options sorted by their R type
<add> as summarized in the H&F paper [1]_ are:
<add>
<add> 1. 'inverted_cdf'
<add> 2. 'averaged_inverted_cdf'
<add> 3. 'closest_observation'
<add> 4. 'interpolated_inverted_cdf'
<add> 5. 'hazen'
<add> 6. 'weibull'
<add> 7. 'linear' (default)
<add> 8. 'median_unbiased'
<add> 9. 'normal_unbiased'
<add>
<add> The first three methods are discontiuous. NumPy further defines the
<add> following discontinuous variations of the default 'linear' (7.) option:
<ide>
<ide> * 'lower'
<ide> * 'higher',
<ide> def nanpercentile(
<ide> array([7., 2.])
<ide> >>> assert not np.all(a==b)
<ide>
<add> References
<add> ----------
<add> .. [1] R. J. Hyndman and Y. Fan,
<add> "Sample quantiles in statistical packages,"
<add> The American Statistician, 50(4), pp. 361-365, 1996
<add>
<ide> """
<ide> if interpolation is not None:
<ide> method = function_base._check_interpolation_as_method(
<ide> def nanquantile(
<ide> method : str, optional
<ide> This parameter specifies the method to use for estimating the
<ide> quantile. There are many different methods, some unique to NumPy.
<del> See the notes for explanation. The options aligning with the R types
<del> and the H&F paper are:
<del>
<del> * (H&F 1): 'inverted_cdf'
<del> * (H&F 2): 'averaged_inverted_cdf'
<del> * (H&F 3): 'closest_observation'
<del> * (H&F 4): 'interpolated_inverted_cdf'
<del> * (H&F 5): 'hazen'
<del> * (H&F 6): 'weibull'
<del> * (H&F 7): 'linear' (default)
<del> * (H&F 8): 'median_unbiased'
<del> * (H&F 9): 'normal_unbiased'
<del>
<del> Mainly for compatibility reasons, NumPy also supports the following
<del> options which appear to be unique to NumPy:
<add> See the notes for explanation. The options sorted by their R type
<add> as summarized in the H&F paper [1]_ are:
<add>
<add> 1. 'inverted_cdf'
<add> 2. 'averaged_inverted_cdf'
<add> 3. 'closest_observation'
<add> 4. 'interpolated_inverted_cdf'
<add> 5. 'hazen'
<add> 6. 'weibull'
<add> 7. 'linear' (default)
<add> 8. 'median_unbiased'
<add> 9. 'normal_unbiased'
<add>
<add> The first three methods are discontiuous. NumPy further defines the
<add> following discontinuous variations of the default 'linear' (7.) option:
<ide>
<ide> * 'lower'
<ide> * 'higher',
<ide> def nanquantile(
<ide> array([7., 2.])
<ide> >>> assert not np.all(a==b)
<ide>
<add> References
<add> ----------
<add> .. [1] R. J. Hyndman and Y. Fan,
<add> "Sample quantiles in statistical packages,"
<add> The American Statistician, 50(4), pp. 361-365, 1996
<add>
<ide> """
<ide> if interpolation is not None:
<ide> method = function_base._check_interpolation_as_method( | 2 |
PHP | PHP | take extra junk out of route\parser comments | 89239abd9e1b2168fe2fc69908e89357024eeb8b | <ide><path>system/route/parser.php
<ide> public static function parameters($uri, $route)
<ide> $route_segments = explode('/', $route);
<ide>
<ide> // --------------------------------------------------------------
<del> // Extract all of the parameters out of the URI. Any route
<del> // segment wrapped in parentheses is considered a parameter.
<add> // Any route segment wrapped in parentheses is a parameter.
<ide> // --------------------------------------------------------------
<ide> for ($i = 0; $i < count($route_segments); $i++)
<ide> { | 1 |
Javascript | Javascript | fix bug in todomvc | a32276e400785fdeafdfb2685cd8154aa4a7cc38 | <ide><path>examples/todomvc/js/app.js
<ide> function cx(obj) {
<ide>
<ide> var TodoItem = React.createClass({
<ide> handleSubmit: React.autoBind(function() {
<del> var val = this.refs.editField.getDOMNode().value.trim();
<add> var val = this.state.editText;
<ide> if (val) {
<ide> this.props.onSave(val);
<del> this.refs.editField.getDOMNode().value = '';
<add> this.setState({editField: ''});
<ide> }
<ide> return false;
<ide> }),
<ide> var TodoItem = React.createClass({
<ide> if (event.nativeEvent.keyCode === 27) {
<ide> this.handleSubmit();
<ide> }
<add> this.setState({editText: event.target.value});
<ide> }),
<add> getInitialState: function() {
<add> return {editText: this.props.todo.title};
<add> },
<add> componentWillReceiveProps: function(nextProps) {
<add> if (nextProps.todo.title !== this.props.todo.title) {
<add> this.setState(this.getInitialState());
<add> }
<add> },
<ide> render: function() {
<ide> return (
<ide> <li class={cx({completed: this.props.todo.completed, editing: this.props.editing})}>
<ide> var TodoItem = React.createClass({
<ide> <input
<ide> ref="editField"
<ide> class="edit"
<del> value={this.props.todo.title}
<add> value={this.state.editText}
<ide> onBlur={this.handleSubmit}
<del> onKeyDown={this.handleKey}
<add> onKeyUp={this.handleKey}
<ide> />
<ide> <input type="submit" class="submitButton" />
<ide> </form> | 1 |
Ruby | Ruby | update expat check | 29ac3efe2e5615ed1c89cc51deac0f2c5d55fbfe | <ide><path>Library/Homebrew/cmd/doctor.rb
<ide> def check_for_CLICOLOR_FORCE
<ide>
<ide> def check_for_other_frameworks
<ide> # Other frameworks that are known to cause problems when present
<del> if File.exist? "/Library/Frameworks/expat.framework"
<del> puts <<-EOS.undent
<del> /Library/Frameworks/expat.framework detected
<add> ["/Library/Frameworks/expat.framework", "/Library/Frameworks/libexpat.framework"].each do |f|
<add> if File.exist? f
<add> puts <<-EOS.undent
<add> #{f} detected
<ide>
<del> This will be picked up by Cmake's build system and likey cause the
<del> build to fail, trying to link to a 32-bit version of expat.
<del> You may need to move this file out of the way to compile Cmake.
<add> This will be picked up by Cmake's build system and likey cause the
<add> build to fail, trying to link to a 32-bit version of expat.
<add> You may need to move this file out of the way to compile Cmake.
<ide>
<del> EOS
<add> EOS
<add> end
<ide> end
<ide> end
<ide> | 1 |
Java | Java | trim line from lineinfo only once | c480a99a773d461a6f64dab0dfceeaa6cefa2fb4 | <ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/resource/AppCacheManifestTransformer.java
<ide> */
<ide> public class AppCacheManifestTransformer extends ResourceTransformerSupport {
<ide>
<del> private static final Collection<String> MANIFEST_SECTION_HEADERS =
<del> Arrays.asList("CACHE MANIFEST", "NETWORK:", "FALLBACK:", "CACHE:");
<del>
<ide> private static final String MANIFEST_HEADER = "CACHE MANIFEST";
<ide>
<ide> private static final String CACHE_HEADER = "CACHE:";
<ide>
<add> private static final Collection<String> MANIFEST_SECTION_HEADERS =
<add> Arrays.asList(MANIFEST_HEADER, "NETWORK:", "FALLBACK:", CACHE_HEADER);
<add>
<ide> private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
<ide>
<ide> private static final Log logger = LogFactory.getLog(AppCacheManifestTransformer.class);
<ide> private static class LineInfo {
<ide>
<ide>
<ide> private static boolean initCacheSectionFlag(String line, @Nullable LineInfo previousLine) {
<del> if (MANIFEST_SECTION_HEADERS.contains(line.trim())) {
<del> return line.trim().equals(CACHE_HEADER);
<add> String trimmedLine = line.trim();
<add> if (MANIFEST_SECTION_HEADERS.contains(trimmedLine)) {
<add> return trimmedLine.equals(CACHE_HEADER);
<ide> }
<ide> else if (previousLine != null) {
<ide> return previousLine.isCacheSection();
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/resource/AppCacheManifestTransformer.java
<ide> */
<ide> public class AppCacheManifestTransformer extends ResourceTransformerSupport {
<ide>
<del> private static final Collection<String> MANIFEST_SECTION_HEADERS =
<del> Arrays.asList("CACHE MANIFEST", "NETWORK:", "FALLBACK:", "CACHE:");
<del>
<ide> private static final String MANIFEST_HEADER = "CACHE MANIFEST";
<ide>
<ide> private static final String CACHE_HEADER = "CACHE:";
<ide>
<add> private static final Collection<String> MANIFEST_SECTION_HEADERS =
<add> Arrays.asList(MANIFEST_HEADER, "NETWORK:", "FALLBACK:", CACHE_HEADER);
<add>
<ide> private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
<ide>
<ide> private static final Log logger = LogFactory.getLog(AppCacheManifestTransformer.class);
<ide> public LineInfo(String line, @Nullable LineInfo previous) {
<ide> }
<ide>
<ide> private static boolean initCacheSectionFlag(String line, @Nullable LineInfo previousLine) {
<del> if (MANIFEST_SECTION_HEADERS.contains(line.trim())) {
<del> return line.trim().equals(CACHE_HEADER);
<add> String trimmedLine = line.trim();
<add> if (MANIFEST_SECTION_HEADERS.contains(trimmedLine)) {
<add> return trimmedLine.equals(CACHE_HEADER);
<ide> }
<ide> else if (previousLine != null) {
<ide> return previousLine.isCacheSection(); | 2 |
Text | Text | fix broken link in docs | e8a388a60a7975f38accf5210031da399e90093e | <ide><path>docs/Navigation.md
<ide> const styles = StyleSheet.create({
<ide>
<ide> ### Homework
<ide>
<del>You are now an expert navigator. Take a look at [NavigationExperimental in UIExplorer](https://github.com/facebook/react-native/tree/master/Examples/UIExplorer/NavigationExperimental) to learn how to implement other types of navigation hierarchies, such as a tabbed application with multiple navigation stacks.
<add>You are now an expert navigator. Take a look at [NavigationExperimental in UIExplorer](https://github.com/facebook/react-native/tree/master/Examples/UIExplorer/js/NavigationExperimental) to learn how to implement other types of navigation hierarchies, such as a tabbed application with multiple navigation stacks. | 1 |
Ruby | Ruby | use options instead of @options | 17afec0ae3fe9d8895a7e030f6bfb8443ae83be1 | <ide><path>railties/lib/rails/generators/rails/app/app_generator.rb
<ide> def config
<ide> end
<ide>
<ide> def database_yml
<del> template "config/databases/#{@options[:database]}.yml", "config/database.yml"
<add> template "config/databases/#{options[:database]}.yml", "config/database.yml"
<ide> end
<ide>
<ide> def db
<ide> def javascripts
<ide> empty_directory "public/javascripts"
<ide>
<ide> unless options[:skip_javascript]
<del> copy_file "public/javascripts/#{@options[:javascript]}.js"
<del> copy_file "public/javascripts/#{@options[:javascript]}_ujs.js", "public/javascripts/rails.js"
<add> copy_file "public/javascripts/#{options[:javascript]}.js"
<add> copy_file "public/javascripts/#{options[:javascript]}_ujs.js", "public/javascripts/rails.js"
<ide>
<ide> if options[:javascript] == "prototype"
<ide> copy_file "public/javascripts/controls.js" | 1 |
Ruby | Ruby | use | to have more intent revealing code | 2f0acf6a0a82e60d670b90d39d1278c55fdfe766 | <ide><path>activerecord/lib/active_record/associations/association_scope.rb
<ide> def add_constraints(scope)
<ide>
<ide> scope.includes! item.includes_values
<ide> scope.where_values += item.where_values
<del> scope.order_values += (item.order_values - scope.order_values)
<add> scope.order_values |= item.order_values
<ide> end
<ide> end
<ide> | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.