column         dtype           range
content_type   stringclasses   8 values
main_lang      stringclasses   7 values
message        stringlengths   1–50
sha            stringlengths   40–40
patch          stringlengths   52–962k
file_count     int64           1–300
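The patch column flattens each multi-file diff onto a single line, marking context lines with <ide>, added lines with <add>, removed lines with <del>, and file headers with a <path> tag directly after the opening marker. Below is a minimal Python sketch of turning that encoding back into a readable diff; it relies only on the markers visible in the sample rows that follow, and the whitespace handling (one separating space after each tag) as well as the row["patch"] accessor are assumptions for illustration, not part of the dataset's documented API.

```python
import re

# Markers as they appear in the sample patches below.
# Assumption: every diff line starts with <ide>, <add> or <del>, and a
# <path> tag immediately after one of them introduces a new file section.
TAG = re.compile(r"<(ide|add|del)>(<path>)?")

PREFIX = {"ide": " ", "add": "+", "del": "-"}

def unflatten_patch(patch: str) -> str:
    """Rebuild a line-per-change diff from the flattened patch string."""
    out = []
    tags = list(TAG.finditer(patch))
    for i, tag in enumerate(tags):
        start = tag.end()
        end = tags[i + 1].start() if i + 1 < len(tags) else len(patch)
        body = patch[start:end]
        # Assumption: a single space separates each tag from the line body.
        if body.startswith(" "):
            body = body[1:]
        body = body.rstrip()
        if tag.group(2):  # <path>: file header rather than a diff body line
            out.append(f"### file: {body.strip()}")
        else:
            out.append(PREFIX[tag.group(1)] + body)
    return "\n".join(out)

# Illustrative use, assuming a row loaded from this dataset:
# print(unflatten_patch(row["patch"]))
```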
Text
Text
fix typo in guides/source/engines.md
d2d07b3adea8f443296dfb67c9c568d35d1e13bc
<ide><path>guides/source/engines.md <ide> Now people will only need to go to the root of the engine to see all the posts, <ide> <ide> ### Generating a comments resource <ide> <del>Now that the engine can create new blog posts, it only makes sense to add commenting functionality as well. To do get this, you'll need to generate a comment model, a comment controller and then modify the posts scaffold to display comments and allow people to create new ones. <add>Now that the engine can create new blog posts, it only makes sense to add commenting functionality as well. To do this, you'll need to generate a comment model, a comment controller and then modify the posts scaffold to display comments and allow people to create new ones. <ide> <ide> Run the model generator and tell it to generate a `Comment` model, with the related table having two columns: a `post_id` integer and `text` text column. <ide>
1
Go
Go
fix the storing of setuid bits, etc
5ba24629610c27f08b54ff4c1f000ad6d787c156
<ide><path>archive/archive.go <ide> func addTarFile(path, name string, tw *tar.Writer) error { <ide> } <ide> <ide> func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error { <add> // hdr.Mode is in linux format, which we can use for sycalls, <add> // but for os.Foo() calls we need the mode converted to os.FileMode, <add> // so use hdrInfo.Mode() (they differ for e.g. setuid bits) <add> hdrInfo := hdr.FileInfo() <add> <ide> switch hdr.Typeflag { <ide> case tar.TypeDir: <ide> // Create directory unless it exists as a directory already. <ide> // In that case we just want to merge the two <ide> if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { <del> if err := os.Mkdir(path, os.FileMode(hdr.Mode)); err != nil { <add> if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { <ide> return err <ide> } <ide> } <ide> <ide> case tar.TypeReg, tar.TypeRegA: <ide> // Source is regular file <del> file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode)) <add> file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) <ide> if err != nil { <ide> return err <ide> } <ide> func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) e <ide> // There is no LChmod, so ignore mode for symlink. Also, this <ide> // must happen after chown, as that can modify the file mode <ide> if hdr.Typeflag != tar.TypeSymlink { <del> if err := os.Chmod(path, os.FileMode(hdr.Mode&07777)); err != nil { <add> if err := os.Chmod(path, hdrInfo.Mode()); err != nil { <ide> return err <ide> } <ide> }
1
Javascript
Javascript
use countdown in http-response-statuscode
f3f66a8d5a9b67216d19f6307b2a615f60185333
<ide><path>test/parallel/test-http-response-statuscode.js <ide> const common = require('../common'); <ide> const assert = require('assert'); <ide> const http = require('http'); <add>const Countdown = require('../common/countdown'); <ide> <ide> const MAX_REQUESTS = 13; <ide> let reqNum = 0; <ide> const server = http.Server(common.mustCall(function(req, res) { <ide> }, MAX_REQUESTS)); <ide> server.listen(); <ide> <add>const countdown = new Countdown(MAX_REQUESTS, () => server.close()); <add> <ide> server.on('listening', function makeRequest() { <ide> http.get({ <ide> port: this.address().port <ide> }, (res) => { <ide> assert.strictEqual(res.statusCode, 200); <ide> res.on('end', () => { <del> if (++reqNum < MAX_REQUESTS) <add> countdown.dec(); <add> reqNum = MAX_REQUESTS - countdown.remaining; <add> if (countdown.remaining > 0) <ide> makeRequest.call(this); <ide> }); <ide> res.resume();
1
Java
Java
remove unused code
da071c1fceb48ac32ebab105d04a204d1ac8642e
<ide><path>spring-web/src/main/java/org/springframework/web/util/UrlPathHelper.java <ide> public void setDefaultEncoding(String defaultEncoding) { <ide> } <ide> }; <ide> <del> <del> private static class HttpServletMappingHelper { <del> <del> <del> <del> } <ide> }
1
Javascript
Javascript
extend error context to resource and issuer
f2c0e59e06b6aa409e253cdf37ec20327c4d497a
<ide><path>lib/RuleSet.js <ide> RuleSet.normalizeRule = function(rule) { <ide> var newRule = {}; <ide> var useSource; <ide> var resourceSource; <add> var condition; <ide> <ide> if(rule.test || rule.include || rule.exclude) { <ide> checkResourceSource("test + include + exclude"); <del> var condition = { <add> condition = { <ide> test: rule.test, <ide> include: rule.include, <ide> exclude: rule.exclude <ide> }; <ide> try { <ide> newRule.resource = RuleSet.normalizeCondition(condition); <ide> } catch (error) { <del> var conditionAsText = JSON.stringify(condition, function(key, value) { <del> return (value === undefined) ? "undefined" : value; <del> }, 2) <del> var message = error.message + " in " + conditionAsText; <del> throw new Error(message); <add> throw new Error(RuleSet.buildErrorMessage(condition, error)); <ide> } <ide> } <ide> <ide> if(rule.resource) { <ide> checkResourceSource("resource"); <del> newRule.resource = RuleSet.normalizeCondition(rule.resource); <add> try { <add> newRule.resource = RuleSet.normalizeCondition(rule.resource); <add> } catch (error) { <add> throw new Error(RuleSet.buildErrorMessage(rule.resource, error)); <add> } <ide> } <ide> <ide> if(rule.issuer) { <del> newRule.issuer = RuleSet.normalizeCondition(rule.issuer); <add> try { <add> newRule.issuer = RuleSet.normalizeCondition(rule.issuer); <add> } catch (error) { <add> throw new Error(RuleSet.buildErrorMessage(rule.issuer, error)); <add> } <ide> } <ide> <ide> if(rule.loader && rule.loaders) <ide> RuleSet.normalizeRule = function(rule) { <ide> return newRule; <ide> }; <ide> <add>RuleSet.buildErrorMessage = function buildErrorMessage(condition, error) { <add> var conditionAsText = JSON.stringify(condition, function(key, value) { <add> return (value === undefined) ? "undefined" : value; <add> }, 2) <add> var message = error.message + " in " + conditionAsText; <add> return message; <add>}; <add> <ide> RuleSet.normalizeUse = function normalizeUse(use) { <ide> if(Array.isArray(use)) { <ide> return use.map(RuleSet.normalizeUse).reduce(function(arr, items) { <ide><path>test/RuleSet.test.js <ide> describe("RuleSet", function() { <ide> (match(loader, 'style.css')).should.eql(['css']); <ide> }, /No loader specified/) <ide> }); <del> it('should throw with context if exclude array holds an undefined item', function() { <del> should.throws(function() { <del> var loader = new RuleSet([{ <del> test: /\.css$/, <del> loader: 'css', <del> include: [ <del> 'src', <del> ], <del> exclude: [ <del> 'node_modules', <del> undefined, <del> ], <del> }]); <del> (match(loader, 'style.css')).should.eql(['css']); <del> }, function(err) { <add> describe('when exclude array holds an undefined item', function() { <add> function errorHasContext(err) { <ide> if (/Expected condition but got falsy value/.test(err) <ide> && /test/.test(err) <ide> && /include/.test(err) <ide> describe("RuleSet", function() { <ide> && /undefined/.test(err)) { <ide> return true; <ide> } <del> }) <add> } <add> it('should throw with context', function() { <add> should.throws(function() { <add> var loader = new RuleSet([{ <add> test: /\.css$/, <add> loader: 'css', <add> include: [ <add> 'src', <add> ], <add> exclude: [ <add> 'node_modules', <add> undefined, <add> ], <add> }]); <add> (match(loader, 'style.css')).should.eql(['css']); <add> }, errorHasContext) <add> }); <add> it('in resource should throw with context', function() { <add> should.throws(function() { <add> var loader = new RuleSet([{ <add> resource: { <add> test: /\.css$/, <add> include: [ <add> 'src', <add> ], 
<add> exclude: [ <add> 'node_modules', <add> undefined, <add> ], <add> }, <add> }]); <add> (match(loader, 'style.css')).should.eql(['css']); <add> }, errorHasContext) <add> }); <add> it('in issuer should throw with context', function() { <add> should.throws(function() { <add> var loader = new RuleSet([{ <add> issuer: { <add> test: /\.css$/, <add> include: [ <add> 'src', <add> ], <add> exclude: [ <add> 'node_modules', <add> undefined, <add> ], <add> }, <add> }]); <add> (match(loader, 'style.css')).should.eql(['css']); <add> }, errorHasContext) <add> }); <ide> }); <ide> });
2
Javascript
Javascript
add more functions to the shadernode
31df72c8b528a8ef28c94fa926c843f789f3c0e5
<ide><path>examples/jsm/renderers/nodes/ShaderNode.js <ide> import VarNode from './core/VarNode.js'; <ide> // inputs <ide> import ColorNode from './inputs/ColorNode.js'; <ide> import FloatNode from './inputs/FloatNode.js'; <add>import IntNode from './inputs/IntNode.js'; <ide> import Vector2Node from './inputs/Vector2Node.js'; <ide> import Vector3Node from './inputs/Vector3Node.js'; <ide> import Vector4Node from './inputs/Vector4Node.js'; <ide> export const float = ( val ) => { <ide> <ide> }; <ide> <add>export const int = ( val ) => { <add> <add> return nodeObject( new IntNode( val ).setConst( true ) ); <add> <add>}; <add> <ide> export const color = ( ...params ) => { <ide> <ide> return nodeObject( new ColorNode( new Color( ...params ) ).setConst( true ) ); <ide> export const add = ShaderNodeProxy( OperatorNode, '+' ); <ide> export const sub = ShaderNodeProxy( OperatorNode, '-' ); <ide> export const mul = ShaderNodeProxy( OperatorNode, '*' ); <ide> export const div = ShaderNodeProxy( OperatorNode, '/' ); <add>export const remainder = ShaderNodeProxy( OperatorNode, '%' ); <ide> export const equal = ShaderNodeProxy( OperatorNode, '==' ); <ide> export const assign = ShaderNodeProxy( OperatorNode, '=' ); <add>export const lessThan = ShaderNodeProxy( OperatorNode, '<' ); <ide> export const greaterThan = ShaderNodeProxy( OperatorNode, '>' ); <ide> export const lessThanEqual = ShaderNodeProxy( OperatorNode, '<=' ); <add>export const greaterThanEqual = ShaderNodeProxy( OperatorNode, '>=' ); <ide> export const and = ShaderNodeProxy( OperatorNode, '&&' ); <add>export const or = ShaderNodeProxy( OperatorNode, '||' ); <add>export const xor = ShaderNodeProxy( OperatorNode, '^^' ); <add>export const bitAnd = ShaderNodeProxy( OperatorNode, '&' ); <add>export const bitOr = ShaderNodeProxy( OperatorNode, '|' ); <add>export const bitXor = ShaderNodeProxy( OperatorNode, '^' ); <add>export const shiftLeft = ShaderNodeProxy( OperatorNode, '<<' ); <add>export const shiftRight = ShaderNodeProxy( OperatorNode, '>>' ); <ide> <ide> export const element = ShaderNodeProxy( ArrayElementNode ); <ide> <ide><path>examples/jsm/renderers/nodes/math/OperatorNode.js <ide> class OperatorNode extends TempNode { <ide> <ide> return 'void'; <ide> <del> } else if ( op === '=' ) { <add> } else if ( op === '=' || op === '%' ) { <ide> <ide> return typeA; <ide> <del> } else if ( op === '==' || op === '&&' ) { <add> } else if ( op === '&' || op === '|' || op === '^' || op === '>>' || op === '<<' ) { <add> <add> return 'int'; <add> <add> } else if ( op === '==' || op === '&&' || op === '||' || op === '^^' ) { <ide> <ide> return 'bool'; <ide> <del> } else if ( op === '<=' || op === '>' ) { <add> } else if ( op === '<=' || op === '>=' || op === '<' || op === '>' ) { <ide> <ide> const length = builder.getTypeLength( output ); <ide>
2
PHP
PHP
fix cs error
b1c09ecd57c275d57677a1b6ec07e2595ce6e896
<ide><path>src/Http/Exception/RedirectException.php <ide> */ <ide> class RedirectException extends Exception <ide> { <del> <ide> /** <ide> * Headers to include in the response. <ide> *
1
Javascript
Javascript
use a test case that demonstrates the fix
0941cc9884e0a1a91fd1bc0ba8b4cff271a5eb14
<ide><path>src/renderers/dom/client/__tests__/ReactMount-test.js <ide> describe('ReactMount', function() { <ide> it('should account for escaping on a checksum mismatch', function () { <ide> var div = document.createElement('div'); <ide> var markup = React.renderToString( <del> <div>This markup contains an html entity: &amp; server text</div>); <add> <div>This markup contains an nbsp entity: &nbsp; server text</div>); <ide> div.innerHTML = markup; <ide> <ide> spyOn(console, 'error'); <ide> React.render( <del> <div>This markup contains an html entity: &amp; client text</div>, div); <add> <div>This markup contains an nbsp entity: &nbsp; client text</div>, <add> div <add> ); <ide> expect(console.error.calls.length).toBe(1); <ide> expect(console.error.calls[0].args[0]).toContain( <del> ' (client) html entity: &amp; client text</div>\n' + <del> ' (server) html entity: &amp; server text</div>' <add> ' (client) nbsp entity: &nbsp; client text</div>\n' + <add> ' (server) nbsp entity: &nbsp; server text</div>' <ide> ); <ide> }); <ide>
1
Python
Python
remove leftover doctestplus plugin
af39d67f78ec512d26d69ee32b3eb6966a1f86c8
<ide><path>doc/conftest.py <ide> def add_np(doctest_namespace): <ide> numpy.random.seed(1) <ide> doctest_namespace['np'] = numpy <ide> <del>pytest_plugins = ["pytest_doctestplus"]
1
Javascript
Javascript
fix url query update if searchparams changes
b465cd07fee443ef23b55d05b838379b4069bfa4
<ide><path>lib/internal/url.js <ide> function update(url, params) { <ide> if (!url) <ide> return; <ide> <del> url[context].query = params.toString(); <add> const ctx = url[context]; <add> const serializedParams = params.toString(); <add> if (serializedParams) { <add> ctx.query = serializedParams; <add> ctx.flags |= binding.URL_FLAGS_HAS_QUERY; <add> } else { <add> ctx.query = null; <add> ctx.flags &= ~binding.URL_FLAGS_HAS_QUERY; <add> } <ide> } <ide> <ide> function getSearchParamPairs(target) { <ide><path>test/parallel/test-whatwg-url-searchparams-delete.js <ide> params.append('first', 10); <ide> params.delete('first'); <ide> assert.strictEqual(false, params.has('first'), <ide> 'Search params object has no "first" name'); <add> <add>// https://github.com/nodejs/node/issues/10480 <add>// Emptying searchParams should correctly update url's query <add>{ <add> const url = new URL('http://domain?var=1&var=2&var=3'); <add> for (const param of url.searchParams.keys()) { <add> url.searchParams.delete(param); <add> } <add> assert.strictEqual(url.searchParams.toString(), ''); <add> assert.strictEqual(url.search, ''); <add> assert.strictEqual(url.href, 'http://domain/'); <add>}
2
Ruby
Ruby
add a translation helper
c9ed2c9bd24b9f4cdfcb692151f87ba900469e71
<ide><path>actionpack/lib/action_view/helpers/translation_helper.rb <add>require 'action_view/helpers/tag_helper' <add> <add>module ActionView <add> module Helpers <add> module TranslationHelper <add> def translate(*args) <add> key, locale, options = I18n.send :process_translate_arguments, *args <add> I18n.translate key, locale, options.merge(:raise => true) <add> <add> rescue I18n::MissingTranslationData => e <add> keys = I18n.send :normalize_translation_keys, locale, key, options[:scope] <add> content_tag('span', keys.join(', '), :class => 'translation_missing') <add> end <add> end <add> end <add>end <ide>\ No newline at end of file <ide><path>actionpack/test/template/translation_helper_test.rb <add>require 'abstract_unit' <add> <add>class TranslationHelperTest < Test::Unit::TestCase <add> include ActionView::Helpers::TagHelper <add> include ActionView::Helpers::TranslationHelper <add> <add> attr_reader :request <add> uses_mocha 'translation_helper_test' do <add> def setup <add> end <add> <add> def test_delegates_to_i18n_setting_the_raise_option <add> I18n.expects(:translate).with(:foo, 'en-US', :raise => true) <add> translate :foo, 'en-US' <add> end <add> <add> def test_returns_missing_translation_message_wrapped_into_span <add> expected = '<span class="translation_missing">en-US, foo</span>' <add> assert_equal expected, translate(:foo) <add> end <add> <add> # def test_error_messages_for_given_a_header_message_option_it_does_not_translate_header_message <add> # I18n.expects(:translate).with(:'header_message', :locale => 'en-US', :scope => [:active_record, :error], :count => 1, :object_name => '').never <add> # error_messages_for(:object => @object, :header_message => 'header message', :locale => 'en-US') <add> # end <add> # <add> # def test_error_messages_for_given_no_header_message_option_it_translates_header_message <add> # I18n.expects(:t).with(:'header_message', :locale => 'en-US', :scope => [:active_record, :error], :count => 1, :object_name => '').returns 'header message' <add> # error_messages_for(:object => @object, :locale => 'en-US') <add> # end <add> # <add> # def test_error_messages_for_given_a_message_option_it_does_not_translate_message <add> # I18n.expects(:t).with(:'message', :locale => 'en-US', :scope => [:active_record, :error]).never <add> # error_messages_for(:object => @object, :message => 'message', :locale => 'en-US') <add> # end <add> # <add> # def test_error_messages_for_given_no_message_option_it_translates_message <add> # I18n.expects(:t).with(:'message', :locale => 'en-US', :scope => [:active_record, :error]).returns 'There were problems with the following fields:' <add> # error_messages_for(:object => @object, :locale => 'en-US') <add> # end <add> end <add>end <ide>\ No newline at end of file
2
Ruby
Ruby
remove the formats writer on templates
a213c4829ab0d87b2a7a74df0e35513c6ea6bdea
<ide><path>actionview/lib/action_view/template.rb <ide> def self.finalize_compiled_template_methods=(_) <ide> <ide> extend Template::Handlers <ide> <del> attr_accessor :locals, :formats, :variants, :virtual_path <add> attr_accessor :locals, :variants, :virtual_path <ide> <ide> attr_reader :source, :identifier, :handler, :original_encoding, :updated_at <ide> <del> attr_reader :variable <add> attr_reader :variable, :formats <ide> <ide> def initialize(source, identifier, handler, format: nil, **details) <ide> unless format <ide> def initialize(source, identifier, handler, format: nil, **details) <ide> @compile_mutex = Mutex.new <ide> end <ide> <add> def formats= <add> end <add> deprecate :formats= <add> <ide> # Returns whether the underlying handler supports streaming. If so, <ide> # a streaming buffer *may* be passed when it starts rendering. <ide> def supports_streaming?
1
Ruby
Ruby
use homebrew_visual as pager if set
b31b14f7869c921f95f46d563ae13034113b5fe6
<ide><path>Library/Homebrew/cmd/cat.rb <ide> def cat <ide> raise "`brew cat` doesn't support multiple arguments" if args.remaining.size > 1 <ide> <ide> cd HOMEBREW_REPOSITORY <del> safe_system "cat", formulae.first.path, *Homebrew.args.passthrough <add> pager = ENV["HOMEBREW_VISUAL"] || "cat" <add> safe_system pager, formulae.first.path, *Homebrew.args.passthrough <ide> end <ide> end
1
Ruby
Ruby
simplify conditions for superenv activation
722a5af4ebf68adb9fc3d2e828d054e228dcf5f0
<ide><path>Library/Homebrew/extend/ENV.rb <ide> require 'extend/ENV/super' <ide> <ide> def superenv? <del> return false if MacOS::Xcode.without_clt? && MacOS.sdk_path.nil? <del> return false unless Superenv.bin && Superenv.bin.directory? <del> return false if ARGV.env == "std" <del> true <add> Superenv.bin && Superenv.bin.directory? && ARGV.env != "std" <ide> end <ide> <ide> module EnvActivation
1
Ruby
Ruby
add support for external ocaml deps via opam
6672ef5f9f4c89353ec5ae8031bb826dcf486b36
<ide><path>Library/Homebrew/dependencies.rb <ide> class DependencyCollector <ide> # Define the languages that we can handle as external dependencies. <ide> LANGUAGE_MODULES = [ <del> :chicken, :jruby, :lua, :node, :perl, :python, :rbx, :ruby <add> :chicken, :jruby, :lua, :node, :ocaml, :perl, :python, :rbx, :ruby <ide> ].freeze <ide> <ide> attr_reader :deps, :requirements <ide><path>Library/Homebrew/requirements.rb <ide> def the_test <ide> when :jruby then %W{/usr/bin/env jruby -rubygems -e require\ '#{@import_name}'} <ide> when :lua then %W{/usr/bin/env luarocks show #{@import_name}} <ide> when :node then %W{/usr/bin/env node -e require('#{@import_name}');} <add> when :ocaml then %W{/usr/bin/env opam list #{@import_name} | grep #{@import_name}} <ide> when :perl then %W{/usr/bin/env perl -e use\ #{@import_name}} <ide> when :python then %W{/usr/bin/env python -c import\ #{@import_name}} <ide> when :ruby then %W{/usr/bin/env ruby -rubygems -e require\ '#{@import_name}'} <ide> def command_line <ide> when :jruby then "jruby -S gem install" <ide> when :lua then "luarocks install" <ide> when :node then "npm install" <add> when :ocaml then "opam install" <ide> when :perl then "cpan -i" <ide> when :python then "pip install" <ide> when :rbx then "rbx gem install"
2
Javascript
Javascript
rebuild distribution files
b061991389ffd7cb2b70ca650c2ed46728fde360
<ide><path>dist/immutable.js <ide> var ITERATOR_SYMBOL = REAL_ITERATOR_SYMBOL || FAUX_ITERATOR_SYMBOL; <ide> <ide> <del> function Iterator(next) { <add> function src_Iterator__Iterator(next) { <ide> this.next = next; <ide> } <ide> <del> Iterator.prototype.toString = function() { <add> src_Iterator__Iterator.prototype.toString = function() { <ide> return '[Iterator]'; <ide> }; <ide> <ide> <del> Iterator.KEYS = ITERATE_KEYS; <del> Iterator.VALUES = ITERATE_VALUES; <del> Iterator.ENTRIES = ITERATE_ENTRIES; <add> src_Iterator__Iterator.KEYS = ITERATE_KEYS; <add> src_Iterator__Iterator.VALUES = ITERATE_VALUES; <add> src_Iterator__Iterator.ENTRIES = ITERATE_ENTRIES; <ide> <del> Iterator.prototype.inspect = <del> Iterator.prototype.toSource = function () { return this.toString(); } <del> Iterator.prototype[ITERATOR_SYMBOL] = function () { <add> src_Iterator__Iterator.prototype.inspect = <add> src_Iterator__Iterator.prototype.toSource = function () { return this.toString(); } <add> src_Iterator__Iterator.prototype[ITERATOR_SYMBOL] = function () { <ide> return this; <ide> }; <ide> <ide> var array = this._array; <ide> var maxIndex = array.length - 1; <ide> var ii = 0; <del> return new Iterator(function() <add> return new src_Iterator__Iterator(function() <ide> {return ii > maxIndex ? <ide> iteratorDone() : <ide> iteratorValue(type, ii, array[reverse ? maxIndex - ii++ : ii++])} <ide> var keys = this._keys; <ide> var maxIndex = keys.length - 1; <ide> var ii = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var key = keys[reverse ? maxIndex - ii : ii]; <ide> return ii++ > maxIndex ? <ide> iteratorDone() : <ide> var iterable = this._iterable; <ide> var iterator = getIterator(iterable); <ide> if (!isIterator(iterator)) { <del> return new Iterator(iteratorDone); <add> return new src_Iterator__Iterator(iteratorDone); <ide> } <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> return step.done ? step : iteratorValue(type, iterations++, step.value); <ide> }); <ide> var iterator = this._iterator; <ide> var cache = this._iteratorCache; <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> if (iterations >= cache.length) { <ide> var step = iterator.next(); <ide> if (step.done) { <ide> if (cache) { <ide> var maxIndex = cache.length - 1; <ide> var ii = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var entry = cache[reverse ? maxIndex - ii : ii]; <ide> return ii++ > maxIndex ? <ide> iteratorDone() : <ide> return value && (value.constructor === Object || value.constructor === undefined); <ide> } <ide> <del> var Math__imul = <add> var src_Math__imul = <ide> typeof Math.imul === 'function' && Math.imul(0xffffffff, 2) === -2 ? <ide> Math.imul : <del> function Math__imul(a, b) { <add> function src_Math__imul(a, b) { <ide> a = a | 0; // int <ide> b = b | 0; // int <ide> var c = a & 0xffff; <ide> } <ide> var iterator = this._iter.__iterator(ITERATE_VALUES, reverse); <ide> var ii = reverse ? resolveSize(this) : 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> return step.done ? step : <ide> iteratorValue(type, reverse ? 
--ii : ii++, step.value, step); <ide> ToIndexedSequence.prototype.__iterator = function(type, reverse) { <ide> var iterator = this._iter.__iterator(ITERATE_VALUES, reverse); <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> return step.done ? step : <ide> iteratorValue(type, iterations++, step.value, step) <ide> <ide> ToSetSequence.prototype.__iterator = function(type, reverse) { <ide> var iterator = this._iter.__iterator(ITERATE_VALUES, reverse); <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> return step.done ? step : <ide> iteratorValue(type, step.value, step.value, step); <ide> <ide> FromEntriesSequence.prototype.__iterator = function(type, reverse) { <ide> var iterator = this._iter.__iterator(ITERATE_VALUES, reverse); <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> while (true) { <ide> var step = iterator.next(); <ide> if (step.done) { <ide> flipSequence.__iteratorUncached = function(type, reverse) { <ide> if (type === ITERATE_ENTRIES) { <ide> var iterator = iterable.__iterator(type, reverse); <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> if (!step.done) { <ide> var k = step.value[0]; <ide> } <ide> mappedSequence.__iteratorUncached = function (type, reverse) { <ide> var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse); <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step = iterator.next(); <ide> if (step.done) { <ide> return step; <ide> filterSequence.__iteratorUncached = function (type, reverse) { <ide> var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse); <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> while (true) { <ide> var step = iterator.next(); <ide> if (step.done) { <ide> <ide> <ide> function countByFactory(iterable, grouper, context) { <del> var groups = Map().asMutable(); <add> var groups = src_Map__Map().asMutable(); <ide> iterable.__iterate(function(v, k) { <ide> groups.update( <ide> grouper.call(context, v, k, iterable), <ide> <ide> function groupByFactory(iterable, grouper, context) { <ide> var isKeyedIter = isKeyed(iterable); <del> var groups = (isOrdered(iterable) ? OrderedMap() : Map()).asMutable(); <add> var groups = (isOrdered(iterable) ? 
OrderedMap() : src_Map__Map()).asMutable(); <ide> iterable.__iterate(function(v, k) { <ide> groups.update( <ide> grouper.call(context, v, k, iterable), <ide> var iterator = sliceSize && iterable.__iterator(type, reverse); <ide> var skipped = 0; <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> while (skipped++ !== resolvedBegin) { <ide> iterator.next(); <ide> } <ide> } <ide> var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse); <ide> var iterating = true; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> if (!iterating) { <ide> return iteratorDone(); <ide> } <ide> var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse); <ide> var skipping = true; <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var step, k, v; <ide> do { <ide> step = iterator.next(); <ide> var iterator = iterable.__iterator(type, reverse); <ide> var stack = []; <ide> var iterations = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> while (iterator) { <ide> var step = iterator.next(); <ide> if (step.done !== false) { <ide> var iterator = iterable.__iterator(ITERATE_VALUES, reverse); <ide> var iterations = 0; <ide> var step; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> if (!step || iterations % 2) { <ide> step = iterator.next(); <ide> if (step.done) { <ide> ); <ide> var iterations = 0; <ide> var isDone = false; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var steps; <ide> if (!isDone) { <ide> steps = iterators.map(function(i ) {return i.next()}); <ide> return iter; <ide> } <ide> <del> createClass(Map, KeyedCollection); <add> createClass(src_Map__Map, KeyedCollection); <ide> <ide> // @pragma Construction <ide> <del> function Map(value) { <add> function src_Map__Map(value) { <ide> return value === null || value === undefined ? emptyMap() : <ide> isMap(value) ? value : <ide> emptyMap().withMutations(function(map ) { <ide> }); <ide> } <ide> <del> Map.prototype.toString = function() { <add> src_Map__Map.prototype.toString = function() { <ide> return this.__toString('Map {', '}'); <ide> }; <ide> <ide> // @pragma Access <ide> <del> Map.prototype.get = function(k, notSetValue) { <add> src_Map__Map.prototype.get = function(k, notSetValue) { <ide> return this._root ? <ide> this._root.get(0, undefined, k, notSetValue) : <ide> notSetValue; <ide> }; <ide> <ide> // @pragma Modification <ide> <del> Map.prototype.set = function(k, v) { <add> src_Map__Map.prototype.set = function(k, v) { <ide> return updateMap(this, k, v); <ide> }; <ide> <del> Map.prototype.setIn = function(keyPath, v) { <add> src_Map__Map.prototype.setIn = function(keyPath, v) { <ide> return this.updateIn(keyPath, NOT_SET, function() {return v}); <ide> }; <ide> <del> Map.prototype.remove = function(k) { <add> src_Map__Map.prototype.remove = function(k) { <ide> return updateMap(this, k, NOT_SET); <ide> }; <ide> <del> Map.prototype.deleteIn = function(keyPath) { <add> src_Map__Map.prototype.deleteIn = function(keyPath) { <ide> return this.updateIn(keyPath, function() {return NOT_SET}); <ide> }; <ide> <del> Map.prototype.update = function(k, notSetValue, updater) { <add> src_Map__Map.prototype.update = function(k, notSetValue, updater) { <ide> return arguments.length === 1 ? 
<ide> k(this) : <ide> this.updateIn([k], notSetValue, updater); <ide> }; <ide> <del> Map.prototype.updateIn = function(keyPath, notSetValue, updater) { <add> src_Map__Map.prototype.updateIn = function(keyPath, notSetValue, updater) { <ide> if (!updater) { <ide> updater = notSetValue; <ide> notSetValue = undefined; <ide> return updatedValue === NOT_SET ? undefined : updatedValue; <ide> }; <ide> <del> Map.prototype.clear = function() { <add> src_Map__Map.prototype.clear = function() { <ide> if (this.size === 0) { <ide> return this; <ide> } <ide> <ide> // @pragma Composition <ide> <del> Map.prototype.merge = function(/*...iters*/) { <add> src_Map__Map.prototype.merge = function(/*...iters*/) { <ide> return mergeIntoMapWith(this, undefined, arguments); <ide> }; <ide> <del> Map.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <add> src_Map__Map.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <ide> return mergeIntoMapWith(this, merger, iters); <ide> }; <ide> <del> Map.prototype.mergeIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1); <add> src_Map__Map.prototype.mergeIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1); <ide> return this.updateIn(keyPath, emptyMap(), function(m ) {return m.merge.apply(m, iters)}); <ide> }; <ide> <del> Map.prototype.mergeDeep = function(/*...iters*/) { <add> src_Map__Map.prototype.mergeDeep = function(/*...iters*/) { <ide> return mergeIntoMapWith(this, deepMerger(undefined), arguments); <ide> }; <ide> <del> Map.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <add> src_Map__Map.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <ide> return mergeIntoMapWith(this, deepMerger(merger), iters); <ide> }; <ide> <del> Map.prototype.mergeDeepIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1); <add> src_Map__Map.prototype.mergeDeepIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1); <ide> return this.updateIn(keyPath, emptyMap(), function(m ) {return m.mergeDeep.apply(m, iters)}); <ide> }; <ide> <del> Map.prototype.sort = function(comparator) { <add> src_Map__Map.prototype.sort = function(comparator) { <ide> // Late binding <ide> return OrderedMap(sortFactory(this, comparator)); <ide> }; <ide> <del> Map.prototype.sortBy = function(mapper, comparator) { <add> src_Map__Map.prototype.sortBy = function(mapper, comparator) { <ide> // Late binding <ide> return OrderedMap(sortFactory(this, comparator, mapper)); <ide> }; <ide> <ide> // @pragma Mutability <ide> <del> Map.prototype.withMutations = function(fn) { <add> src_Map__Map.prototype.withMutations = function(fn) { <ide> var mutable = this.asMutable(); <ide> fn(mutable); <ide> return mutable.wasAltered() ? mutable.__ensureOwner(this.__ownerID) : this; <ide> }; <ide> <del> Map.prototype.asMutable = function() { <add> src_Map__Map.prototype.asMutable = function() { <ide> return this.__ownerID ? 
this : this.__ensureOwner(new OwnerID()); <ide> }; <ide> <del> Map.prototype.asImmutable = function() { <add> src_Map__Map.prototype.asImmutable = function() { <ide> return this.__ensureOwner(); <ide> }; <ide> <del> Map.prototype.wasAltered = function() { <add> src_Map__Map.prototype.wasAltered = function() { <ide> return this.__altered; <ide> }; <ide> <del> Map.prototype.__iterator = function(type, reverse) { <add> src_Map__Map.prototype.__iterator = function(type, reverse) { <ide> return new MapIterator(this, type, reverse); <ide> }; <ide> <del> Map.prototype.__iterate = function(fn, reverse) {var this$0 = this; <add> src_Map__Map.prototype.__iterate = function(fn, reverse) {var this$0 = this; <ide> var iterations = 0; <ide> this._root && this._root.iterate(function(entry ) { <ide> iterations++; <ide> return iterations; <ide> }; <ide> <del> Map.prototype.__ensureOwner = function(ownerID) { <add> src_Map__Map.prototype.__ensureOwner = function(ownerID) { <ide> if (ownerID === this.__ownerID) { <ide> return this; <ide> } <ide> return !!(maybeMap && maybeMap[IS_MAP_SENTINEL]); <ide> } <ide> <del> Map.isMap = isMap; <add> src_Map__Map.isMap = isMap; <ide> <ide> var IS_MAP_SENTINEL = ''; <ide> <del> var MapPrototype = Map.prototype; <add> var MapPrototype = src_Map__Map.prototype; <ide> MapPrototype[IS_MAP_SENTINEL] = true; <ide> MapPrototype[DELETE] = MapPrototype.remove; <ide> MapPrototype.removeIn = MapPrototype.deleteIn; <ide> return fn(this.entry); <ide> } <ide> <del> createClass(MapIterator, Iterator); <add> createClass(MapIterator, src_Iterator__Iterator); <ide> <ide> function MapIterator(map, type, reverse) { <ide> this._type = type; <ide> List.prototype.__iterator = function(type, reverse) { <ide> var index = 0; <ide> var values = iterateList(this, reverse); <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var value = values(); <ide> return value === DONE ? <ide> iteratorDone() : <ide> return size < SIZE ? 0 : (((size - 1) >>> SHIFT) << SHIFT); <ide> } <ide> <del> createClass(OrderedMap, Map); <add> createClass(OrderedMap, src_Map__Map); <ide> <ide> // @pragma Construction <ide> <ide> } <ide> var iterations = 0; <ide> var node = this._head; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> if (node) { <ide> var value = node.value; <ide> node = node.next; <ide> return EMPTY_STACK || (EMPTY_STACK = makeStack(0)); <ide> } <ide> <del> createClass(Set, SetCollection); <add> createClass(src_Set__Set, SetCollection); <ide> <ide> // @pragma Construction <ide> <del> function Set(value) { <add> function src_Set__Set(value) { <ide> return value === null || value === undefined ? emptySet() : <ide> isSet(value) ? 
value : <ide> emptySet().withMutations(function(set ) { <ide> }); <ide> } <ide> <del> Set.of = function(/*...values*/) { <add> src_Set__Set.of = function(/*...values*/) { <ide> return this(arguments); <ide> }; <ide> <del> Set.fromKeys = function(value) { <add> src_Set__Set.fromKeys = function(value) { <ide> return this(KeyedIterable(value).keySeq()); <ide> }; <ide> <del> Set.prototype.toString = function() { <add> src_Set__Set.prototype.toString = function() { <ide> return this.__toString('Set {', '}'); <ide> }; <ide> <ide> // @pragma Access <ide> <del> Set.prototype.has = function(value) { <add> src_Set__Set.prototype.has = function(value) { <ide> return this._map.has(value); <ide> }; <ide> <ide> // @pragma Modification <ide> <del> Set.prototype.add = function(value) { <add> src_Set__Set.prototype.add = function(value) { <ide> return updateSet(this, this._map.set(value, true)); <ide> }; <ide> <del> Set.prototype.remove = function(value) { <add> src_Set__Set.prototype.remove = function(value) { <ide> return updateSet(this, this._map.remove(value)); <ide> }; <ide> <del> Set.prototype.clear = function() { <add> src_Set__Set.prototype.clear = function() { <ide> return updateSet(this, this._map.clear()); <ide> }; <ide> <ide> // @pragma Composition <ide> <del> Set.prototype.union = function() {var iters = SLICE$0.call(arguments, 0); <add> src_Set__Set.prototype.union = function() {var iters = SLICE$0.call(arguments, 0); <ide> iters = iters.filter(function(x ) {return x.size !== 0}); <ide> if (iters.length === 0) { <ide> return this; <ide> }); <ide> }; <ide> <del> Set.prototype.intersect = function() {var iters = SLICE$0.call(arguments, 0); <add> src_Set__Set.prototype.intersect = function() {var iters = SLICE$0.call(arguments, 0); <ide> if (iters.length === 0) { <ide> return this; <ide> } <ide> }); <ide> }; <ide> <del> Set.prototype.subtract = function() {var iters = SLICE$0.call(arguments, 0); <add> src_Set__Set.prototype.subtract = function() {var iters = SLICE$0.call(arguments, 0); <ide> if (iters.length === 0) { <ide> return this; <ide> } <ide> }); <ide> }; <ide> <del> Set.prototype.merge = function() { <add> src_Set__Set.prototype.merge = function() { <ide> return this.union.apply(this, arguments); <ide> }; <ide> <del> Set.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <add> src_Set__Set.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1); <ide> return this.union.apply(this, iters); <ide> }; <ide> <del> Set.prototype.sort = function(comparator) { <add> src_Set__Set.prototype.sort = function(comparator) { <ide> // Late binding <ide> return OrderedSet(sortFactory(this, comparator)); <ide> }; <ide> <del> Set.prototype.sortBy = function(mapper, comparator) { <add> src_Set__Set.prototype.sortBy = function(mapper, comparator) { <ide> // Late binding <ide> return OrderedSet(sortFactory(this, comparator, mapper)); <ide> }; <ide> <del> Set.prototype.wasAltered = function() { <add> src_Set__Set.prototype.wasAltered = function() { <ide> return this._map.wasAltered(); <ide> }; <ide> <del> Set.prototype.__iterate = function(fn, reverse) {var this$0 = this; <add> src_Set__Set.prototype.__iterate = function(fn, reverse) {var this$0 = this; <ide> return this._map.__iterate(function(_, k) {return fn(k, k, this$0)}, reverse); <ide> }; <ide> <del> Set.prototype.__iterator = function(type, reverse) { <add> src_Set__Set.prototype.__iterator = function(type, reverse) { <ide> return this._map.map(function(_, k) {return k}).__iterator(type, reverse); 
<ide> }; <ide> <del> Set.prototype.__ensureOwner = function(ownerID) { <add> src_Set__Set.prototype.__ensureOwner = function(ownerID) { <ide> if (ownerID === this.__ownerID) { <ide> return this; <ide> } <ide> return !!(maybeSet && maybeSet[IS_SET_SENTINEL]); <ide> } <ide> <del> Set.isSet = isSet; <add> src_Set__Set.isSet = isSet; <ide> <ide> var IS_SET_SENTINEL = ''; <ide> <del> var SetPrototype = Set.prototype; <add> var SetPrototype = src_Set__Set.prototype; <ide> SetPrototype[IS_SET_SENTINEL] = true; <ide> SetPrototype[DELETE] = SetPrototype.remove; <ide> SetPrototype.mergeDeep = SetPrototype.merge; <ide> return EMPTY_SET || (EMPTY_SET = makeSet(emptyMap())); <ide> } <ide> <del> createClass(OrderedSet, Set); <add> createClass(OrderedSet, src_Set__Set); <ide> <ide> // @pragma Construction <ide> <ide> if (!(this instanceof RecordType)) { <ide> return new RecordType(values); <ide> } <del> this._map = Map(values); <add> this._map = src_Map__Map(values); <ide> }; <ide> <ide> var keys = Object.keys(defaultValues); <ide> var step = this._step; <ide> var value = reverse ? this._start + maxIndex * step : this._start; <ide> var ii = 0; <del> return new Iterator(function() { <add> return new src_Iterator__Iterator(function() { <ide> var v = value; <ide> value += reverse ? -step : step; <ide> return ii > maxIndex ? iteratorDone() : iteratorValue(type, ii++, v); <ide> <ide> Repeat.prototype.__iterator = function(type, reverse) {var this$0 = this; <ide> var ii = 0; <del> return new Iterator(function() <add> return new src_Iterator__Iterator(function() <ide> {return ii < this$0.size ? iteratorValue(type, ii++, this$0._value) : iteratorDone()} <ide> ); <ide> }; <ide> return ctor; <ide> } <ide> <del> Iterable.Iterator = Iterator; <add> Iterable.Iterator = src_Iterator__Iterator; <ide> <ide> mixin(Iterable, { <ide> <ide> <ide> toMap: function() { <ide> // Use Late Binding here to solve the circular dependency. <del> return Map(this.toKeyedSeq()); <add> return src_Map__Map(this.toKeyedSeq()); <ide> }, <ide> <ide> toObject: function() { <ide> <ide> toSet: function() { <ide> // Use Late Binding here to solve the circular dependency. <del> return Set(isKeyed(this) ? this.valueSeq() : this); <add> return src_Set__Set(isKeyed(this) ? this.valueSeq() : this); <ide> }, <ide> <ide> toSetSeq: function() { <ide> } <ide> <ide> function murmurHashOfSize(size, h) { <del> h = Math__imul(h, 0xCC9E2D51); <del> h = Math__imul(h << 15 | h >>> -15, 0x1B873593); <del> h = Math__imul(h << 13 | h >>> -13, 5); <add> h = src_Math__imul(h, 0xCC9E2D51); <add> h = src_Math__imul(h << 15 | h >>> -15, 0x1B873593); <add> h = src_Math__imul(h << 13 | h >>> -13, 5); <ide> h = (h + 0xE6546B64 | 0) ^ size; <del> h = Math__imul(h ^ h >>> 16, 0x85EBCA6B); <del> h = Math__imul(h ^ h >>> 13, 0xC2B2AE35); <add> h = src_Math__imul(h ^ h >>> 16, 0x85EBCA6B); <add> h = src_Math__imul(h ^ h >>> 13, 0xC2B2AE35); <ide> h = smi(h ^ h >>> 16); <ide> return h; <ide> } <ide> <ide> Seq: Seq, <ide> Collection: Collection, <del> Map: Map, <add> Map: src_Map__Map, <ide> OrderedMap: OrderedMap, <ide> List: List, <ide> Stack: Stack, <del> Set: Set, <add> Set: src_Set__Set, <ide> OrderedSet: OrderedSet, <ide> <ide> Record: Record,
1
Go
Go
extract daemon to its own package
48de91a33f097d4c20515088af1f5bcb9a98c5c9
<ide><path>integration-cli/check_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/api/types/swarm" <ide> "github.com/docker/docker/cliconfig" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/reexec" <ide> "github.com/go-check/check" <ide> ) <ide> type DockerSuite struct { <ide> <ide> func (s *DockerSuite) OnTimeout(c *check.C) { <ide> if daemonPid > 0 && isLocalDaemon { <del> signalDaemonDump(daemonPid) <add> daemon.SignalDaemonDump(daemonPid) <ide> } <ide> } <ide> <ide> func init() { <ide> type DockerRegistrySuite struct { <ide> ds *DockerSuite <ide> reg *testRegistryV2 <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerRegistrySuite) OnTimeout(c *check.C) { <ide> func (s *DockerRegistrySuite) OnTimeout(c *check.C) { <ide> func (s *DockerRegistrySuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux, RegistryHosting) <ide> s.reg = setupRegistry(c, false, "", "") <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerRegistrySuite) TearDownTest(c *check.C) { <ide> func init() { <ide> type DockerSchema1RegistrySuite struct { <ide> ds *DockerSuite <ide> reg *testRegistryV2 <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { <ide> func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { <ide> func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) <ide> s.reg = setupRegistry(c, true, "", "") <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { <ide> func init() { <ide> type DockerRegistryAuthHtpasswdSuite struct { <ide> ds *DockerSuite <ide> reg *testRegistryV2 <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { <ide> func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { <ide> func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux, RegistryHosting) <ide> s.reg = setupRegistry(c, false, "htpasswd", "") <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { <ide> func init() { <ide> type DockerRegistryAuthTokenSuite struct { <ide> ds *DockerSuite <ide> reg *testRegistryV2 <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { <ide> func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { <ide> <ide> func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux, RegistryHosting) <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { <ide> func init() { <ide> <ide> type DockerDaemonSuite struct { <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerDaemonSuite) OnTimeout(c *check.C) { <ide> func (s *DockerDaemonSuite) OnTimeout(c *check.C) { <ide> 
<ide> func (s *DockerDaemonSuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux) <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TearDownTest(c *check.C) { <ide> func (s *DockerDaemonSuite) TearDownTest(c *check.C) { <ide> } <ide> <ide> func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { <del> filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error { <add> filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error { <ide> if err != nil { <ide> // ignore errors here <ide> // not cleaning up sockets is not really an error <ide> func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { <ide> } <ide> return nil <ide> }) <del> os.RemoveAll(daemonSockRoot) <add> os.RemoveAll(daemon.SockRoot) <ide> } <ide> <ide> const defaultSwarmPort = 2477 <ide> func init() { <ide> type DockerSwarmSuite struct { <ide> server *httptest.Server <ide> ds *DockerSuite <del> daemons []*SwarmDaemon <add> daemons []*daemon.Swarm <ide> daemonsLock sync.Mutex // protect access to daemons <ide> portIndex int <ide> } <ide> func (s *DockerSwarmSuite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux) <ide> } <ide> <del>func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { <del> d := &SwarmDaemon{ <del> Daemon: NewDaemon(c), <del> port: defaultSwarmPort + s.portIndex, <add>func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm { <add> d := &daemon.Swarm{ <add> Daemon: daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }), <add> Port: defaultSwarmPort + s.portIndex, <ide> } <del> d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) <add> d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port) <ide> args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts <del> if experimentalDaemon { <del> args = append(args, "--experimental") <del> } <ide> err := d.StartWithBusybox(args...) <ide> c.Assert(err, check.IsNil) <ide> <ide> if joinSwarm == true { <ide> if len(s.daemons) > 0 { <del> tokens := s.daemons[0].joinTokens(c) <add> tokens := s.daemons[0].JoinTokens(c) <ide> token := tokens.Worker <ide> if manager { <ide> token = tokens.Manager <ide> } <ide> c.Assert(d.Join(swarm.JoinRequest{ <del> RemoteAddrs: []string{s.daemons[0].listenAddr}, <add> RemoteAddrs: []string{s.daemons[0].ListenAddr}, <ide> JoinToken: token, <ide> }), check.IsNil) <ide> } else { <ide> func (s *DockerSwarmSuite) TearDownTest(c *check.C) { <ide> for _, d := range s.daemons { <ide> if d != nil { <ide> d.Stop() <add> // FIXME(vdemeester) should be handled by SwarmDaemon ? 
<ide> // raft state file is quite big (64MB) so remove it after every test <del> walDir := filepath.Join(d.root, "swarm/raft/wal") <add> walDir := filepath.Join(d.Root, "swarm/raft/wal") <ide> if err := os.RemoveAll(walDir); err != nil { <ide> c.Logf("error removing %v: %v", walDir, err) <ide> } <ide> <del> cleanupExecRoot(c, d.execRoot) <add> d.CleanupExecRoot(c) <ide> } <ide> } <ide> s.daemons = nil <add><path>integration-cli/daemon/daemon.go <del><path>integration-cli/daemon.go <del>package main <add>package daemon <ide> <ide> import ( <ide> "bytes" <add> "crypto/tls" <ide> "encoding/json" <ide> "errors" <ide> "fmt" <ide> "io" <add> "io/ioutil" <add> "net" <ide> "net/http" <add> "net/http/httputil" <add> "net/url" <ide> "os" <ide> "os/exec" <ide> "path/filepath" <ide> import ( <ide> <ide> "github.com/docker/docker/api/types/events" <ide> "github.com/docker/docker/opts" <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <add> icmd "github.com/docker/docker/pkg/integration/cmd" <ide> "github.com/docker/docker/pkg/ioutils" <ide> "github.com/docker/docker/pkg/stringid" <ide> "github.com/docker/go-connections/sockets" <ide> "github.com/docker/go-connections/tlsconfig" <ide> "github.com/go-check/check" <ide> ) <ide> <del>var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") <add>// SockRoot holds the path of the default docker integration daemon socket <add>var SockRoot = filepath.Join(os.TempDir(), "docker-integration") <ide> <ide> // Daemon represents a Docker daemon for the testing framework. <ide> type Daemon struct { <del> GlobalFlags []string <del> <del> id string <del> c *check.C <del> logFile *os.File <del> folder string <del> root string <del> stdin io.WriteCloser <del> stdout, stderr io.ReadCloser <del> cmd *exec.Cmd <del> storageDriver string <del> wait chan error <del> userlandProxy bool <del> useDefaultHost bool <del> useDefaultTLSHost bool <del> execRoot string <add> GlobalFlags []string <add> Root string <add> Folder string <add> Wait chan error <add> UseDefaultHost bool <add> UseDefaultTLSHost bool <add> <add> // FIXME(vdemeester) either should be used everywhere (do not return error) or nowhere, <add> // so I think we should remove it or use it for everything <add> c *check.C <add> id string <add> logFile *os.File <add> stdin io.WriteCloser <add> stdout, stderr io.ReadCloser <add> cmd *exec.Cmd <add> storageDriver string <add> userlandProxy bool <add> execRoot string <add> experimental bool <add> dockerBinary string <add> dockerdBinary string <add>} <add> <add>// Config holds docker daemon integration configuration <add>type Config struct { <add> Experimental bool <ide> } <ide> <ide> type clientConfig struct { <ide> type clientConfig struct { <ide> addr string <ide> } <ide> <del>// NewDaemon returns a Daemon instance to be used for testing. <add>// New returns a Daemon instance to be used for testing. <ide> // This will create a directory such as d123456789 in the folder specified by $DEST. <ide> // The daemon will not automatically start. 
<del>func NewDaemon(c *check.C) *Daemon { <add>func New(c *check.C, dockerBinary string, dockerdBinary string, config Config) *Daemon { <ide> dest := os.Getenv("DEST") <ide> c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) <ide> <del> err := os.MkdirAll(daemonSockRoot, 0700) <add> err := os.MkdirAll(SockRoot, 0700) <ide> c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) <ide> <ide> id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) <ide> func NewDaemon(c *check.C) *Daemon { <ide> return &Daemon{ <ide> id: id, <ide> c: c, <del> folder: daemonFolder, <del> root: daemonRoot, <add> Folder: daemonFolder, <add> Root: daemonRoot, <ide> storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), <ide> userlandProxy: userlandProxy, <ide> execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), <add> dockerBinary: dockerBinary, <add> dockerdBinary: dockerdBinary, <add> experimental: config.Experimental, <ide> } <ide> } <ide> <ide> // RootDir returns the root directory of the daemon. <ide> func (d *Daemon) RootDir() string { <del> return d.root <add> return d.Root <add>} <add> <add>// ID returns the generated id of the daemon <add>func (d *Daemon) ID() string { <add> return d.id <add>} <add> <add>// StorageDriver returns the configured storage driver of the daemon <add>func (d *Daemon) StorageDriver() string { <add> return d.storageDriver <add>} <add> <add>// CleanupExecRoot cleans the daemon exec root (network namespaces, ...) <add>func (d *Daemon) CleanupExecRoot(c *check.C) { <add> cleanupExecRoot(c, d.execRoot) <ide> } <ide> <ide> func (d *Daemon) getClientConfig() (*clientConfig, error) { <ide> func (d *Daemon) getClientConfig() (*clientConfig, error) { <ide> addr string <ide> proto string <ide> ) <del> if d.useDefaultTLSHost { <add> if d.UseDefaultTLSHost { <ide> option := &tlsconfig.Options{ <ide> CAFile: "fixtures/https/ca.pem", <ide> CertFile: "fixtures/https/client-cert.pem", <ide> func (d *Daemon) getClientConfig() (*clientConfig, error) { <ide> addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) <ide> scheme = "https" <ide> proto = "tcp" <del> } else if d.useDefaultHost { <add> } else if d.UseDefaultHost { <ide> addr = opts.DefaultUnixSocket <ide> proto = "unix" <ide> scheme = "http" <ide> func (d *Daemon) getClientConfig() (*clientConfig, error) { <ide> // Start will start the daemon and return once it is ready to receive requests. <ide> // You can specify additional daemon flags. <ide> func (d *Daemon) Start(args ...string) error { <del> logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) <del> d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) <add> logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) <add> d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.Folder)) <ide> <ide> return d.StartWithLogFile(logFile, args...) <ide> } <ide> <ide> // StartWithLogFile will start the daemon and attach its streams to a given file. 
<ide> func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { <del> dockerdBinary, err := exec.LookPath(dockerdBinary) <add> dockerdBinary, err := exec.LookPath(d.dockerdBinary) <ide> d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) <ide> <ide> args := append(d.GlobalFlags, <ide> "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", <del> "--graph", d.root, <add> "--graph", d.Root, <ide> "--exec-root", d.execRoot, <del> "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), <add> "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder), <ide> fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), <ide> ) <del> if experimentalDaemon { <add> if d.experimental { <ide> args = append(args, "--experimental", "--init") <ide> } <del> if !(d.useDefaultHost || d.useDefaultTLSHost) { <del> args = append(args, []string{"--host", d.sock()}...) <add> if !(d.UseDefaultHost || d.UseDefaultTLSHost) { <add> args = append(args, []string{"--host", d.Sock()}...) <ide> } <ide> if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { <ide> args = append(args, []string{"--userns-remap", root}...) <ide> func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { <ide> close(wait) <ide> }() <ide> <del> d.wait = wait <add> d.Wait = wait <ide> <ide> tick := time.Tick(500 * time.Millisecond) <ide> // make sure daemon is ready to receive requests <ide> func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { <ide> d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) <ide> } <ide> d.c.Logf("[%s] daemon started", d.id) <del> d.root, err = d.queryRootDir() <add> d.Root, err = d.queryRootDir() <ide> if err != nil { <ide> return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) <ide> } <ide> return nil <del> case <-d.wait: <add> case <-d.Wait: <ide> return fmt.Errorf("[%s] Daemon exited during startup", d.id) <ide> } <ide> } <ide> func (d *Daemon) StartWithBusybox(arg ...string) error { <ide> <ide> // Kill will send a SIGKILL to the daemon <ide> func (d *Daemon) Kill() error { <del> if d.cmd == nil || d.wait == nil { <add> if d.cmd == nil || d.Wait == nil { <ide> return errors.New("daemon not started") <ide> } <ide> <ide> func (d *Daemon) Kill() error { <ide> return err <ide> } <ide> <del> if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { <add> if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { <ide> return err <ide> } <ide> <ide> return nil <ide> } <ide> <add>// Pid returns the pid of the daemon <add>func (d *Daemon) Pid() int { <add> return d.cmd.Process.Pid <add>} <add> <add>// Interrupt stops the daemon by sending it an Interrupt signal <add>func (d *Daemon) Interrupt() error { <add> return d.Signal(os.Interrupt) <add>} <add> <add>// Signal sends the specified signal to the daemon if running <add>func (d *Daemon) Signal(signal os.Signal) error { <add> if d.cmd == nil || d.Wait == nil { <add> return errors.New("daemon not started") <add> } <add> return d.cmd.Process.Signal(signal) <add>} <add> <ide> // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its <ide> // stack to its log file and exit <ide> // This is used primarily for gathering debug information on test timeout <ide> func (d *Daemon) DumpStackAndQuit() { <ide> if d.cmd == nil || d.cmd.Process == nil { <ide> return <ide> } <del> signalDaemonDump(d.cmd.Process.Pid) <add> SignalDaemonDump(d.cmd.Process.Pid) <ide> } <ide> <ide> // Stop 
will send a SIGINT every second and wait for the daemon to stop. <ide> // If it timeouts, a SIGKILL is sent. <ide> // Stop will not delete the daemon directory. If a purged daemon is needed, <ide> // instantiate a new one with NewDaemon. <ide> func (d *Daemon) Stop() error { <del> if d.cmd == nil || d.wait == nil { <add> if d.cmd == nil || d.Wait == nil { <ide> return errors.New("daemon not started") <ide> } <ide> <ide> func (d *Daemon) Stop() error { <ide> out1: <ide> for { <ide> select { <del> case err := <-d.wait: <add> case err := <-d.Wait: <ide> return err <ide> case <-time.After(20 * time.Second): <ide> // time for stopping jobs and run onShutdown hooks <ide> out1: <ide> out2: <ide> for { <ide> select { <del> case err := <-d.wait: <add> case err := <-d.Wait: <ide> return err <ide> case <-tick: <ide> i++ <ide> out2: <ide> return err <ide> } <ide> <del> if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { <add> if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { <ide> return err <ide> } <ide> <ide> out2: <ide> func (d *Daemon) Restart(arg ...string) error { <ide> d.Stop() <ide> // in the case of tests running a user namespace-enabled daemon, we have resolved <del> // d.root to be the actual final path of the graph dir after the "uid.gid" of <add> // d.Root to be the actual final path of the graph dir after the "uid.gid" of <ide> // remapped root is added--we need to subtract it from the path before calling <ide> // start or else we will continue making subdirectories rather than truly restarting <ide> // with the same location/root: <ide> if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { <del> d.root = filepath.Dir(d.root) <add> d.Root = filepath.Dir(d.Root) <ide> } <ide> return d.Start(arg...) <ide> } <ide> <ide> // LoadBusybox will load the stored busybox into a newly started daemon <ide> func (d *Daemon) LoadBusybox() error { <del> bb := filepath.Join(d.folder, "busybox.tar") <add> bb := filepath.Join(d.Folder, "busybox.tar") <ide> if _, err := os.Stat(bb); err != nil { <ide> if !os.IsNotExist(err) { <ide> return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) <ide> } <ide> // saving busybox image from main daemon <del> if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { <del> imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() <add> if out, err := exec.Command(d.dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { <add> imagesOut, _ := exec.Command(d.dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() <ide> return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) <ide> } <ide> } <ide> func (d *Daemon) queryRootDir() (string, error) { <ide> } <ide> var b []byte <ide> var i Info <del> b, err = readBody(body) <add> b, err = integration.ReadBody(body) <ide> if err == nil && resp.StatusCode == http.StatusOK { <ide> // read the docker root dir <ide> if err = json.Unmarshal(b, &i); err == nil { <ide> func (d *Daemon) queryRootDir() (string, error) { <ide> return "", err <ide> } <ide> <del>func (d *Daemon) sock() string { <add>// Sock returns the socket path of the daemon <add>func (d *Daemon) Sock() string { <ide> return fmt.Sprintf("unix://" + d.sockPath()) <ide> } <ide> <ide> func (d *Daemon) sockPath() string { <del> return filepath.Join(daemonSockRoot, d.id+".sock") <add> return 
 filepath.Join(SockRoot, d.id+".sock")
<ide> }
<ide>
<del>func (d *Daemon) waitRun(contID string) error {
<del> args := []string{"--host", d.sock()}
<del> return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...)
<add>// WaitRun waits up to 10 seconds for a container to be running
<add>func (d *Daemon) WaitRun(contID string) error {
<add> args := []string{"--host", d.Sock()}
<add> return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
<ide> }
<ide>
<del>func (d *Daemon) getBaseDeviceSize(c *check.C) int64 {
<del> infoCmdOutput, _, err := runCommandPipelineWithOutput(
<del> exec.Command(dockerBinary, "-H", d.sock(), "info"),
<add>// GetBaseDeviceSize returns the base device size of the daemon
<add>func (d *Daemon) GetBaseDeviceSize(c *check.C) int64 {
<add> infoCmdOutput, _, err := integration.RunCommandPipelineWithOutput(
<add> exec.Command(d.dockerBinary, "-H", d.Sock(), "info"),
<ide> exec.Command("grep", "Base Device Size"),
<ide> )
<ide> c.Assert(err, checker.IsNil)
<ide> func (d *Daemon) getBaseDeviceSize(c *check.C) int64 {
<ide> // Cmd will execute a docker CLI command against this Daemon.
<ide> // Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
<ide> func (d *Daemon) Cmd(args ...string) (string, error) {
<del> b, err := d.command(args...).CombinedOutput()
<add> b, err := d.Command(args...).CombinedOutput()
<ide> return string(b), err
<ide> }
<ide>
<del>func (d *Daemon) command(args ...string) *exec.Cmd {
<del> return exec.Command(dockerBinary, d.prependHostArg(args)...)
<add>// Command will create a docker CLI command against this Daemon.
<add>func (d *Daemon) Command(args ...string) *exec.Cmd {
<add> return exec.Command(d.dockerBinary, d.PrependHostArg(args)...)
<ide> }
<ide>
<del>func (d *Daemon) prependHostArg(args []string) []string {
<add>// PrependHostArg prepends the daemon host flags to the specified arguments
<add>func (d *Daemon) PrependHostArg(args []string) []string {
<ide> for _, arg := range args {
<ide> if arg == "--host" || arg == "-H" {
<ide> return args
<ide> }
<ide> }
<del> return append([]string{"--host", d.sock()}, args...)
<add> return append([]string{"--host", d.Sock()}, args...)
<ide> }
<ide>
<ide> // SockRequest executes a socket request on a daemon and returns statuscode and output.
<ide> func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []
<ide> if err != nil {
<ide> return -1, nil, err
<ide> }
<del> b, err := readBody(body)
<add> b, err := integration.ReadBody(body)
<ide> return res.StatusCode, b, err
<ide> }
<ide>
<ide> // SockRequestRaw executes a socket request on a daemon and returns an http
<ide> // response and a reader for the output data.
<ide> func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
<del> return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock())
<add> return SockRequestRawToDaemon(method, endpoint, data, ct, d.Sock())
<ide> }
<ide>
<ide> // LogFileName returns the path to the daemon's log file
<ide> func (d *Daemon) LogFileName() string {
<ide> return d.logFile.Name()
<ide> }
<ide>
<del>func (d *Daemon) getIDByName(name string) (string, error) {
<add>// GetIDByName returns the ID of an object (container, volume, …) given its name
<add>func (d *Daemon) GetIDByName(name string) (string, error) {
<ide> return d.inspectFieldWithError(name, "Id")
<ide> }
<ide>
<del>func (d *Daemon) activeContainers() (ids []string) {
<add>// ActiveContainers returns the list of ids of the currently running containers
<add>func (d *Daemon) ActiveContainers() (ids []string) {
<add> // FIXME(vdemeester) shouldn't ignore the error
<ide> out, _ := d.Cmd("ps", "-q")
<ide> for _, id := range strings.Split(out, "\n") {
<ide> if id = strings.TrimSpace(id); id != "" {
<ide> func (d *Daemon) activeContainers() (ids []string) {
<ide> return
<ide> }
<ide>
<add>// ReadLogFile returns the content of the daemon log file
<add>func (d *Daemon) ReadLogFile() ([]byte, error) {
<add> return ioutil.ReadFile(d.logFile.Name())
<add>}
<add>
<ide> func (d *Daemon) inspectFilter(name, filter string) (string, error) {
<ide> format := fmt.Sprintf("{{%s}}", filter)
<ide> out, err := d.Cmd("inspect", "-f", format, name)
<ide> func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
<ide> return d.inspectFilter(name, fmt.Sprintf(".%s", field))
<ide> }
<ide>
<del>func (d *Daemon) findContainerIP(id string) string {
<add>// FindContainerIP returns the ip of the specified container
<add>// FIXME(vdemeester) should probably error out
<add>func (d *Daemon) FindContainerIP(id string) string {
<ide> out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id)
<ide> if err != nil {
<ide> d.c.Log(err)
<ide> }
<ide> return strings.Trim(out, " \r\n'")
<ide> }
<ide>
<del>func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
<del> buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...)
<del> return runCommandWithOutput(buildCmd)
<add>// BuildImageWithOut builds an image with the specified dockerfile and options and returns the output
<add>func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
<add> buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...)
<add> result := icmd.RunCmd(icmd.Cmd{
<add> Command: buildCmd.Args,
<add> Env: buildCmd.Env,
<add> Dir: buildCmd.Dir,
<add> Stdin: buildCmd.Stdin,
<add> Stdout: buildCmd.Stdout,
<add> })
<add> return result.Combined(), result.ExitCode, result.Error
<ide> }
<ide>
<del>func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
<add>// CheckActiveContainerCount returns the number of active containers
<add>// FIXME(vdemeester) should re-use ActiveContainers in some way
<add>func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
<ide> out, err := d.Cmd("ps", "-q")
<ide> c.Assert(err, checker.IsNil)
<ide> if len(strings.TrimSpace(out)) == 0 {
<ide> func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.Comme
<ide> return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
<ide> }
<ide>
<del>func (d *Daemon) reloadConfig() error {
<add>// ReloadConfig asks the daemon to reload its configuration
<add>func (d *Daemon) ReloadConfig() error {
<ide> if d.cmd == nil || d.cmd.Process == nil {
<ide> return fmt.Errorf("daemon is not running")
<ide> }
<ide>
<ide> errCh := make(chan error)
<ide> started := make(chan struct{})
<ide> go func() {
<del> _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock())
<add> _, body, err := SockRequestRawToDaemon("GET", "/events", nil, "", d.Sock())
<ide> close(started)
<ide> if err != nil {
<ide> errCh <- err
<ide> func (d *Daemon) reloadConfig() error {
<ide> }
<ide> return nil
<ide> }
<add>
<add>// WaitInspectWithArgs waits for the specified expression to be equal to the specified expected string within the given time.
<add>// FIXME(vdemeester) Attach this to the Daemon struct
<add>func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
<add> after := time.After(timeout)
<add>
<add> args := append(arg, "inspect", "-f", expr, name)
<add> for {
<add> result := icmd.RunCommand(dockerBinary, args...)
<add> if result.Error != nil {
<add> if !strings.Contains(result.Stderr(), "No such") {
<add> return fmt.Errorf("error executing docker inspect: %v\n%s",
<add> result.Stderr(), result.Stdout())
<add> }
<add> select {
<add> case <-after:
<add> return result.Error
<add> default:
<add> time.Sleep(10 * time.Millisecond)
<add> continue
<add> }
<add> }
<add>
<add> out := strings.TrimSpace(result.Stdout())
<add> if out == expected {
<add> break
<add> }
<add>
<add> select {
<add> case <-after:
<add> return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected)
<add> default:
<add> }
<add>
<add> time.Sleep(100 * time.Millisecond)
<add> }
<add> return nil
<add>}
<add>
<add>// SockRequestRawToDaemon creates an http request against the specified daemon socket
<add>// FIXME(vdemeester) attach this to daemon ?
<add>func SockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { <add> req, client, err := newRequestClient(method, endpoint, data, ct, daemon) <add> if err != nil { <add> return nil, nil, err <add> } <add> <add> resp, err := client.Do(req) <add> if err != nil { <add> client.Close() <add> return nil, nil, err <add> } <add> body := ioutils.NewReadCloserWrapper(resp.Body, func() error { <add> defer resp.Body.Close() <add> return client.Close() <add> }) <add> <add> return resp, body, nil <add>} <add> <add>func getTLSConfig() (*tls.Config, error) { <add> dockerCertPath := os.Getenv("DOCKER_CERT_PATH") <add> <add> if dockerCertPath == "" { <add> return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") <add> } <add> <add> option := &tlsconfig.Options{ <add> CAFile: filepath.Join(dockerCertPath, "ca.pem"), <add> CertFile: filepath.Join(dockerCertPath, "cert.pem"), <add> KeyFile: filepath.Join(dockerCertPath, "key.pem"), <add> } <add> tlsConfig, err := tlsconfig.Client(*option) <add> if err != nil { <add> return nil, err <add> } <add> <add> return tlsConfig, nil <add>} <add> <add>// SockConn opens a connection on the specified socket <add>func SockConn(timeout time.Duration, daemon string) (net.Conn, error) { <add> daemonURL, err := url.Parse(daemon) <add> if err != nil { <add> return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) <add> } <add> <add> var c net.Conn <add> switch daemonURL.Scheme { <add> case "npipe": <add> return npipeDial(daemonURL.Path, timeout) <add> case "unix": <add> return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) <add> case "tcp": <add> if os.Getenv("DOCKER_TLS_VERIFY") != "" { <add> // Setup the socket TLS configuration. <add> tlsConfig, err := getTLSConfig() <add> if err != nil { <add> return nil, err <add> } <add> dialer := &net.Dialer{Timeout: timeout} <add> return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) <add> } <add> return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) <add> default: <add> return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) <add> } <add>} <add> <add>func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { <add> c, err := SockConn(time.Duration(10*time.Second), daemon) <add> if err != nil { <add> return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) <add> } <add> <add> client := httputil.NewClientConn(c, nil) <add> <add> req, err := http.NewRequest(method, endpoint, data) <add> if err != nil { <add> client.Close() <add> return nil, nil, fmt.Errorf("could not create new request: %v", err) <add> } <add> <add> if ct != "" { <add> req.Header.Set("Content-Type", ct) <add> } <add> return req, client, nil <add>} <add> <add>// BuildImageCmdWithHost create a build command with the specified arguments. <add>// FIXME(vdemeester) move this away <add>func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { <add> args := []string{} <add> if host != "" { <add> args = append(args, "--host", host) <add> } <add> args = append(args, "build", "-t", name) <add> if !useCache { <add> args = append(args, "--no-cache") <add> } <add> args = append(args, buildFlags...) <add> args = append(args, "-") <add> buildCmd := exec.Command(dockerBinary, args...) 
<add> buildCmd.Stdin = strings.NewReader(dockerfile) <add> return buildCmd <add>} <add><path>integration-cli/daemon/daemon_swarm.go <del><path>integration-cli/daemon_swarm.go <del>package main <add>package daemon <ide> <ide> import ( <ide> "encoding/json" <ide> import ( <ide> "github.com/go-check/check" <ide> ) <ide> <del>// SwarmDaemon is a test daemon with helpers for participating in a swarm. <del>type SwarmDaemon struct { <add>// Swarm is a test daemon with helpers for participating in a swarm. <add>type Swarm struct { <ide> *Daemon <ide> swarm.Info <del> port int <del> listenAddr string <add> Port int <add> ListenAddr string <ide> } <ide> <ide> // Init initializes a new swarm cluster. <del>func (d *SwarmDaemon) Init(req swarm.InitRequest) error { <add>func (d *Swarm) Init(req swarm.InitRequest) error { <ide> if req.ListenAddr == "" { <del> req.ListenAddr = d.listenAddr <add> req.ListenAddr = d.ListenAddr <ide> } <ide> status, out, err := d.SockRequest("POST", "/swarm/init", req) <ide> if status != http.StatusOK { <ide> func (d *SwarmDaemon) Init(req swarm.InitRequest) error { <ide> if err != nil { <ide> return fmt.Errorf("initializing swarm: %v", err) <ide> } <del> info, err := d.info() <add> info, err := d.SwarmInfo() <ide> if err != nil { <ide> return err <ide> } <ide> func (d *SwarmDaemon) Init(req swarm.InitRequest) error { <ide> } <ide> <ide> // Join joins a daemon to an existing cluster. <del>func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { <add>func (d *Swarm) Join(req swarm.JoinRequest) error { <ide> if req.ListenAddr == "" { <del> req.ListenAddr = d.listenAddr <add> req.ListenAddr = d.ListenAddr <ide> } <ide> status, out, err := d.SockRequest("POST", "/swarm/join", req) <ide> if status != http.StatusOK { <ide> func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { <ide> if err != nil { <ide> return fmt.Errorf("joining swarm: %v", err) <ide> } <del> info, err := d.info() <add> info, err := d.SwarmInfo() <ide> if err != nil { <ide> return err <ide> } <ide> func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { <ide> } <ide> <ide> // Leave forces daemon to leave current cluster. 
<del>func (d *SwarmDaemon) Leave(force bool) error { <add>func (d *Swarm) Leave(force bool) error { <ide> url := "/swarm/leave" <ide> if force { <ide> url += "?force=1" <ide> func (d *SwarmDaemon) Leave(force bool) error { <ide> return err <ide> } <ide> <del>func (d *SwarmDaemon) info() (swarm.Info, error) { <add>// SwarmInfo returns the swarm information of the daemon <add>func (d *Swarm) SwarmInfo() (swarm.Info, error) { <ide> var info struct { <ide> Swarm swarm.Info <ide> } <ide> func (d *SwarmDaemon) info() (swarm.Info, error) { <ide> return info.Swarm, nil <ide> } <ide> <del>type serviceConstructor func(*swarm.Service) <del>type nodeConstructor func(*swarm.Node) <del>type specConstructor func(*swarm.Spec) <add>// ServiceConstructor defines a swarm service constructor function <add>type ServiceConstructor func(*swarm.Service) <ide> <del>func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { <add>// NodeConstructor defines a swarm node constructor <add>type NodeConstructor func(*swarm.Node) <add> <add>// SpecConstructor defines a swarm spec constructor <add>type SpecConstructor func(*swarm.Spec) <add> <add>// CreateService creates a swarm service given the specified service constructor <add>func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { <ide> var service swarm.Service <ide> for _, fn := range f { <ide> fn(&service) <ide> func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string <ide> return scr.ID <ide> } <ide> <del>func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { <add>// GetService returns the swarm service corresponding to the specified id <add>func (d *Swarm) GetService(c *check.C, id string) *swarm.Service { <ide> var service swarm.Service <ide> status, out, err := d.SockRequest("GET", "/services/"+id, nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { <ide> return &service <ide> } <ide> <del>func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { <add>// GetServiceTasks returns the swarm tasks for the specified service <add>func (d *Swarm) GetServiceTasks(c *check.C, service string) []swarm.Task { <ide> var tasks []swarm.Task <ide> <ide> filterArgs := filters.NewArgs() <ide> func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { <ide> return tasks <ide> } <ide> <del>func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { <add>// CheckServiceRunningTasks returns the number of running tasks for the specified service <add>func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { <ide> return func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks := d.getServiceTasks(c, service) <add> tasks := d.GetServiceTasks(c, service) <ide> var runningCount int <ide> for _, task := range tasks { <ide> if task.Status.State == swarm.TaskStateRunning { <ide> func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (i <ide> } <ide> } <ide> <del>func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { <add>// CheckServiceUpdateState returns the current update state for the specified service <add>func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { <ide> return func(c *check.C) (interface{}, 
check.CommentInterface) { <del> service := d.getService(c, service) <add> service := d.GetService(c, service) <ide> if service.UpdateStatus == nil { <ide> return "", nil <ide> } <ide> return service.UpdateStatus.State, nil <ide> } <ide> } <ide> <del>func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { <add>// CheckServiceTasks returns the number of tasks for the specified service <add>func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { <ide> return func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks := d.getServiceTasks(c, service) <add> tasks := d.GetServiceTasks(c, service) <ide> return len(tasks), nil <ide> } <ide> } <ide> <del>func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { <add>// CheckRunningTaskImages returns the number of different images attached to a running task <add>func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { <ide> var tasks []swarm.Task <ide> <ide> filterArgs := filters.NewArgs() <ide> func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.Com <ide> return result, nil <ide> } <ide> <del>func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { <del> nodes := d.listNodes(c) <add>// CheckNodeReadyCount returns the number of ready node on the swarm <add>func (d *Swarm) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { <add> nodes := d.ListNodes(c) <ide> var readyCount int <ide> for _, node := range nodes { <ide> if node.Status.State == swarm.NodeStateReady { <ide> func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.Commen <ide> return readyCount, nil <ide> } <ide> <del>func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { <add>// GetTask returns the swarm task identified by the specified id <add>func (d *Swarm) GetTask(c *check.C, id string) swarm.Task { <ide> var task swarm.Task <ide> <ide> status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) <ide> func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { <ide> return task <ide> } <ide> <del>func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { <add>// UpdateService updates a swarm service with the specified service constructor <add>func (d *Swarm) UpdateService(c *check.C, service *swarm.Service, f ...ServiceConstructor) { <ide> for _, fn := range f { <ide> fn(service) <ide> } <ide> func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...ser <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> } <ide> <del>func (d *SwarmDaemon) removeService(c *check.C, id string) { <add>// RemoveService removes the specified service <add>func (d *Swarm) RemoveService(c *check.C, id string) { <ide> status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> } <ide> <del>func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { <add>// GetNode returns a swarm node identified by the specified id <add>func (d *Swarm) GetNode(c *check.C, id string) *swarm.Node { <ide> var node swarm.Node <ide> status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) <ide> c.Assert(err, checker.IsNil, 
check.Commentf(string(out))) <ide> func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { <ide> return &node <ide> } <ide> <del>func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { <add>// RemoveNode removes the specified node <add>func (d *Swarm) RemoveNode(c *check.C, id string, force bool) { <ide> url := "/nodes/" + id <ide> if force { <ide> url += "?force=1" <ide> func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> } <ide> <del>func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { <add>// UpdateNode updates a swarm node with the specified node constructor <add>func (d *Swarm) UpdateNode(c *check.C, id string, f ...NodeConstructor) { <ide> for i := 0; ; i++ { <del> node := d.getNode(c, id) <add> node := d.GetNode(c, id) <ide> for _, fn := range f { <ide> fn(node) <ide> } <ide> func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { <ide> } <ide> } <ide> <del>func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { <add>// ListNodes returns the list of the current swarm nodes <add>func (d *Swarm) ListNodes(c *check.C) []swarm.Node { <ide> status, out, err := d.SockRequest("GET", "/nodes", nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { <ide> return nodes <ide> } <ide> <del>func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { <add>// ListServices return the list of the current swarm services <add>func (d *Swarm) ListServices(c *check.C) []swarm.Service { <ide> status, out, err := d.SockRequest("GET", "/services", nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { <ide> return services <ide> } <ide> <del>func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string { <add>// CreateSecret creates a secret given the specified spec <add>func (d *Swarm) CreateSecret(c *check.C, secretSpec swarm.SecretSpec) string { <ide> status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) <ide> <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) stri <ide> return scr.ID <ide> } <ide> <del>func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { <add>// ListSecrets returns the list of the current swarm secrets <add>func (d *Swarm) ListSecrets(c *check.C) []swarm.Secret { <ide> status, out, err := d.SockRequest("GET", "/secrets", nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <ide> func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { <ide> return secrets <ide> } <ide> <del>func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { <add>// GetSecret returns a swarm secret identified by the specified id <add>func (d *Swarm) GetSecret(c *check.C, id string) *swarm.Secret { <ide> var secret swarm.Secret <ide> status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) <ide> c.Assert(err, checker.IsNil, check.Commentf(string(out))) <ide> func (d *SwarmDaemon) 
 getSecret(c *check.C, id string) *swarm.Secret {
<ide> return &secret
<ide> }
<ide>
<del>func (d *SwarmDaemon) deleteSecret(c *check.C, id string) {
<add>// DeleteSecret removes the swarm secret identified by the specified id
<add>func (d *Swarm) DeleteSecret(c *check.C, id string) {
<ide> status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil)
<ide> c.Assert(err, checker.IsNil, check.Commentf(string(out)))
<ide> c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out)))
<ide> }
<ide>
<del>func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm {
<add>// GetSwarm returns the current swarm object
<add>func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm {
<ide> var sw swarm.Swarm
<ide> status, out, err := d.SockRequest("GET", "/swarm", nil)
<ide> c.Assert(err, checker.IsNil, check.Commentf(string(out)))
<ide> func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm {
<ide> return sw
<ide> }
<ide>
<del>func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
<del> sw := d.getSwarm(c)
<add>// UpdateSwarm updates the current swarm object with the specified spec constructors
<add>func (d *Swarm) UpdateSwarm(c *check.C, f ...SpecConstructor) {
<add> sw := d.GetSwarm(c)
<ide> for _, fn := range f {
<ide> fn(&sw.Spec)
<ide> }
<ide> func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
<ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
<ide> }
<ide>
<del>func (d *SwarmDaemon) rotateTokens(c *check.C) {
<add>// RotateTokens updates the swarm to rotate tokens
<add>func (d *Swarm) RotateTokens(c *check.C) {
<ide> var sw swarm.Swarm
<ide> status, out, err := d.SockRequest("GET", "/swarm", nil)
<ide> c.Assert(err, checker.IsNil, check.Commentf(string(out)))
<ide> func (d *SwarmDaemon) rotateTokens(c *check.C) {
<ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
<ide> }
<ide>
<del>func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens {
<add>// JoinTokens returns the current swarm join tokens
<add>func (d *Swarm) JoinTokens(c *check.C) swarm.JoinTokens {
<ide> var sw swarm.Swarm
<ide> status, out, err := d.SockRequest("GET", "/swarm", nil)
<ide> c.Assert(err, checker.IsNil, check.Commentf(string(out)))
<ide> func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens {
<ide> return sw.JoinTokens
<ide> }
<ide>
<del>func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
<del> info, err := d.info()
<add>// CheckLocalNodeState returns the current swarm node state
<add>func (d *Swarm) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
<add> info, err := d.SwarmInfo()
<ide> c.Assert(err, checker.IsNil)
<ide> return info.LocalNodeState, nil
<ide> }
<ide>
<del>func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
<del> info, err := d.info()
<add>// CheckControlAvailable returns whether swarm control is available on the daemon
<add>func (d *Swarm) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
<add> info, err := d.SwarmInfo()
<ide> c.Assert(err, checker.IsNil)
<ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
<ide> return info.ControlAvailable, nil
<ide> }
<ide>
<del>func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) {
<add>// CheckLeader returns whether there is a leader on the swarm or not
<add>func (d *Swarm) CheckLeader(c *check.C) (interface{},
check.CommentInterface) { <ide> errList := check.Commentf("could not get node list") <ide> status, out, err := d.SockRequest("GET", "/nodes", nil) <ide> if err != nil { <ide> func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterfa <ide> return fmt.Errorf("no leader"), check.Commentf("could not find leader") <ide> } <ide> <del>func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { <add>// CmdRetryOutOfSequence tries the specified command against the current daemon for 10 times <add>func (d *Swarm) CmdRetryOutOfSequence(args ...string) (string, error) { <ide> for i := 0; ; i++ { <ide> out, err := d.Cmd(args...) <ide> if err != nil { <add><path>integration-cli/daemon/daemon_unix.go <del><path>integration-cli/daemon_unix.go <ide> // +build !windows <ide> <del>package main <add>package daemon <ide> <ide> import ( <ide> "os" <ide> func cleanupExecRoot(c *check.C, execRoot string) { <ide> }) <ide> } <ide> <del>func signalDaemonDump(pid int) { <add>// SignalDaemonDump sends a signal to the daemon to write a dump file <add>func SignalDaemonDump(pid int) { <ide> syscall.Kill(pid, syscall.SIGQUIT) <ide> } <ide> <add><path>integration-cli/daemon/daemon_windows.go <del><path>integration-cli/daemon_windows.go <del>package main <add>package daemon <ide> <ide> import ( <ide> "fmt" <ide> func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { <ide> return <ide> } <ide> <del>func signalDaemonDump(pid int) { <add>// SignalDaemonDump sends a signal to the daemon to write a dump file <add>func SignalDaemonDump(pid int) { <ide> modkernel32 := windows.NewLazySystemDLL("kernel32.dll") <ide> procOpenEvent := modkernel32.NewProc("OpenEventW") <ide> procPulseEvent := modkernel32.NewProc("PulseEvent") <add><path>integration-cli/daemon/npipe.go <del><path>integration-cli/npipe.go <ide> // +build !windows <ide> <del>package main <add>package daemon <ide> <ide> import ( <ide> "net" <add><path>integration-cli/daemon/npipe_windows.go <del><path>integration-cli/npipe_windows.go <del>package main <add>package daemon <ide> <ide> import ( <ide> "net" <ide><path>integration-cli/daemon_swarm_hack.go <ide> package main <ide> <del>import "github.com/go-check/check" <add>import ( <add> "github.com/docker/docker/integration-cli/daemon" <add> "github.com/go-check/check" <add>) <ide> <del>func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { <add>func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Swarm { <ide> s.daemonsLock.Lock() <ide> defer s.daemonsLock.Unlock() <ide> for _, d := range s.daemons { <ide><path>integration-cli/docker_api_attach_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/api/types" <ide> "github.com/docker/docker/client" <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/docker/docker/pkg/stdcopy" <ide> "github.com/go-check/check" <ide> func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { <ide> // connection will shutdown, err should be "persistent connection closed" <ide> c.Assert(err, checker.NotNil) // Server shutdown connection <ide> <del> body, err := readBody(resp.Body) <add> body, err := integration.ReadBody(resp.Body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) <ide> expected := "No such container: doesnotexist\r\n" <ide><path>integration-cli/docker_api_build_test.go <ide> import ( <ide> "regexp" <ide> "strings" <ide> <add> 
"github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> RUN find /tmp/` <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <del> buf, err := readBody(body) <add> buf, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure Dockerfile exists. <ide> RUN echo 'right' <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <ide> defer body.Close() <del> content, err := readBody(body) <add> content, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Build used the wrong dockerfile. <ide> RUN echo from dockerfile`, <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <del> buf, err := readBody(body) <add> buf, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> out := string(buf) <ide> RUN echo from Dockerfile`, <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <del> buf, err := readBody(body) <add> buf, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> out := string(buf) <ide> RUN echo from dockerfile`, <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <del> buf, err := readBody(body) <add> buf, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> out := string(buf) <ide> func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusOK) <ide> <del> out, err := readBody(body) <add> out, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> lines := strings.Split(string(out), "\n") <ide> c.Assert(len(lines), checker.GreaterThan, 1) <ide><path>integration-cli/docker_api_containers_test.go <ide> func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(string(b[:]), checker.Contains, "invalid port") <ide> } <ide> func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(string(b[:]), checker.Contains, "invalid restart policy") <ide> } <ide> func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") <ide> } <ide> func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(string(b[:]), 
checker.Contains, "maximum retry count cannot be negative") <ide> } <ide> func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> type createResp struct { <ide> ID string <ide> func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { <ide> <ide> res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") <ide> c.Assert(err, checker.IsNil) <del> b, err2 := readBody(body) <add> b, err2 := integration.ReadBody(body) <ide> c.Assert(err2, checker.IsNil) <ide> <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide><path>integration-cli/docker_api_exec_test.go <ide> import ( <ide> "strings" <ide> "time" <ide> <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> <ide> comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") <ide> func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { <ide> resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") <ide> c.Assert(err, checker.IsNil) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> comment := check.Commentf("response body: %s", b) <ide> c.Assert(err, checker.IsNil, comment) <ide> c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) <ide> func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { <ide> _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") <ide> c.Assert(err, checker.IsNil) <ide> <del> b, err = readBody(body) <add> b, err = integration.ReadBody(body) <ide> comment := check.Commentf("response body: %s", b) <ide> c.Assert(err, checker.IsNil, comment) <ide> <ide> func startExec(c *check.C, id string, code int) { <ide> resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") <ide> c.Assert(err, checker.IsNil) <ide> <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> comment := check.Commentf("response body: %s", b) <ide> c.Assert(err, checker.IsNil, comment) <ide> c.Assert(resp.StatusCode, checker.Equals, code, comment) <ide><path>integration-cli/docker_api_service_update_test.go <ide> package main <ide> <ide> import ( <ide> "github.com/docker/docker/api/types/swarm" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> <del>func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { <add>func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> if s.Spec.EndpointSpec == nil { <ide> s.Spec.EndpointSpec = &swarm.EndpointSpec{} <ide> func (s 
*DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { <ide> <ide> // Create a service with a port mapping of 8080:8081. <ide> portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} <del> serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // Update the service: changed the port mapping from 8080:8081 to 8082:8083. <ide> updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} <del> remoteService := d.getService(c, serviceID) <del> d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) <add> remoteService := d.GetService(c, serviceID) <add> d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig)) <ide> <ide> // Inspect the service and verify port mapping. <del> updatedService := d.getService(c, serviceID) <add> updatedService := d.GetService(c, serviceID) <ide> c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) <ide> c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) <ide> c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) <ide><path>integration-cli/docker_api_swarm_test.go <ide> import ( <ide> "time" <ide> <ide> "github.com/docker/docker/api/types/swarm" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> var defaultReconciliationTimeout = 30 * time.Second <ide> func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { <ide> // todo: should find a better way to verify that components are running than /info <ide> d1 := s.AddDaemon(c, true, true) <del> info, err := d1.info() <add> info, err := d1.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.True) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> <ide> d2 := s.AddDaemon(c, true, false) <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> <ide> // Leaving cluster <ide> c.Assert(d2.Leave(false), checker.IsNil) <ide> <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <del> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) <add> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) <ide> <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { <ide> err = d2.Start() <ide> c.Assert(err, checker.IsNil) <ide> <del> info, err = d1.info() <add> info, err = d1.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> 
c.Assert(info.ControlAvailable, checker.True) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { <ide> // todo: error message differs depending if some components of token are valid <ide> <ide> d2 := s.AddDaemon(c, false, false) <del> err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) <add> err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) <ide> c.Assert(err, checker.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "join token is necessary") <del> info, err := d2.info() <add> info, err := d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <del> err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) <add> err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}}) <ide> c.Assert(err, checker.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "invalid join token") <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <del> workerToken := d1.joinTokens(c).Worker <add> workerToken := d1.JoinTokens(c).Worker <ide> <del> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) <del> info, err = d2.info() <add> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> c.Assert(d2.Leave(false), checker.IsNil) <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <ide> // change tokens <del> d1.rotateTokens(c) <add> d1.RotateTokens(c) <ide> <del> err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) <add> err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}) <ide> c.Assert(err, checker.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "join token is necessary") <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <del> workerToken = d1.joinTokens(c).Worker <add> workerToken = d1.JoinTokens(c).Worker <ide> <del> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) <del> info, err = d2.info() <add> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> c.Assert(d2.Leave(false), checker.IsNil) <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> 
<ide> // change spec, don't change tokens <del> d1.updateSwarm(c, func(s *swarm.Spec) {}) <add> d1.UpdateSwarm(c, func(s *swarm.Spec) {}) <ide> <del> err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) <add> err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) <ide> c.Assert(err, checker.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "join token is necessary") <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <del> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) <del> info, err = d2.info() <add> c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> c.Assert(d2.Leave(false), checker.IsNil) <del> info, err = d2.info() <add> info, err = d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { <ide> d1 := s.AddDaemon(c, true, true) <ide> d2 := s.AddDaemon(c, false, false) <del> splitToken := strings.Split(d1.joinTokens(c).Worker, "-") <add> splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") <ide> splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" <ide> replacementToken := strings.Join(splitToken, "-") <del> err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) <add> err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}}) <ide> c.Assert(err, checker.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { <ide> c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) <ide> d2 := s.AddDaemon(c, true, false) <ide> <del> info, err := d2.info() <add> info, err := d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Role = swarm.NodeRoleManager <ide> }) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) <ide> <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Role = swarm.NodeRoleWorker <ide> }) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False) <ide> <ide> // Demoting last node should fail <del> node := d1.getNode(c, d1.NodeID) <add> node := d1.GetNode(c, d1.NodeID) <ide> node.Spec.Role = swarm.NodeRoleWorker <ide> url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) <ide> status, out, err := d1.SockRequest("POST", url, node.Spec) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(status, checker.Equals, http.StatusInternalServerError, 
check.Commentf("output: %q", string(out))) <ide> c.Assert(string(out), checker.Contains, "last manager of the swarm") <del> info, err = d1.info() <add> info, err = d1.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> c.Assert(info.ControlAvailable, checker.True) <ide> <ide> // Promote already demoted node <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Role = swarm.NodeRoleManager <ide> }) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <del> services := d.listServices(c) <add> services := d.ListServices(c) <ide> c.Assert(services, checker.NotNil) <ide> c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> id := d.createService(c, simpleTestService, setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <add> id := d.CreateService(c, simpleTestService, setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <ide> <del> service := d.getService(c, id) <add> service := d.GetService(c, id) <ide> instances = 5 <del> d.updateService(c, service, setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <add> d.UpdateService(c, service, setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <ide> <del> d.removeService(c, service.ID) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) <add> d.RemoveService(c, service.ID) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { <ide> time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks <ide> <ide> instances := 9 <del> id := d1.createService(c, simpleTestService, setInstances(instances)) <add> id := d1.CreateService(c, simpleTestService, setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, 
d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> // reconciliation on d2 node down <ide> c.Assert(d2.Stop(), checker.IsNil) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> // test downscaling <ide> instances = 5 <del> d1.updateService(c, d1.getService(c, id), setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { <ide> d2 := s.AddDaemon(c, true, false) <ide> d3 := s.AddDaemon(c, true, false) <ide> <del> d1.createService(c, simpleTestService, setGlobalMode) <add> d1.CreateService(c, simpleTestService, setGlobalMode) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) <del> waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> d4 := s.AddDaemon(c, true, false) <ide> d5 := s.AddDaemon(c, true, false) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) <del> waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { <ide> const nodeCount = 3 <del> var daemons [nodeCount]*SwarmDaemon <add> var daemons [nodeCount]*daemon.Swarm <ide> for i := 0; i < nodeCount; i++ { <ide> daemons[i] = s.AddDaemon(c, true, i == 0) <ide> } <ide> // wait for nodes ready <del> waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) <add> waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) <ide> <ide> // service image at start <ide> image1 := "busybox:latest" <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { <ide> // create service <ide> instances := 5 <ide> parallelism := 2 <del> id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) <add> id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) <ide> 
<ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances}) <ide> <ide> // issue service update <del> service := daemons[0].getService(c, id) <del> daemons[0].updateService(c, service, setImage(image2)) <add> service := daemons[0].GetService(c, id) <add> daemons[0].UpdateService(c, service, setImage(image2)) <ide> <ide> // first batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances - parallelism, image2: parallelism}) <ide> <ide> // 2nd batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) <ide> <ide> // 3nd batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image2: instances}) <ide> <ide> // Roll back to the previous version. This uses the CLI because <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> // first batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image2: instances - parallelism, image1: parallelism}) <ide> <ide> // 2nd batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) <ide> <ide> // 3nd batch <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances}) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { <ide> const nodeCount = 3 <del> var daemons [nodeCount]*SwarmDaemon <add> var daemons [nodeCount]*daemon.Swarm <ide> for i := 0; i < nodeCount; i++ { <ide> daemons[i] = s.AddDaemon(c, true, i == 0) <ide> } <ide> // wait for nodes ready <del> waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) <add> waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) <ide> <ide> // service image at start <ide> image1 := "busybox:latest" <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { <ide> <ide> // create service <ide> instances := 5 <del> id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) <add> id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) <ide> <ide> // wait for tasks ready <del> waitAndAssert(c, 
defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances}) <ide> <ide> // issue service update <del> service := daemons[0].getService(c, id) <del> daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) <add> service := daemons[0].GetService(c, id) <add> daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) <ide> <ide> // should update 2 tasks and then pause <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) <del> v, _ := daemons[0].checkServiceRunningTasks(id)(c) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) <add> v, _ := daemons[0].CheckServiceRunningTasks(id)(c) <ide> c.Assert(v, checker.Equals, instances-2) <ide> <ide> // Roll back to the previous version. This uses the CLI because <ide> // rollback is a client-side operation. <ide> out, err := daemons[0].Cmd("service", "update", "--rollback", id) <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, <ide> map[string]int{image1: instances}) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { <ide> const nodeCount = 3 <del> var daemons [nodeCount]*SwarmDaemon <add> var daemons [nodeCount]*daemon.Swarm <ide> for i := 0; i < nodeCount; i++ { <ide> daemons[i] = s.AddDaemon(c, true, i == 0) <ide> } <ide> // wait for nodes ready <del> waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) <add> waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) <ide> <ide> // create service <ide> constraints := []string{"node.role==worker"} <ide> instances := 3 <del> id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) <ide> // validate tasks are running on worker nodes <del> tasks := daemons[0].getServiceTasks(c, id) <add> tasks := daemons[0].GetServiceTasks(c, id) <ide> for _, task := range tasks { <del> node := daemons[0].getNode(c, task.NodeID) <add> node := daemons[0].GetNode(c, task.NodeID) <ide> c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) <ide> } <ide> //remove service <del> daemons[0].removeService(c, id) <add> daemons[0].RemoveService(c, id) <ide> <ide> // create service <ide> constraints = []string{"node.role!=worker"} <del> id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), 
setInstances(instances)) <ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) <del> tasks = daemons[0].getServiceTasks(c, id) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> // validate tasks are running on manager nodes <ide> for _, task := range tasks { <del> node := daemons[0].getNode(c, task.NodeID) <add> node := daemons[0].GetNode(c, task.NodeID) <ide> c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) <ide> } <ide> //remove service <del> daemons[0].removeService(c, id) <add> daemons[0].RemoveService(c, id) <ide> <ide> // create service <ide> constraints = []string{"node.role==nosuchrole"} <del> id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks created <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) <ide> // let scheduler try <ide> time.Sleep(250 * time.Millisecond) <ide> // validate tasks are not assigned to any node <del> tasks = daemons[0].getServiceTasks(c, id) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Equals, "") <ide> } <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { <ide> const nodeCount = 3 <del> var daemons [nodeCount]*SwarmDaemon <add> var daemons [nodeCount]*daemon.Swarm <ide> for i := 0; i < nodeCount; i++ { <ide> daemons[i] = s.AddDaemon(c, true, i == 0) <ide> } <ide> // wait for nodes ready <del> waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) <del> nodes := daemons[0].listNodes(c) <add> waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) <add> nodes := daemons[0].ListNodes(c) <ide> c.Assert(len(nodes), checker.Equals, nodeCount) <ide> <ide> // add labels to nodes <del> daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { <add> daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { <ide> n.Spec.Annotations.Labels = map[string]string{ <ide> "security": "high", <ide> } <ide> }) <ide> for i := 1; i < nodeCount; i++ { <del> daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { <add> daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { <ide> n.Spec.Annotations.Labels = map[string]string{ <ide> "security": "low", <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { <ide> // create service <ide> instances := 3 <ide> constraints := []string{"node.labels.security==high"} <del> id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) <del> tasks := daemons[0].getServiceTasks(c, id) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) <add> 
tasks := daemons[0].GetServiceTasks(c, id) <ide> // validate all tasks are running on nodes[0] <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Equals, nodes[0].ID) <ide> } <ide> //remove service <del> daemons[0].removeService(c, id) <add> daemons[0].RemoveService(c, id) <ide> <ide> // create service <ide> constraints = []string{"node.labels.security!=high"} <del> id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) <del> tasks = daemons[0].getServiceTasks(c, id) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> // validate all tasks are NOT running on nodes[0] <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) <ide> } <ide> //remove service <del> daemons[0].removeService(c, id) <add> daemons[0].RemoveService(c, id) <ide> <ide> constraints = []string{"node.labels.security==medium"} <del> id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks created <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) <ide> // let scheduler try <ide> time.Sleep(250 * time.Millisecond) <del> tasks = daemons[0].getServiceTasks(c, id) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> // validate tasks are not assigned <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Equals, "") <ide> } <ide> //remove service <del> daemons[0].removeService(c, id) <add> daemons[0].RemoveService(c, id) <ide> <ide> // multiple constraints <ide> constraints = []string{ <ide> "node.labels.security==high", <ide> fmt.Sprintf("node.id==%s", nodes[1].ID), <ide> } <del> id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <add> id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) <ide> // wait for tasks created <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) <ide> // let scheduler try <ide> time.Sleep(250 * time.Millisecond) <del> tasks = daemons[0].getServiceTasks(c, id) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> // validate tasks are not assigned <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Equals, "") <ide> } <ide> // make nodes[1] fulfills the constraints <del> daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { <add> daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) { <ide> n.Spec.Annotations.Labels = map[string]string{ <ide> "security": "high", <ide> } <ide> }) <ide> // wait for tasks ready <del> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) 
<del> tasks = daemons[0].getServiceTasks(c, id) <add> waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) <add> tasks = daemons[0].GetServiceTasks(c, id) <ide> for _, task := range tasks { <ide> c.Assert(task.NodeID, checker.Equals, nodes[1].ID) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { <ide> time.Sleep(1 * time.Second) // make sure all daemons are ready to accept <ide> <ide> instances := 9 <del> d1.createService(c, simpleTestService, setInstances(instances)) <add> d1.CreateService(c, simpleTestService, setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <del> getContainers := func() map[string]*SwarmDaemon { <del> m := make(map[string]*SwarmDaemon) <del> for _, d := range []*SwarmDaemon{d1, d2, d3} { <del> for _, id := range d.activeContainers() { <add> getContainers := func() map[string]*daemon.Swarm { <add> m := make(map[string]*daemon.Swarm) <add> for _, d := range []*daemon.Swarm{d1, d2, d3} { <add> for _, id := range d.ActiveContainers() { <ide> m[id] = d <ide> } <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { <ide> _, err := containers[toRemove].Cmd("stop", toRemove) <ide> c.Assert(err, checker.IsNil) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> containers2 := getContainers() <ide> c.Assert(containers2, checker.HasLen, instances) <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { <ide> <ide> time.Sleep(time.Second) // give some time to handle the signal <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> containers2 = getContainers() <ide> c.Assert(containers2, checker.HasLen, instances) <ide> func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { <ide> d3 := s.AddDaemon(c, true, true) <ide> <ide> // start a service by hitting each of the 3 managers <del> d1.createService(c, simpleTestService, func(s *swarm.Service) { <add> d1.CreateService(c, simpleTestService, func(s *swarm.Service) { <ide> s.Spec.Name = "test1" <ide> }) <del> d2.createService(c, simpleTestService, func(s *swarm.Service) { <add> d2.CreateService(c, simpleTestService, func(s *swarm.Service) { <ide> s.Spec.Name = "test2" <ide> }) <del> d3.createService(c, simpleTestService, func(s *swarm.Service) { <add> d3.CreateService(c, simpleTestService, func(s *swarm.Service) { <ide> s.Spec.Name = "test3" <ide> }) <ide> <ide> // 3 services 
should be started now, because the requests were proxied to leader <ide> // query each node and make sure it returns 3 services <del> for _, d := range []*SwarmDaemon{d1, d2, d3} { <del> services := d.listServices(c) <add> for _, d := range []*daemon.Swarm{d1, d2, d3} { <add> services := d.ListServices(c) <ide> c.Assert(services, checker.HasLen, 3) <ide> } <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { <ide> d3 := s.AddDaemon(c, true, true) <ide> <ide> // assert that the first node we made is the leader, and the other two are followers <del> c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) <del> c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) <del> c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) <add> c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) <add> c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) <add> c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) <ide> <ide> d1.Stop() // stop the leader <ide> <ide> var ( <del> leader *SwarmDaemon // keep track of leader <del> followers []*SwarmDaemon // keep track of followers <add> leader *daemon.Swarm // keep track of leader <add> followers []*daemon.Swarm // keep track of followers <ide> ) <del> checkLeader := func(nodes ...*SwarmDaemon) checkF { <add> checkLeader := func(nodes ...*daemon.Swarm) checkF { <ide> return func(c *check.C) (interface{}, check.CommentInterface) { <ide> // clear these out before each run <ide> leader = nil <ide> followers = nil <ide> for _, d := range nodes { <del> if d.getNode(c, d.NodeID).ManagerStatus.Leader { <add> if d.GetNode(c, d.NodeID).ManagerStatus.Leader { <ide> leader = d <ide> } else { <ide> followers = append(followers, d) <ide> func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { <ide> return false, check.Commentf("no leader elected") <ide> } <ide> <del> return true, check.Commentf("elected %v", leader.id) <add> return true, check.Commentf("elected %v", leader.ID()) <ide> } <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { <ide> d2 := s.AddDaemon(c, true, true) <ide> d3 := s.AddDaemon(c, true, true) <ide> <del> d1.createService(c, simpleTestService) <add> d1.CreateService(c, simpleTestService) <ide> <ide> c.Assert(d2.Stop(), checker.IsNil) <ide> <ide> // make sure there is a leader <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) <ide> <del> d1.createService(c, simpleTestService, func(s *swarm.Service) { <add> d1.CreateService(c, simpleTestService, func(s *swarm.Service) { <ide> s.Spec.Name = "top1" <ide> }) <ide> <ide> c.Assert(d3.Stop(), checker.IsNil) <ide> <ide> // make sure there is a leader <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) <ide> <ide> var service swarm.Service <ide> simpleTestService(&service) <ide> func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { <ide> c.Assert(d2.Start(), checker.IsNil) <ide> <ide> // make sure there is a leader <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) <ide> <del> d1.createService(c, simpleTestService, func(s *swarm.Service) { <add> d1.CreateService(c, simpleTestService, func(s 
*swarm.Service) { <ide> s.Spec.Name = "top3" <ide> }) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { <ide> d2 := s.AddDaemon(c, true, false) <ide> d3 := s.AddDaemon(c, true, false) <ide> <del> nodes := d1.listNodes(c) <add> nodes := d1.ListNodes(c) <ide> c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) <ide> <ide> loop0: <ide> for _, n := range nodes { <del> for _, d := range []*SwarmDaemon{d1, d2, d3} { <add> for _, d := range []*daemon.Swarm{d1, d2, d3} { <ide> if n.ID == d.NodeID { <ide> continue loop0 <ide> } <ide> loop0: <ide> func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <del> nodes := d.listNodes(c) <add> nodes := d.ListNodes(c) <ide> <del> d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { <add> d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { <ide> n.Spec.Availability = swarm.NodeAvailabilityPause <ide> }) <ide> <del> n := d.getNode(c, nodes[0].ID) <add> n := d.GetNode(c, nodes[0].ID) <ide> c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { <ide> d2 := s.AddDaemon(c, true, false) <ide> _ = s.AddDaemon(c, true, false) <ide> <del> nodes := d1.listNodes(c) <add> nodes := d1.ListNodes(c) <ide> c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) <ide> <ide> // Getting the info so we can take the NodeID <del> d2Info, err := d2.info() <add> d2Info, err := d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> <ide> // forceful removal of d2 should work <del> d1.removeNode(c, d2Info.NodeID, true) <add> d1.RemoveNode(c, d2Info.NodeID, true) <ide> <del> nodes = d1.listNodes(c) <add> nodes = d1.ListNodes(c) <ide> c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) <ide> <ide> // Restart the node that was removed <ide> func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { <ide> time.Sleep(1 * time.Second) <ide> <ide> // Make sure the node didn't rejoin <del> nodes = d1.listNodes(c) <add> nodes = d1.ListNodes(c) <ide> c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { <ide> <ide> // start a service, expect balanced distribution <ide> instances := 8 <del> id := d1.createService(c, simpleTestService, setInstances(instances)) <add> id := d1.CreateService(c, simpleTestService, setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> // drain d2, all containers should move to d1 <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Availability = swarm.NodeAvailabilityDrain <ide> }) <del> 
waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) <ide> <ide> // set d2 back to active <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Availability = swarm.NodeAvailabilityActive <ide> }) <ide> <ide> instances = 1 <del> d1.updateService(c, d1.getService(c, id), setInstances(instances)) <add> d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> instances = 8 <del> d1.updateService(c, d1.getService(c, id), setInstances(instances)) <add> d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) <ide> <ide> // drained node first so we don't get any old containers <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) <del> waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) <add> waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) <ide> <del> d2ContainerCount := len(d2.activeContainers()) <add> d2ContainerCount := len(d2.ActiveContainers()) <ide> <ide> // set d2 to paused, scale service up, only d1 gets new tasks <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Availability = swarm.NodeAvailabilityPause <ide> }) <ide> <ide> instances = 14 <del> d1.updateService(c, d1.getService(c, id), setInstances(instances)) <add> d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) <ide> <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> d.createService(c, simpleTestService, setInstances(instances)) <add> d.CreateService(c, simpleTestService, setInstances(instances)) <ide> <ide> id, err := d.Cmd("run", "-d", "busybox", 
"top") <ide> c.Assert(err, checker.IsNil) <ide> id = strings.TrimSpace(id) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1) <ide> <ide> c.Assert(d.Leave(false), checker.NotNil) <ide> c.Assert(d.Leave(true), checker.IsNil) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> id2, err := d.Cmd("ps", "-q") <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { <ide> c.Assert(err, check.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "Timeout was reached") <ide> <del> info, err := d2.info() <add> info, err := d2.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) <ide> <ide> c.Assert(d2.Leave(true), checker.IsNil) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> id2, err := d2.Cmd("ps", "-q") <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { <ide> c.Assert(err, check.NotNil) <ide> c.Assert(err.Error(), checker.Contains, "Timeout was reached") <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) <ide> <ide> c.Assert(d.Stop(), checker.IsNil) <ide> c.Assert(d.Start(), checker.IsNil) <ide> <del> info, err := d.info() <add> info, err := d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { <ide> d1 := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> id := d1.createService(c, simpleTestService, setInstances(instances)) <add> id := d1.CreateService(c, simpleTestService, setInstances(instances)) <ide> <del> d1.getService(c, id) <add> d1.GetService(c, id) <ide> d1.Stop() <ide> d1.Start() <del> d1.getService(c, id) <add> d1.GetService(c, id) <ide> <ide> d2 := s.AddDaemon(c, true, true) <del> d2.getService(c, id) <add> d2.GetService(c, id) <ide> d2.Stop() <ide> d2.Start() <del> d2.getService(c, id) <add> d2.GetService(c, id) <ide> <ide> d3 := s.AddDaemon(c, true, true) <del> d3.getService(c, id) <add> d3.GetService(c, id) <ide> d3.Stop() <ide> d3.Start() <del> d3.getService(c, id) <add> d3.GetService(c, id) <ide> <ide> d3.Kill() <ide> time.Sleep(1 * time.Second) // time to handle signal <ide> d3.Start() <del> d3.getService(c, id) <add> d3.GetService(c, id) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> id := d.createService(c, simpleTestService, setInstances(instances)) <add> id := d.CreateService(c, simpleTestService, setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <del> containers := d.activeContainers() <add> waitAndAssert(c, 
defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <add> containers := d.ActiveContainers() <ide> instances = 4 <del> d.updateService(c, d.getService(c, id), setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <del> containers2 := d.activeContainers() <add> d.UpdateService(c, d.GetService(c, id), setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <add> containers2 := d.ActiveContainers() <ide> <ide> loop0: <ide> for _, c1 := range containers { <ide> func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { <ide> d2 := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> id := d1.createService(c, simpleTestService, setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) <add> id := d1.CreateService(c, simpleTestService, setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) <ide> <ide> // drain d2, all containers should move to d1 <del> d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { <add> d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { <ide> n.Spec.Availability = swarm.NodeAvailabilityDrain <ide> }) <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) <del> waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) <ide> <ide> c.Assert(d2.Stop(), checker.IsNil) <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { <ide> Spec: swarm.Spec{}, <ide> }), checker.IsNil) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) <ide> <ide> d3 := s.AddDaemon(c, true, true) <del> info, err := d3.info() <add> info, err := d3.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.True) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> <ide> instances = 4 <del> d3.updateService(c, d3.getService(c, id), setInstances(instances)) <add> d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) <ide> <del> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) <ide> } <ide> <ide> func simpleTestService(s *swarm.Service) { <ide> func serviceForUpdate(s *swarm.Service) { <ide> s.Spec.Name = "updatetest" <ide> } <ide> <del>func setInstances(replicas int) serviceConstructor { <add>func setInstances(replicas int) daemon.ServiceConstructor { <ide> ureplicas := uint64(replicas) <ide> return func(s *swarm.Service) { <ide> s.Spec.Mode = swarm.ServiceMode{ <ide> func 
setInstances(replicas int) serviceConstructor { <ide> } <ide> } <ide> <del>func setImage(image string) serviceConstructor { <add>func setImage(image string) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> s.Spec.TaskTemplate.ContainerSpec.Image = image <ide> } <ide> } <ide> <del>func setFailureAction(failureAction string) serviceConstructor { <add>func setFailureAction(failureAction string) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> s.Spec.UpdateConfig.FailureAction = failureAction <ide> } <ide> } <ide> <del>func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { <add>func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio <ide> } <ide> } <ide> <del>func setParallelism(parallelism uint64) serviceConstructor { <add>func setParallelism(parallelism uint64) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> s.Spec.UpdateConfig.Parallelism = parallelism <ide> } <ide> } <ide> <del>func setConstraints(constraints []string) serviceConstructor { <add>func setConstraints(constraints []string) daemon.ServiceConstructor { <ide> return func(s *swarm.Service) { <ide> if s.Spec.TaskTemplate.Placement == nil { <ide> s.Spec.TaskTemplate.Placement = &swarm.Placement{} <ide> func setGlobalMode(s *swarm.Service) { <ide> } <ide> } <ide> <del>func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { <add>func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) { <ide> var totalMCount, totalWCount int <ide> <ide> for _, d := range cl { <ide> func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount <ide> <ide> // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error <ide> checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { <del> info, err = d.info() <add> info, err = d.SwarmInfo() <ide> return err, check.Commentf("cluster not ready in time") <ide> } <ide> waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) <ide> func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount <ide> totalMCount++ <ide> var mCount, wCount int <ide> <del> for _, n := range d.listNodes(c) { <add> for _, n := range d.ListNodes(c) { <ide> waitReady := func(c *check.C) (interface{}, check.CommentInterface) { <ide> if n.Status.State == swarm.NodeStateReady { <ide> return true, nil <ide> } <del> nn := d.getNode(c, n.ID) <add> nn := d.GetNode(c, n.ID) <ide> n = *nn <ide> return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) <ide> } <ide> func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount <ide> if n.Spec.Availability == swarm.NodeAvailabilityActive { <ide> return true, nil <ide> } <del> nn := d.getNode(c, n.ID) <add> nn := d.GetNode(c, n.ID) <ide> n = *nn <ide> return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) <ide> } <ide> func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount <ide> func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { <ide> mCount, wCount := 5, 1 <ide> <del> var nodes []*SwarmDaemon <add> var nodes []*daemon.Swarm <ide> for i := 0; i < mCount; i++ { <ide> manager := s.AddDaemon(c, true, true) <del> info, err := 
manager.info() <add> info, err := manager.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.True) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { <ide> <ide> for i := 0; i < wCount; i++ { <ide> worker := s.AddDaemon(c, true, false) <del> info, err := worker.info() <add> info, err := worker.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.ControlAvailable, checker.False) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { <ide> errs := make(chan error, len(nodes)) <ide> <ide> for _, d := range nodes { <del> go func(daemon *SwarmDaemon) { <add> go func(daemon *daemon.Swarm) { <ide> defer wg.Done() <ide> if err := daemon.Stop(); err != nil { <ide> errs <- err <ide> } <add> // FIXME(vdemeester) This is duplicated… <ide> if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { <del> daemon.root = filepath.Dir(daemon.root) <add> daemon.Root = filepath.Dir(daemon.Root) <ide> } <ide> }(d) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { <ide> errs := make(chan error, len(nodes)) <ide> <ide> for _, d := range nodes { <del> go func(daemon *SwarmDaemon) { <add> go func(daemon *daemon.Swarm) { <ide> defer wg.Done() <ide> if err := daemon.Start("--iptables=false"); err != nil { <ide> errs <- err <ide> func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> instances := 2 <del> id := d.createService(c, simpleTestService, setInstances(instances)) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <add> id := d.CreateService(c, simpleTestService, setInstances(instances)) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <ide> <del> service := d.getService(c, id) <add> service := d.GetService(c, id) <ide> instances = 5 <ide> <ide> setInstances(instances)(service) <ide> url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) <ide> status, out, err := d.SockRequest("POST", url, service.Spec) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <del> secrets := d.listSecrets(c) <add> secrets := d.ListSecrets(c) <ide> c.Assert(secrets, checker.NotNil) <ide> c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) <ide> } <ide> func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> []byte("TESTINGDATA"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secrets := d.listSecrets(c) <add> secrets := d.ListSecrets(c) <ide> c.Assert(len(secrets), 
checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) <ide> name := secrets[0].Spec.Annotations.Name <ide> c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) <ide> func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> []byte("TESTINGDATA"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secret := d.getSecret(c, id) <add> secret := d.GetSecret(c, id) <ide> c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) <ide> <del> d.deleteSecret(c, secret.ID) <add> d.DeleteSecret(c, secret.ID) <ide> status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) <ide><path>integration-cli/docker_api_test.go <ide> import ( <ide> "strings" <ide> <ide> "github.com/docker/docker/api" <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> icmd "github.com/docker/docker/pkg/integration/cmd" <ide> "github.com/go-check/check" <ide> func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") <ide> } <ide> func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") <ide> } <ide> func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) <ide> c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") <ide> } <ide> func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) <ide> c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") <ide> } <ide><path>integration-cli/docker_cli_authz_plugin_v2_test.go <ide> import ( <ide> "fmt" <ide> "strings" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> 
) <ide> func init() { <ide> <ide> type DockerAuthzV2Suite struct { <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { <ide> testRequires(c, DaemonIsLinux, Network) <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> c.Assert(s.d.Start(), check.IsNil) <ide> } <ide> <ide><path>integration-cli/docker_cli_authz_unix_test.go <ide> import ( <ide> "net/http/httputil" <ide> "net/url" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/authorization" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/docker/docker/pkg/plugins" <ide> func init() { <ide> type DockerAuthzSuite struct { <ide> server *httptest.Server <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> ctrl *authorizationController <ide> } <ide> <ide> type authorizationController struct { <ide> } <ide> <ide> func (s *DockerAuthzSuite) SetUpTest(c *check.C) { <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> s.ctrl = &authorizationController{} <ide> } <ide> <ide> func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { <ide> s.ctrl.reqRes.Allow = false <ide> s.ctrl.resRes.Msg = unauthorizedMessage <ide> <del> daemonURL, err := url.Parse(s.d.sock()) <add> daemonURL, err := url.Parse(s.d.Sock()) <ide> <ide> conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) <ide> c.Assert(err, check.IsNil) <ide> func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { <ide> <ide> startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) <ide> // Add another command to to enable event pipelining <del> eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime) <add> eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime) <ide> stdout, err := eventsCmd.StdoutPipe() <ide> if err != nil { <ide> c.Assert(err, check.IsNil) <ide> func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { <ide> out, err := s.d.Cmd("run", "-d", "busybox", "top") <ide> c.Assert(err, check.IsNil, check.Commentf(out)) <ide> containerID := strings.TrimSpace(out) <del> c.Assert(s.d.waitRun(containerID), checker.IsNil) <add> c.Assert(s.d.WaitRun(containerID), checker.IsNil) <ide> <ide> events := map[string]chan bool{ <ide> "create": make(chan bool, 1), <ide> func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { <ide> s.ctrl.resRes.Allow = true <ide> c.Assert(s.d.LoadBusybox(), check.IsNil) <ide> <del> daemonURL, err := url.Parse(s.d.sock()) <add> daemonURL, err := url.Parse(s.d.Sock()) <ide> <ide> conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) <ide> c.Assert(err, check.IsNil) <ide><path>integration-cli/docker_cli_by_digest_test.go <ide> func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { <ide> // digest verification for the target layer digest. 
<ide> <ide> // Remove distribution cache to force a re-pull of the blobs <del> if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { <add> if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil { <ide> c.Fatalf("error clearing distribution cache: %v", err) <ide> } <ide> <ide> func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { <ide> // digest verification for the target layer digest. <ide> <ide> // Remove distribution cache to force a re-pull of the blobs <del> if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { <add> if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil { <ide> c.Fatalf("error clearing distribution cache: %v", err) <ide> } <ide> <ide><path>integration-cli/docker_cli_daemon_plugins_test.go <ide> func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) <ide> } <ide> }() <ide> <del> if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { <add> if err := s.d.Interrupt(); err != nil { <ide> c.Fatalf("Could not kill daemon: %v", err) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { <ide> } <ide> }() <ide> <del> if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { <add> if err := s.d.Interrupt(); err != nil { <ide> c.Fatalf("Could not kill daemon: %v", err) <ide> } <ide> <ide> for { <del> if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { <add> if err := syscall.Kill(s.d.Pid(), 0); err == syscall.ESRCH { <ide> break <ide> } <ide> } <ide><path>integration-cli/docker_cli_daemon_test.go <ide> import ( <ide> "syscall" <ide> "time" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> icmd "github.com/docker/docker/pkg/integration/cmd" <ide> "github.com/docker/docker/pkg/mount" <ide> func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { <ide> c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) <ide> <ide> // wait test1 to stop <del> hostArgs := []string{"--host", s.d.sock()} <add> hostArgs := []string{"--host", s.d.Sock()} <ide> err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) 
<ide> c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) <ide> <ide> func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { <ide> testRequires(c, Devicemapper) <ide> c.Assert(s.d.Start(), check.IsNil) <ide> <del> oldBasesizeBytes := s.d.getBaseDeviceSize(c) <add> oldBasesizeBytes := s.d.GetBaseDeviceSize(c) <ide> var newBasesizeBytes int64 = 1073741824 //1GB in bytes <ide> <ide> if newBasesizeBytes < oldBasesizeBytes { <ide> func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { <ide> testRequires(c, Devicemapper) <ide> c.Assert(s.d.Start(), check.IsNil) <ide> <del> oldBasesizeBytes := s.d.getBaseDeviceSize(c) <add> oldBasesizeBytes := s.d.GetBaseDeviceSize(c) <ide> <ide> var newBasesizeBytes int64 = 53687091200 //50GB in bytes <ide> <ide> func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { <ide> err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) <ide> c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) <ide> <del> basesizeAfterRestart := s.d.getBaseDeviceSize(c) <add> basesizeAfterRestart := s.d.GetBaseDeviceSize(c) <ide> newBasesize, err := convertBasesize(newBasesizeBytes) <ide> c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) <ide> c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) <ide> func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { <ide> if err := s.d.Start("--log-level=debug"); err != nil { <ide> c.Fatal(err) <ide> } <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> if !strings.Contains(string(content), `level=debug`) { <ide> c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { <ide> if err := s.d.Start("--log-level=fatal"); err != nil { <ide> c.Fatal(err) <ide> } <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> if strings.Contains(string(content), `level=debug`) { <ide> c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { <ide> if err := s.d.Start("-D"); err != nil { <ide> c.Fatal(err) <ide> } <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> if !strings.Contains(string(content), `level=debug`) { <ide> c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { <ide> if err := s.d.Start("--debug"); err != nil { <ide> c.Fatal(err) <ide> } <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> if !strings.Contains(string(content), `level=debug`) { <ide> c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { <ide> if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { <ide> c.Fatal(err) <ide> } <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> 
c.Assert(err, checker.IsNil) <ide> if !strings.Contains(string(content), `level=debug`) { <ide> c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { <ide> _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") <ide> c.Assert(err, check.IsNil) <ide> <del> containerIP := d.findContainerIP("ExtContainer") <add> containerIP := d.FindContainerIP("ExtContainer") <ide> ip := net.ParseIP(containerIP) <ide> c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, <ide> check.Commentf("Container IP-Address must be in the same subnet range : %s", <ide> func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { <ide> out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") <ide> c.Assert(err, check.IsNil) <ide> <del> containerIP := d.findContainerIP("test") <add> containerIP := d.FindContainerIP("test") <ide> ip = net.ParseIP(containerIP) <ide> c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, <ide> check.Commentf("Container IP-Address must be in the same subnet range : %s", <ide> func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *che <ide> _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") <ide> c.Assert(err, check.IsNil) <ide> <del> childIP := s.d.findContainerIP("child") <del> parentIP := s.d.findContainerIP("parent") <add> childIP := s.d.FindContainerIP("child") <add> parentIP := s.d.FindContainerIP("parent") <ide> <ide> sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} <ide> destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} <ide> func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { <ide> <ide> out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") <ide> c.Assert(err, check.IsNil, check.Commentf(out)) <del> id, err := s.d.getIDByName("test") <add> id, err := s.d.GetIDByName("test") <ide> c.Assert(err, check.IsNil) <ide> <del> logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") <add> logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") <ide> <ide> if _, err := os.Stat(logPath); err != nil { <ide> c.Fatal(err) <ide> func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { <ide> if err != nil { <ide> c.Fatal(out, err) <ide> } <del> id, err := s.d.getIDByName("test") <add> id, err := s.d.GetIDByName("test") <ide> c.Assert(err, check.IsNil) <ide> <del> logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") <add> logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") <ide> <ide> if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { <ide> c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) <ide> func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { <ide> if err != nil { <ide> c.Fatal(out, err) <ide> } <del> id, err := s.d.getIDByName("test") <add> id, err := s.d.GetIDByName("test") <ide> c.Assert(err, check.IsNil) <ide> <del> logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") <add> logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") <ide> <ide> if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { <ide> c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) <ide> 
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { <ide> if err != nil { <ide> c.Fatal(out, err) <ide> } <del> id, err := s.d.getIDByName("test") <add> id, err := s.d.GetIDByName("test") <ide> c.Assert(err, check.IsNil) <ide> <del> logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") <add> logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") <ide> <ide> if _, err := os.Stat(logPath); err != nil { <ide> c.Fatal(err) <ide> func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { <ide> c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) <ide> } <ide> <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> <ide> if !strings.Contains(string(content), "Public Key ID does not match") { <ide> c.Fatal("Missing KeyID message from daemon logs") <ide> func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { <ide> } <ide> } <ide> <del>func pingContainers(c *check.C, d *Daemon, expectFailure bool) { <add>func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) { <ide> var dargs []string <ide> if d != nil { <del> dargs = []string{"--host", d.sock()} <add> dargs = []string{"--host", d.Sock()} <ide> } <ide> <ide> args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") <ide> func pingContainers(c *check.C, d *Daemon, expectFailure bool) { <ide> func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { <ide> c.Assert(s.d.StartWithBusybox(), check.IsNil) <ide> <del> socket := filepath.Join(s.d.folder, "docker.sock") <add> // socket := filepath.Join(s.d.folder, "docker.sock") <add> socket := s.d.Sock() <ide> <ide> out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) <ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec <ide> out, err := s.d.Cmd("run", "-d", "busybox", "top") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) <ide> id := strings.TrimSpace(out) <del> c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) <add> c.Assert(s.d.Signal(os.Kill), check.IsNil) <ide> mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) <ide> <ide> // container mounts should exist even after daemon has crashed. <del> comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) <add> comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) <ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) <ide> <ide> // kill the container <ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec <ide> // Now, container mounts should be gone. 
<ide> mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) <del> comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) <add> comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) <ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { <ide> id := strings.TrimSpace(out) <ide> <ide> // Send SIGINT and daemon should clean up <del> c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) <add> c.Assert(s.d.Signal(os.Interrupt), check.IsNil) <ide> // Wait for the daemon to stop. <del> c.Assert(<-s.d.wait, checker.IsNil) <add> c.Assert(<-s.d.Wait, checker.IsNil) <ide> <ide> mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) <ide> <del> comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) <add> comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) <ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { <ide> } <ide> } <ide> <add>// FIXME(vdemeester) Use a new daemon instance instead of the Suite one <ide> func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { <del> s.d.useDefaultHost = true <add> s.d.UseDefaultHost = true <ide> defer func() { <del> s.d.useDefaultHost = false <add> s.d.UseDefaultHost = false <ide> }() <ide> c.Assert(s.d.Start(), check.IsNil) <ide> } <ide> <add>// FIXME(vdemeester) Use a new daemon instance instead of the Suite one <ide> func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { <del> s.d.useDefaultTLSHost = true <add> s.d.UseDefaultTLSHost = true <ide> defer func() { <del> s.d.useDefaultTLSHost = false <add> s.d.UseDefaultTLSHost = false <ide> }() <ide> if err := s.d.Start( <ide> "--tlsverify", <ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) <ide> id := strings.TrimSpace(out) <ide> <del> c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) <add> c.Assert(s.d.Signal(os.Kill), check.IsNil) <ide> mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) <ide> <ide> // container mounts should exist even after daemon has crashed. <del> comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) <add> comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) <ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) <ide> <ide> // restart daemon. <ide> func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { <ide> // Now, container mounts should be gone. 
<ide> mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") <ide> c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) <del> comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) <add> comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) <ide> c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { <ide> _, err = configFile.Write([]byte(daemonConfig)) <ide> c.Assert(err, checker.IsNil) <ide> <del> err = s.d.reloadConfig() <add> err = s.d.ReloadConfig() <ide> c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) <ide> <ide> out, err := s.d.Cmd("info") <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { <ide> <ide> expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` <ide> expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { <ide> <ide> expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` <ide> expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { <ide> fmt.Fprintf(configFile, "%s", daemonConfig) <ide> configFile.Close() <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <add> // syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <ide> <ide> time.Sleep(3 * time.Second) <ide> <ide> expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` <ide> expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` <del> content, _ = ioutil.ReadFile(s.d.logFile.Name()) <add> content, err = s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> } <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec <ide> <ide> expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` <ide> expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec <ide> fmt.Fprintf(configFile, "%s", 
daemonConfig) <ide> configFile.Close() <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <add> // syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <ide> <ide> time.Sleep(3 * time.Second) <ide> <ide> expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` <ide> expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` <del> content, _ = ioutil.ReadFile(s.d.logFile.Name()) <add> content, err = s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> <ide> func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec <ide> fmt.Fprintf(configFile, "%s", daemonConfig) <ide> configFile.Close() <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> <ide> time.Sleep(3 * time.Second) <ide> <ide> expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` <ide> expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` <del> content, _ = ioutil.ReadFile(s.d.logFile.Name()) <add> content, err = s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) <ide> c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { <ide> err := s.d.StartWithBusybox("-b=none", "--iptables=false") <ide> c.Assert(err, check.IsNil) <del> s.d.c.Logf("dockerBinary %s", dockerBinary) <del> out, code, err := s.d.buildImageWithOut("busyboxs", <add> // s.d.c.Logf("dockerBinary %s", dockerBinary) <add> c.Logf("dockerBinary %s", dockerBinary) <add> out, code, err := s.d.BuildImageWithOut("busyboxs", <ide> `FROM busybox <ide> RUN cat /etc/hosts`, false) <ide> comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", out, code, err) <ide> func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { <ide> } <ide> ` <ide> ioutil.WriteFile(configName, []byte(config), 0644) <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> // Give daemon time to reload config <ide> <-time.After(1 * time.Second) <ide> <ide> func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { <ide> } <ide> ` <ide> ioutil.WriteFile(configName, []byte(config), 0644) <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> // Give daemon time to reload config <ide> <-time.After(1 * time.Second) <ide> <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) <ide> <ide> // Check that we can select a default runtime <ide> func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { <ide> } <ide> ` <ide> ioutil.WriteFile(configName, []byte(config), 0644) <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> // Give daemon time to reload config <ide> <-time.After(1 * time.Second) <ide> <ide> func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { <ide> err = s.d.Start("--add-runtime", "runc=my-runc") <ide> c.Assert(err, check.NotNil) <ide> <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) <ide> <ide> // Check that we can select a default runtime <ide> func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { <ide> out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> c.Assert(d.Stop(), checker.IsNil) <del> <-d.wait <add> <-d.Wait <ide> <ide> imageID := strings.TrimSpace(out) <ide> volumeID := stringid.GenerateNonCryptoID() <del> vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) <add> vfsPath := filepath.Join(d.Root, "vfs", "dir", volumeID) <ide> c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) <ide> <ide> config := []byte(` <ide> { <ide> "ID": "` + id + `", <ide> "Name": "hello", <del> "Driver": "` + d.storageDriver + `", <add> "Driver": "` + d.StorageDriver() + `", <ide> "Image": "` + imageID + `", <ide> "Config": {"Image": "busybox:latest"}, <ide> "NetworkSettings": {}, <ide> func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { <ide> } <ide> `) <ide> <del> configPath := filepath.Join(d.root, "containers", id, "config.v2.json") <add> configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") <ide> err = ioutil.WriteFile(configPath, config, 600) <ide> err = d.Start() <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { <ide> _, err := s.d.Cmd("run", "-d", "busybox", "top") <ide> c.Assert(err, check.IsNil) <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) <add> c.Assert(s.d.Signal(syscall.SIGINT), checker.IsNil) <ide> <ide> select { <del> case <-s.d.wait: <add> case <-s.d.Wait: <ide> case <-time.After(5 * time.Second): <ide> } <ide> <ide> expectedMessage := `level=debug msg="start 
clean shutdown of all containers with a 3 seconds timeout..."` <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMessage) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) <ide> fmt.Fprintf(configFile, "%s", daemonConfig) <ide> configFile.Close() <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> <ide> select { <del> case <-s.d.wait: <add> case <-s.d.Wait: <ide> case <-time.After(3 * time.Second): <ide> } <ide> <ide> expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` <del> content, _ := ioutil.ReadFile(s.d.logFile.Name()) <add> content, err := s.d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, expectedMessage) <ide> } <ide><path>integration-cli/docker_cli_events_unix_test.go <ide> func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { <ide> fmt.Fprintf(configFile, "%s", daemonConfig) <ide> configFile.Close() <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> <ide> time.Sleep(3 * time.Second) <ide> <ide> func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { <ide> } <ide> c.Assert(daemonID, checker.Not(checker.Equals), "") <ide> <del> syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) <add> c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) <ide> <ide> time.Sleep(3 * time.Second) <ide> <ide><path>integration-cli/docker_cli_external_graphdriver_unix_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/daemon/graphdriver" <ide> "github.com/docker/docker/daemon/graphdriver/vfs" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/archive" <ide> "github.com/docker/docker/pkg/plugins" <ide> "github.com/go-check/check" <ide> type DockerExternalGraphdriverSuite struct { <ide> server *httptest.Server <ide> jserver *httptest.Server <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> ec map[string]*graphEventsCounter <ide> } <ide> <ide> type graphEventsCounter struct { <ide> } <ide> <ide> func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { <ide><path>integration-cli/docker_cli_external_volume_driver_unix_test.go <ide> import ( <ide> "time" <ide> <ide> "github.com/docker/docker/api/types" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/docker/docker/pkg/stringid" <ide> "github.com/docker/docker/volume" <ide> type eventCounter struct { <ide> <ide> type DockerExternalVolumeSuite struct { <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> *volumePlugin <ide> } <ide> <ide> func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> s.ec = &eventCounter{} <ide> } <ide> <ide><path>integration-cli/docker_cli_info_test.go <ide> import ( <ide> "net" <ide> "strings" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> 
"github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> func (s *DockerSuite) TestInfoFormat(c *check.C) { <ide> func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { <ide> testRequires(c, SameHostDaemon, DaemonIsLinux) <ide> <del> d := NewDaemon(c) <add> d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> discoveryBackend := "consul://consuladdr:consulport/some/path" <ide> discoveryAdvertise := "1.1.1.1:2375" <ide> err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) <ide> func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { <ide> func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { <ide> testRequires(c, SameHostDaemon, DaemonIsLinux) <ide> <del> d := NewDaemon(c) <add> d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> discoveryBackend := "consul://consuladdr:consulport/some/path" <ide> <ide> // --cluster-advertise with an invalid string is an error <ide> func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { <ide> func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { <ide> testRequires(c, SameHostDaemon, Network, DaemonIsLinux) <ide> <del> d := NewDaemon(c) <add> d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> discoveryBackend := "consul://consuladdr:consulport/some/path" <ide> discoveryAdvertise := "eth0" <ide> <ide> func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { <ide> func (s *DockerSuite) TestInfoDebug(c *check.C) { <ide> testRequires(c, SameHostDaemon, DaemonIsLinux) <ide> <del> d := NewDaemon(c) <add> d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> err := d.Start("--debug") <ide> c.Assert(err, checker.IsNil) <ide> defer d.Stop() <ide> func (s *DockerSuite) TestInsecureRegistries(c *check.C) { <ide> registryCIDR := "192.168.1.0/24" <ide> registryHost := "insecurehost.com:5000" <ide> <del> d := NewDaemon(c) <add> d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) <ide> c.Assert(err, checker.IsNil) <ide> defer d.Stop() <ide><path>integration-cli/docker_cli_network_unix_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/api/types" <ide> "github.com/docker/docker/api/types/versions/v1p20" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> icmd "github.com/docker/docker/pkg/integration/cmd" <ide> "github.com/docker/docker/pkg/stringid" <ide> func init() { <ide> type DockerNetworkSuite struct { <ide> server *httptest.Server <ide> ds *DockerSuite <del> d *Daemon <add> d *daemon.Daemon <ide> } <ide> <ide> func (s *DockerNetworkSuite) SetUpTest(c *check.C) { <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> } <ide> <ide> func (s *DockerNetworkSuite) TearDownTest(c *check.C) { <ide> func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Kill daemon and restart <del> if err = s.d.cmd.Process.Kill(); err != nil { <del> 
c.Fatal(err) <del> } <add> c.Assert(s.d.Kill(), checker.IsNil) <ide> <ide> server.Close() <ide> <ide> func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { <ide> c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) <ide> } <ide> <del>func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { <add>func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { <ide> // Run a container on the default network <ide> out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []strin <ide> } <ide> } <ide> <del>func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { <add>func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { <ide> // Verify container is connected to all the networks <ide> for _, nw := range nws { <ide> out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) <ide> func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRe <ide> verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) <ide> <ide> // Kill daemon and restart <del> if err := s.d.cmd.Process.Kill(); err != nil { <del> c.Fatal(err) <del> } <del> s.d.Restart() <add> c.Assert(s.d.Kill(), checker.IsNil) <add> c.Assert(s.d.Restart(), checker.IsNil) <ide> <ide> // Restart container <ide> _, err := s.d.Cmd("start", cName) <ide> func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> // verfiy container has finished starting before killing daemon <del> err = s.d.waitRun(cName) <add> err = s.d.WaitRun(cName) <ide> c.Assert(err, checker.IsNil) <ide> } <ide> <ide> // Kill daemon ungracefully and restart <del> if err := s.d.cmd.Process.Kill(); err != nil { <del> c.Fatal(err) <del> } <del> if err := s.d.Restart(); err != nil { <del> c.Fatal(err) <del> } <add> c.Assert(s.d.Kill(), checker.IsNil) <add> c.Assert(s.d.Restart(), checker.IsNil) <ide> <ide> // make sure all the containers are up and running <ide> for i := 0; i < 10; i++ { <del> err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) <add> err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i)) <ide> c.Assert(err, checker.IsNil) <ide> } <ide> } <ide><path>integration-cli/docker_cli_prune_unix_test.go <ide> import ( <ide> "strconv" <ide> "strings" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> <del>func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { <add>func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) { <ide> _, err := d.Cmd("network", "prune", "--force") <ide> c.Assert(err, checker.IsNil) <ide> out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") <ide> func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { <ide> "busybox", "top") <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) <ide> <ide> // prune and verify <ide> pruneNetworkAndVerify(c, d, []string{"n1", 
"n3"}, []string{"n2", "n4"}) <ide> func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> _, err = d.Cmd("service", "rm", serviceName) <ide> c.Assert(err, checker.IsNil) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) <ide> pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) <ide> } <ide> <ide> func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { <ide> c.Assert(s.d.StartWithBusybox(), checker.IsNil) <ide> <del> out, _, err := s.d.buildImageWithOut("test", <add> out, _, err := s.d.BuildImageWithOut("test", <ide> `FROM busybox <ide> LABEL foo=bar`, true, "-q") <ide> c.Assert(err, checker.IsNil) <ide><path>integration-cli/docker_cli_run_test.go <ide> func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { <ide> name := "test-A" <ide> _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") <ide> c.Assert(err, checker.IsNil) <del> c.Assert(s.d.waitRun(name), check.IsNil) <add> c.Assert(s.d.WaitRun(name), check.IsNil) <ide> <ide> out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { <ide> name = "test-B" <ide> _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") <ide> c.Assert(err, checker.IsNil) <del> c.Assert(s.d.waitRun(name), check.IsNil) <add> c.Assert(s.d.WaitRun(name), check.IsNil) <ide> <ide> out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) <ide> c.Assert(err, checker.IsNil) <ide><path>integration-cli/docker_cli_secret_create_test.go <ide> func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> []byte("TESTINGDATA"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secret := d.getSecret(c, id) <add> secret := d.GetSecret(c, id) <ide> c.Assert(secret.Spec.Name, checker.Equals, testName) <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> Labels: map[string]string{ <ide> func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secret := d.getSecret(c, id) <add> secret := d.GetSecret(c, id) <ide> c.Assert(secret.Spec.Name, checker.Equals, testName) <ide> c.Assert(len(secret.Spec.Labels), checker.Equals, 2) <ide> c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") <ide> func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> name := "foo" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: name, <ide> }, <ide> []byte("foo"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> fake := 
d.createSecret(c, swarm.SecretSpec{ <add> fake := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: id, <ide> }, <ide><path>integration-cli/docker_cli_secret_inspect_test.go <ide> func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> []byte("TESTINGDATA"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secret := d.getSecret(c, id) <add> secret := d.GetSecret(c, id) <ide> c.Assert(secret.Spec.Name, checker.Equals, testName) <ide> <ide> out, err := d.Cmd("secret", "inspect", testName) <ide> func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { <ide> "test1", <ide> } <ide> for _, n := range testNames { <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: n, <ide> }, <ide> []byte("TESTINGDATA"), <ide> }) <ide> c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) <ide> <del> secret := d.getSecret(c, id) <add> secret := d.GetSecret(c, id) <ide> c.Assert(secret.Spec.Name, checker.Equals, n) <ide> <ide> } <ide><path>integration-cli/docker_cli_service_create_test.go <ide> func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { <ide> <ide> var tasks []swarm.Task <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks = d.getServiceTasks(c, id) <add> tasks = d.GetServiceTasks(c, id) <ide> return len(tasks) > 0, nil <ide> }, checker.Equals, true) <ide> <ide> task := tasks[0] <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <ide> if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> } <ide> return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil <ide> }, checker.Equals, true) <ide> func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { <ide> <ide> serviceName := "test-service-secret" <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { <ide> <ide> serviceName := "test-service-secret" <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { <ide> <ide> var tasks []swarm.Task <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks = d.getServiceTasks(c, id) <add> tasks = d.GetServiceTasks(c, id) <ide> return len(tasks) > 0, nil <ide> }, checker.Equals, true) <ide> <ide> task := tasks[0] <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <ide> if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> } <ide> return task.NodeID != "" && task.Status.ContainerStatus.ContainerID 
!= "", nil <ide> }, checker.Equals, true) <ide><path>integration-cli/docker_cli_service_health_test.go <ide> func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { <ide> // build image with health-check <ide> // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build <ide> imageName := "testhealth" <del> _, _, err := d.buildImageWithOut(imageName, <add> _, _, err := d.BuildImageWithOut(imageName, <ide> `FROM busybox <ide> RUN touch /status <ide> HEALTHCHECK --interval=1s --timeout=1s --retries=1\ <ide> func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { <ide> <ide> var tasks []swarm.Task <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks = d.getServiceTasks(c, id) <add> tasks = d.GetServiceTasks(c, id) <ide> return tasks, nil <ide> }, checker.HasLen, 1) <ide> <ide> task := tasks[0] <ide> <ide> // wait for task to start <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateRunning) <ide> containerID := task.Status.ContainerStatus.ContainerID <ide> func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { <ide> <ide> // Task should be terminated <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateFailed) <ide> <ide> func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { <ide> <ide> // service started from this image won't pass health check <ide> imageName := "testhealth" <del> _, _, err := d.buildImageWithOut(imageName, <add> _, _, err := d.BuildImageWithOut(imageName, <ide> `FROM busybox <ide> HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ <ide> CMD cat /status`, <ide> func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { <ide> <ide> var tasks []swarm.Task <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks = d.getServiceTasks(c, id) <add> tasks = d.GetServiceTasks(c, id) <ide> return tasks, nil <ide> }, checker.HasLen, 1) <ide> <ide> task := tasks[0] <ide> <ide> // wait for task to start <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateStarting) <ide> <ide> func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { <ide> }, checker.GreaterThan, 0) <ide> <ide> // task should be blocked at starting status <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) <ide> <ide> // make it healthy <ide> d.Cmd("exec", containerID, "touch", "/status") <ide> <ide> // Task should be at running status <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateRunning) <ide> } <ide> func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { <ide> <ide> // service started from this image won't pass health 
check <ide> imageName := "testhealth" <del> _, _, err := d.buildImageWithOut(imageName, <add> _, _, err := d.BuildImageWithOut(imageName, <ide> `FROM busybox <ide> HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ <ide> CMD cat /status`, <ide> func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { <ide> <ide> var tasks []swarm.Task <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> tasks = d.getServiceTasks(c, id) <add> tasks = d.GetServiceTasks(c, id) <ide> return tasks, nil <ide> }, checker.HasLen, 1) <ide> <ide> task := tasks[0] <ide> <ide> // wait for task to start <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateStarting) <ide> <ide> func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { <ide> }, checker.GreaterThan, 0) <ide> <ide> // task should be blocked at starting status <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) <ide> <ide> // make it healthy <ide> d.Cmd("exec", containerID, "touch", "/status") <ide> // Task should be at running status <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> task = d.getTask(c, task.ID) <add> task = d.GetTask(c, task.ID) <ide> return task.Status.State, nil <ide> }, checker.Equals, swarm.TaskStateRunning) <ide> } <ide><path>integration-cli/docker_cli_service_logs_experimental_test.go <ide> func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { <ide> <ide> // make sure task has been deployed. <ide> waitAndAssert(c, defaultReconciliationTimeout, <del> d.checkActiveContainerCount, checker.Equals, len(services)) <add> d.CheckActiveContainerCount, checker.Equals, len(services)) <ide> <ide> for name, message := range services { <ide> out, err := d.Cmd("service", "logs", name) <ide> func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { <ide> c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") <ide> <ide> // make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> args := []string{"service", "logs", "-f", name} <del> cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) <add> cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...) <ide> r, w := io.Pipe() <ide> cmd.Stdout = w <ide> cmd.Stderr = w <ide><path>integration-cli/docker_cli_service_update_test.go <ide> func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { <ide> // Create a service with a port mapping of 8080:8081. <ide> out, err := d.Cmd(serviceArgs...) <ide> c.Assert(err, checker.IsNil) <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // Update the service: changed the port mapping from 8080:8081 to 8082:8083. 
<ide> _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) <ide> func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service := d.getService(c, "test") <add> service := d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 0) <ide> <ide> // add label to empty set <ide> out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service = d.getService(c, "test") <add> service = d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 1) <ide> c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") <ide> <ide> // add label to non-empty set <ide> out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service = d.getService(c, "test") <add> service = d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 2) <ide> c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") <ide> <ide> out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service = d.getService(c, "test") <add> service = d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 1) <ide> c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") <ide> <ide> out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service = d.getService(c, "test") <add> service = d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 0) <ide> c.Assert(service.Spec.Labels["foo"], checker.Equals, "") <ide> <ide> // now make sure we can add again <ide> out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <del> service = d.getService(c, "test") <add> service = d.GetService(c, "test") <ide> c.Assert(service.Spec.Labels, checker.HasLen, 1) <ide> c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> testName := "test_secret" <del> id := d.createSecret(c, swarm.SecretSpec{ <add> id := d.CreateSecret(c, swarm.SecretSpec{ <ide> swarm.Annotations{ <ide> Name: testName, <ide> }, <ide> func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> // add secret <del> out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) <add> out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) <ide> func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { <ide> c.Assert(refs[0].File.Name, checker.Equals, testTarget) <ide> <ide> // remove <del> out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) <add> out, err = d.CmdRetryOutOfSequence("service", "update", "test", 
"--secret-rm", testName) <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) <ide><path>integration-cli/docker_cli_swarm_test.go <ide> import ( <ide> "time" <ide> <ide> "github.com/docker/docker/api/types/swarm" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/docker/libnetwork/driverapi" <ide> "github.com/docker/libnetwork/ipamapi" <ide> func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <ide> getSpec := func() swarm.Spec { <del> sw := d.getSwarm(c) <add> sw := d.GetSwarm(c) <ide> return sw.Spec <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { <ide> d := s.AddDaemon(c, false, false) <ide> <ide> getSpec := func() swarm.Spec { <del> sw := d.getSwarm(c) <add> sw := d.GetSwarm(c) <ide> return sw.Spec <ide> } <ide> <ide> func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { <ide> func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { <ide> // init swarm mode and stop a daemon <ide> d := s.AddDaemon(c, true, true) <del> info, err := d.info() <add> info, err := d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> c.Assert(d.Stop(), checker.IsNil) <ide> <ide> // start a daemon with --cluster-store and --cluster-advertise <ide> err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") <ide> c.Assert(err, checker.NotNil) <del> content, _ := ioutil.ReadFile(d.logFile.Name()) <add> content, err := d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") <ide> <ide> // start a daemon with --live-restore <ide> err = d.Start("--live-restore") <ide> c.Assert(err, checker.NotNil) <del> content, _ = ioutil.ReadFile(d.logFile.Name()) <add> content, err = d.ReadLogFile() <add> c.Assert(err, checker.IsNil) <ide> c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") <ide> // restart for teardown <ide> c.Assert(d.Start(), checker.IsNil) <ide> func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> <ide> // make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <del> containers := d.activeContainers() <add> containers := d.ActiveContainers() <ide> out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) <ide> c.Assert(err, checker.IsNil, check.Commentf(out)) <ide> c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) <ide> func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { <ide> c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") <ide> <ide> // make sure task has been deployed. 
<del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) <ide> <ide> filter := "name=redis-cluster" <ide> <ide> func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { <ide> out, err = d.Cmd("service", "update", "--publish-add", "80:80", name) <ide> c.Assert(err, checker.IsNil) <ide> <del> out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) <add> out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) <ide> c.Assert(err, checker.IsNil) <ide> <del> out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) <add> out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) <ide> c.Assert(err, checker.NotNil) <ide> <ide> out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) <ide> func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { <ide> c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") <ide> <ide> // make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> out, err = d.Cmd("ps", "-q") <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { <ide> out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") <ide> c.Assert(err, checker.IsNil) <ide> cID := strings.TrimSpace(out) <del> d.waitRun(cID) <add> d.WaitRun(cID) <ide> <ide> _, err = d.Cmd("rm", "-f", cID) <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { <ide> c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") <ide> <ide> // make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) <ide> <ide> // Filter non-tasks <ide> out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") <ide> func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { <ide> func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { <ide> d := s.AddDaemon(c, true, true) <ide> <del> path := filepath.Join(d.folder, "env.txt") <add> path := filepath.Join(d.Folder, "env.txt") <ide> err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) <ide> c.Assert(err, checker.IsNil) <ide> <ide> func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // We need to get the container id. <ide> out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") <ide> func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { <ide> out, err = d.Cmd("service", "rm", name) <ide> c.Assert(err, checker.IsNil) <ide> // Make sure container has been destroyed. 
<del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) <ide> <ide> // With --tty <ide> expectedOutput = "TTY" <ide> out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // We need to get the container id. <ide> out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") <ide> func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // We need to get the container id. <ide> out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") <ide> func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. 
<del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) <ide> c.Assert(err, checker.IsNil) <ide> func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { <ide> c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") <ide> } <ide> <del>func getNodeStatus(c *check.C, d *SwarmDaemon) swarm.LocalNodeState { <del> info, err := d.info() <add>func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState { <add> info, err := d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> return info.LocalNodeState <ide> } <ide> <del>func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) { <add>func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) { <ide> c.Assert(d.Restart(), checker.IsNil) <ide> status := getNodeStatus(c, d) <ide> if status == swarm.LocalNodeStateLocked { <ide> // it must not have updated to be unlocked in time - unlock, wait 3 seconds, and try again <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) { <ide> c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) <ide> } <ide> <del>func checkSwarmUnlockedToLocked(c *check.C, d *SwarmDaemon) { <add>func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) { <ide> c.Assert(d.Restart(), checker.IsNil) <ide> status := getNodeStatus(c, d) <ide> if status == swarm.LocalNodeStateActive { <ide> func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { <ide> c.Assert(d.Restart(), checker.IsNil) <ide> c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString("wrong-secret-key") <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) <ide> c.Assert(string(out), checker.Contains, "invalid key") <ide> <ide> c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) <ide> <del> cmd = d.command("swarm", "unlock") <add> cmd = d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err = cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { <ide> // It starts off locked <ide> c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) <ide> <del> info, err := d.info() <add> info, err := d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) <ide> <ide> func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { <ide> outs, err = d.Cmd("swarm", "leave", "--force") <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) <ide> <del> info, err = d.info() <add> info, err = d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) <ide> <ide> outs, err = 
d.Cmd("swarm", "init") <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) <ide> <del> info, err = d.info() <add> info, err = d.SwarmInfo() <ide> c.Assert(err, checker.IsNil) <ide> c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) <ide> } <ide> func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { <ide> c.Assert(outs, checker.Equals, unlockKey+"\n") <ide> <ide> // The ones that got the cluster update should be set to locked <del> for _, d := range []*SwarmDaemon{d1, d3} { <add> for _, d := range []*daemon.Swarm{d1, d3} { <ide> checkSwarmUnlockedToLocked(c, d) <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) <ide> <ide> // the ones that got the update are now set to unlocked <del> for _, d := range []*SwarmDaemon{d1, d3} { <add> for _, d := range []*daemon.Swarm{d1, d3} { <ide> checkSwarmLockedToUnlocked(c, d, unlockKey) <ide> } <ide> <ide> // d2 still locked <ide> c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) <ide> <ide> // unlock it <del> cmd := d2.command("swarm", "unlock") <add> cmd := d2.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { <ide> d3 := s.AddDaemon(c, true, true) <ide> <ide> // both new nodes are locked <del> for _, d := range []*SwarmDaemon{d2, d3} { <add> for _, d := range []*daemon.Swarm{d2, d3} { <ide> checkSwarmUnlockedToLocked(c, d) <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) <ide> } <ide> <ide> // get d3's cert <del> d3cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) <add> d3cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) <ide> c.Assert(err, checker.IsNil) <ide> <ide> // demote manager back to worker - workers are not locked <ide> func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { <ide> // to be replaced, then the node still has the manager TLS key which is still locked <ide> // (because we never want a manager TLS key to be on disk unencrypted if the cluster <ide> // is set to autolock) <del> waitAndAssert(c, defaultReconciliationTimeout, d3.checkControlAvailable, checker.False) <add> waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) <ide> waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { <del> cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) <add> cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) <ide> if err != nil { <ide> return "", check.Commentf("error: %v", err) <ide> } <ide> func (s *DockerSwarmSuite) 
TestSwarmRotateUnlockKey(c *check.C) { <ide> outs, _ = d.Cmd("node", "ls") <ide> c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> <ide> func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { <ide> <ide> c.Assert(d.Restart(), checker.IsNil) <ide> <del> cmd = d.command("swarm", "unlock") <add> cmd = d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err = cmd.CombinedOutput() <ide> } <ide> func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { <ide> outs, _ = d.Cmd("node", "ls") <ide> c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") <ide> <del> cmd = d.command("swarm", "unlock") <add> cmd = d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(newUnlockKey) <ide> out, err = cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { <ide> c.Assert(d2.Restart(), checker.IsNil) <ide> c.Assert(d3.Restart(), checker.IsNil) <ide> <del> for _, d := range []*SwarmDaemon{d2, d3} { <add> for _, d := range []*daemon.Swarm{d2, d3} { <ide> c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) <ide> <ide> outs, _ := d.Cmd("node", "ls") <ide> c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> <ide> func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { <ide> <ide> c.Assert(d.Restart(), checker.IsNil) <ide> <del> cmd = d.command("swarm", "unlock") <add> cmd = d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err = cmd.CombinedOutput() <ide> } <ide> func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { <ide> outs, _ = d.Cmd("node", "ls") <ide> c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") <ide> <del> cmd = d.command("swarm", "unlock") <add> cmd = d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(newUnlockKey) <ide> out, err = cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) { <ide> c.Assert(unlockKey, checker.Not(checker.Equals), "") <ide> checkSwarmUnlockedToLocked(c, d) <ide> <del> cmd := d.command("swarm", "unlock") <add> cmd := d.Command("swarm", "unlock") <ide> cmd.Stdin = bytes.NewBufferString(unlockKey) <ide> out, err := cmd.CombinedOutput() <ide> c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) <ide> func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { <ide> c.Assert(err, checker.IsNil) <ide> <ide> // Make sure task has been deployed. <del> waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) <add> waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) <ide> <ide> // We need to get the container id. 
<ide> out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") <ide> func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { <ide> d3 := s.AddDaemon(c, true, false) <ide> <ide> // Manager Addresses will always show Node 1's address <del> expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) <add> expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.Port) <ide> <ide> out, err := d1.Cmd("info") <ide> c.Assert(err, checker.IsNil) <ide><path>integration-cli/docker_cli_userns_test.go <ide> func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { <ide> defer os.RemoveAll(tmpDirNotExists) <ide> <ide> // we need to find the uid and gid of the remapped root from the daemon's root dir info <del> uidgid := strings.Split(filepath.Base(s.d.root), ".") <del> c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) <add> uidgid := strings.Split(filepath.Base(s.d.Root), ".") <add> c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root))) <ide> uid, err := strconv.Atoi(uidgid[0]) <ide> c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) <ide> gid, err := strconv.Atoi(uidgid[1]) <ide><path>integration-cli/docker_deprecated_api_v124_test.go <ide> import ( <ide> "net/http" <ide> "strings" <ide> <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { <ide> <ide> res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") <ide> c.Assert(err, checker.IsNil) <del> b, err2 := readBody(body) <add> b, err2 := integration.ReadBody(body) <ide> c.Assert(err2, checker.IsNil) <ide> c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) <ide> c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") <ide><path>integration-cli/docker_hub_pull_suite_test.go <ide> import ( <ide> "runtime" <ide> "strings" <ide> <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/go-check/check" <ide> ) <ide> func init() { <ide> // relative impact of each individual operation. As part of this suite, all <ide> // images are removed after each test. <ide> type DockerHubPullSuite struct { <del> d *Daemon <add> d *daemon.Daemon <ide> ds *DockerSuite <ide> } <ide> <ide> func newDockerHubPullSuite() *DockerHubPullSuite { <ide> // SetUpSuite starts the suite daemon. <ide> func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { <ide> testRequires(c, DaemonIsLinux) <del> s.d = NewDaemon(c) <add> s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ <add> Experimental: experimentalDaemon, <add> }) <ide> err := s.d.Start() <ide> c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) <ide> } <ide> func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, e <ide> <ide> // MakeCmd returns an exec.Cmd command to run against the suite daemon. <ide> func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { <del> args := []string{"--host", s.d.sock(), name} <add> args := []string{"--host", s.d.Sock(), name} <ide> args = append(args, arg...) 
<ide> return exec.Command(dockerBinary, args...) <ide> } <ide><path>integration-cli/docker_utils.go <ide> package main <ide> import ( <ide> "bufio" <ide> "bytes" <del> "crypto/tls" <ide> "encoding/json" <ide> "errors" <ide> "fmt" <ide> import ( <ide> <ide> "github.com/docker/docker/api/types" <ide> volumetypes "github.com/docker/docker/api/types/volume" <add> "github.com/docker/docker/integration-cli/daemon" <ide> "github.com/docker/docker/opts" <ide> "github.com/docker/docker/pkg/httputils" <add> "github.com/docker/docker/pkg/integration" <ide> "github.com/docker/docker/pkg/integration/checker" <ide> icmd "github.com/docker/docker/pkg/integration/cmd" <ide> "github.com/docker/docker/pkg/ioutils" <ide> "github.com/docker/docker/pkg/stringutils" <del> "github.com/docker/go-connections/tlsconfig" <ide> "github.com/docker/go-units" <ide> "github.com/go-check/check" <ide> ) <ide> func daemonHost() string { <ide> return daemonURLStr <ide> } <ide> <del>func getTLSConfig() (*tls.Config, error) { <del> dockerCertPath := os.Getenv("DOCKER_CERT_PATH") <del> <del> if dockerCertPath == "" { <del> return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") <del> } <del> <del> option := &tlsconfig.Options{ <del> CAFile: filepath.Join(dockerCertPath, "ca.pem"), <del> CertFile: filepath.Join(dockerCertPath, "cert.pem"), <del> KeyFile: filepath.Join(dockerCertPath, "key.pem"), <del> } <del> tlsConfig, err := tlsconfig.Client(*option) <del> if err != nil { <del> return nil, err <del> } <del> <del> return tlsConfig, nil <del>} <del> <del>func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { <del> if daemon == "" { <del> daemon = daemonHost() <del> } <del> daemonURL, err := url.Parse(daemon) <del> if err != nil { <del> return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) <del> } <del> <del> var c net.Conn <del> switch daemonURL.Scheme { <del> case "npipe": <del> return npipeDial(daemonURL.Path, timeout) <del> case "unix": <del> return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) <del> case "tcp": <del> if os.Getenv("DOCKER_TLS_VERIFY") != "" { <del> // Setup the socket TLS configuration. 
<del> tlsConfig, err := getTLSConfig() <del> if err != nil { <del> return nil, err <del> } <del> dialer := &net.Dialer{Timeout: timeout} <del> return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) <del> } <del> return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) <del> default: <del> return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) <add>// FIXME(vdemeester) should probably completely move to daemon struct/methods <add>func sockConn(timeout time.Duration, daemonStr string) (net.Conn, error) { <add> if daemonStr == "" { <add> daemonStr = daemonHost() <ide> } <add> return daemon.SockConn(timeout, daemonStr) <ide> } <ide> <ide> func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { <ide> func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) <ide> if err != nil { <ide> return -1, nil, err <ide> } <del> b, err := readBody(body) <add> b, err := integration.ReadBody(body) <ide> return res.StatusCode, b, err <ide> } <ide> <ide> func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string <ide> return req, client, nil <ide> } <ide> <del>func readBody(b io.ReadCloser) ([]byte, error) { <del> defer b.Close() <del> return ioutil.ReadAll(b) <del>} <del> <ide> func deleteContainer(container ...string) error { <ide> result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) <ide> return result.Compare(icmd.Success) <ide> func getContainerState(c *check.C, id string) (int, bool, error) { <ide> } <ide> <ide> func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { <del> return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) <del>} <del> <del>func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { <del> args := []string{} <del> if host != "" { <del> args = append(args, "--host", host) <del> } <del> args = append(args, "build", "-t", name) <del> if !useCache { <del> args = append(args, "--no-cache") <del> } <del> args = append(args, buildFlags...) <del> args = append(args, "-") <del> buildCmd := exec.Command(dockerBinary, args...) <del> buildCmd.Stdin = strings.NewReader(dockerfile) <del> return buildCmd <add> return daemon.BuildImageCmdWithHost(dockerBinary, name, dockerfile, "", useCache, buildFlags...) <ide> } <ide> <ide> func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { <ide> func waitInspect(name, expr, expected string, timeout time.Duration) error { <ide> } <ide> <ide> func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { <del> after := time.After(timeout) <del> <del> args := append(arg, "inspect", "-f", expr, name) <del> for { <del> result := icmd.RunCommand(dockerBinary, args...) 
<del> if result.Error != nil { <del> if !strings.Contains(result.Stderr(), "No such") { <del> return fmt.Errorf("error executing docker inspect: %v\n%s", <del> result.Stderr(), result.Stdout()) <del> } <del> select { <del> case <-after: <del> return result.Error <del> default: <del> time.Sleep(10 * time.Millisecond) <del> continue <del> } <del> } <del> <del> out := strings.TrimSpace(result.Stdout()) <del> if out == expected { <del> break <del> } <del> <del> select { <del> case <-after: <del> return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) <del> default: <del> } <del> <del> time.Sleep(100 * time.Millisecond) <del> } <del> return nil <add> return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) <ide> } <ide> <ide> func getInspectBody(c *check.C, version, id string) []byte { <ide><path>pkg/integration/utils.go <ide> import ( <ide> "errors" <ide> "fmt" <ide> "io" <add> "io/ioutil" <ide> "os" <ide> "os/exec" <ide> "path/filepath" <ide> func RunAtDifferentDate(date time.Time, block func()) { <ide> block() <ide> return <ide> } <add> <add>// ReadBody read the specified ReadCloser content and returns it <add>func ReadBody(b io.ReadCloser) ([]byte, error) { <add> defer b.Close() <add> return ioutil.ReadAll(b) <add>}
39
Ruby
Ruby
add falcon to the available rack servers
de089db0330f6d9fdd5820d984f725a5de9e5d6c
<ide><path>railties/lib/rails/commands/server/server_command.rb <ide> class ServerCommand < Base # :nodoc: <ide> <ide> # Hard-coding a bunch of handlers here as we don't have a public way of <ide> # querying them from the Rack::Handler registry. <del> RACK_SERVERS = %w(cgi fastcgi webrick lsws scgi thin puma unicorn) <add> RACK_SERVERS = %w(cgi fastcgi webrick lsws scgi thin puma unicorn falcon) <ide> <ide> DEFAULT_PORT = 3000 <ide> DEFAULT_PIDFILE = "tmp/pids/server.pid"
1
Ruby
Ruby
improve readability of ar explain result
c1ea574b276389e565c38ff0584962afaefdeb2d
<ide><path>activerecord/lib/active_record/explain.rb <ide> def collecting_queries_for_explain # :nodoc: <ide> # Makes the adapter execute EXPLAIN for the tuples of queries and bindings. <ide> # Returns a formatted string ready to be logged. <ide> def exec_explain(queries) # :nodoc: <del> queries && queries.map do |sql, bind| <add> str = queries && queries.map do |sql, bind| <ide> [].tap do |msg| <ide> msg << "EXPLAIN for: #{sql}" <ide> unless bind.empty? <ide> def exec_explain(queries) # :nodoc: <ide> msg << connection.explain(sql, bind) <ide> end.join("\n") <ide> end.join("\n") <add> <add> # overriding inspect to be more human readable <add> def str.inspect <add> self <add> end <add> str <ide> end <ide> <ide> # Silences automatic EXPLAIN logging for the duration of the block.
1
Javascript
Javascript
use var for all locales
4616c1d55c676cd51377bc6e1d12de037b872cdd
<ide><path>src/locale/ca-ES.js <ide> import "locale"; <ide> <del>d3.locale.ca_ES = d3.locale({ <add>var d3_locale_caES = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/de-DE.js <ide> import "locale"; <ide> <del>d3.locale.de_DE = d3.locale({ <add>var d3_locale_deDE = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/en-GB.js <ide> import "locale"; <ide> <del>d3.locale.en_GB = d3.locale({ <add>var d3_locale_enGB = d3.locale({ <ide> decimal: ".", <ide> thousands: ",", <ide> grouping: [3], <ide><path>src/locale/es-ES.js <ide> import "locale"; <ide> <del>d3.locale.es_ES = d3.locale({ <add>var d3_locale_esES = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/fi-FI.js <ide> import "locale"; <ide> <del>d3.locale.fi_FI = d3.locale({ <add>var d3_locale_fiFI = d3.locale({ <ide> decimal: ",", <ide> thousands: "\xa0", <ide> grouping: [3], <ide><path>src/locale/fr-FR.js <ide> import "locale"; <ide> <del>d3.locale.fr_FR = d3.locale({ <add>var d3_locale_frFR = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/he-IL.js <ide> import "locale"; <ide> <del>d3.locale.he_IL = d3.locale({ <add>var d3_locale_heIL = d3.locale({ <ide> decimal: ".", <ide> thousands: ",", <ide> grouping: [3], <ide><path>src/locale/ko-KR.js <ide> import "locale"; <ide> <del>d3.locale.ko_KR = d3.locale({ <add>var d3_locale_koKR = d3.locale({ <ide> decimal: ".", <ide> thousands: ",", <ide> grouping: [3], <ide><path>src/locale/mk-MK.js <ide> import "locale"; <ide> <del>d3.locale.mk_MK = d3.locale({ <add>var d3_locale_mkMK = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/nl-NL.js <ide> import "locale"; <ide> <del>d3.locale.nl_NL = d3.locale({ <add>var d3_locale_nlNL = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/pl-PL.js <ide> import "locale"; <ide> <del>var d3_locale_pl_PL = d3.locale({ <add>var d3_locale_plPL = d3.locale({ <ide> decimal: ",", <ide> thousands: ".", <ide> grouping: [3], <ide><path>src/locale/pt-BR.js <ide> import "locale"; <ide> <del>d3.locale.pt_BR = d3.locale({ <add>var d3_locale_ptBR = d3.locale({ <ide> decimal: ',', <ide> thousands: '.', <ide> grouping: [3], <ide><path>src/locale/ru-RU.js <ide> import "locale"; <ide> <del>d3.locale.ru_RU = d3.locale({ <add>var d3_locale_ruRU = d3.locale({ <ide> decimal: ",", <ide> thousands: "\xa0", <ide> grouping: [3], <ide><path>src/locale/zh-CN.js <ide> import "locale"; <ide> <del>d3.locale.zh_CN = d3.locale({ <add>var d3_locale_zhCN = d3.locale({ <ide> decimal: ".", <ide> thousands: ",", <ide> grouping: [3], <ide><path>test/locale/locale-caes-test.js <ide> var suite = vows.describe("d3.locale"); <ide> <ide> suite.addBatch({ <ide> "locale": { <del> topic: load("locale/ca-ES").expression("d3.locale.ca_ES"), <add> topic: load("locale/ca-ES").expression("d3_locale_caES"), <ide> <ide> "numberFormat": { <ide> topic: function(locale) { <ide><path>test/locale/locale-fifi-test.js <ide> var suite = vows.describe("d3.locale"); <ide> <ide> suite.addBatch({ <ide> "locale": { <del> topic: load("locale/fi-FI").expression("d3.locale.fi_FI"), <add> topic: load("locale/fi-FI").expression("d3_locale_fiFI"), <ide> <ide> "numberFormat": { <ide> topic: function(locale) { <ide><path>test/locale/locale-ptbr-test.js <ide> var suite = vows.describe("d3.locale"); <ide> <ide> suite.addBatch({ <ide> "locale": { <del> 
topic: load("locale/pt-BR").expression("d3.locale.pt_BR"), <add> topic: load("locale/pt-BR").expression("d3_locale_ptBR"), <ide> <ide> "numberFormat": { <ide> topic: function(locale) { <ide><path>test/locale/locale-test.js <ide> var suite = vows.describe("d3.locale"); <ide> <ide> suite.addBatch({ <ide> "locale": { <del> topic: load("locale/ru-RU").expression("d3.locale.ru_RU"), <add> topic: load("locale/ru-RU").expression("d3_locale_ruRU"), <ide> <ide> "numberFormat": { <ide> topic: function(locale) {
18
PHP
PHP
fix indentation for entity clean() test
fb442f6f8a9222095a975968d2492b2f213ca6cc
<ide><path>tests/TestCase/ORM/TableTest.php <ide> public function testEntityClean() <ide> $entity->clean(); <ide> <ide> $this->assertEmpty($entity->errors()); <del> $this->assertFalse($entity->dirty()); <add> $this->assertFalse($entity->dirty()); <ide> $this->assertEquals([], $entity->invalid()); <ide> $this->assertSame($entity->getOriginal('title'), 'alex'); <ide> }
1
Ruby
Ruby
remove spdx-id as an attr_reader attribute
ba824d9488de3ad4e8efd58478f3ba3d9aaf1684
<ide><path>Library/Homebrew/dev-cmd/audit.rb <ide> def reverse_line_number(regex) <ide> class FormulaAuditor <ide> include FormulaCellarChecks <ide> <del> attr_reader :formula, :text, :problems, :new_formula_problems, :spdx_ids <add> attr_reader :formula, :text, :problems, :new_formula_problems <ide> <ide> def initialize(formula, options = {}) <ide> @formula = formula <ide> def audit_license <ide> return if github_license && (github_license == formula.license) <ide> <ide> problem "License mismatch - Github license is: #{github_license}, \ <del>but Formulae license states: #{formula.license}" <add>but Formulae license states: #{formula.license}." <ide> else <ide> problem "#{formula.license} is not a standard SPDX license id." <ide> end
1
PHP
PHP
fix inconsistent type
6bc2d104c2fba91ba33e84bc6e487c448c6e1d19
<ide><path>src/Database/Schema/TableSchema.php <ide> public function indexes(): array <ide> /** <ide> * {@inheritDoc} <ide> */ <del> public function getIndex($name): array <add> public function getIndex($name): ?array <ide> { <ide> if (!isset($this->_indexes[$name])) { <ide> return null;
1
Text
Text
move openssl maintenance guide to doc
5eb85dd972b1b69aebf16d331df6761847a31b2a
<ide><path>deps/openssl/README.md <ide> Please refer [config/opensslconf_asm.h](config/opensslconf_asm.h) for details. <ide> <ide> ### Upgrading OpenSSL <ide> <del>Please refer [config/README.md](config/README.md). <add>Please refer to [maintaining-openssl](../../doc/guides/maintaining-openssl.md). <add><path>doc/guides/maintaining-openssl.md <del><path>deps/openssl/config/README.md <del>## Upgrading OpenSSL <add># Maintaining OpenSSL <ide> <del>### Requirements <del>- Linux environment (Only CentOS7.1 and Ubuntu16 are tested) <del>- `perl` Only Perl version 5 is tested. <del>- `nasm` (http://www.nasm.us/) The version of 2.11 or higher is needed. <del>- GNU `as` in binutils. The version of 2.26 or higher is needed. <add>This document describes how to update `deps/openssl/`. <ide> <del>### 0. Check Requirements <add>## Requirements <add>* Linux environment <add>* `perl` Only Perl version 5 is tested. <add>* `nasm` (http://www.nasm.us/) The version of 2.11 or higher is needed. <add>* GNU `as` in binutils. The version of 2.26 or higher is needed. <add> <add>## 0. Check Requirements <ide> <ide> ```sh <del>$ perl -v <add>% perl -v <ide> <ide> This is perl 5, version 22, subversion 1 (v5.22.1) built for <ide> x86_64-linux-gnu-thread-multi <ide> (with 60 registered patches, see perl -V for more detail) <ide> <del>$ as --version <add>% as --version <ide> GNU assembler (GNU Binutils for Ubuntu) 2.26.1 <ide> Copyright (C) 2015 Free Software Foundation, Inc. <ide> ... <del>$ nasm -v <add>% nasm -v <ide> NASM version 2.11.08 <ide> ``` <ide> <del>### 1. Obtain and extract new OpenSSL sources <add>## 1. Obtain and extract new OpenSSL sources <ide> <ide> Get a new source from https://www.openssl.org/source/ and extract <ide> all files into `deps/openssl/openssl`. Then add all files and commit <ide> them. <ide> ```sh <del>$ cd deps/openssl/ <del>$ rm -rf openssl <del>$ tar zxf ~/tmp/openssl-1.1.0h.tar.gz <del>$ mv openssl-1.1.0h openssl <del>$ git add --all openssl <del>$ git commit openssl <add>% cd deps/openssl/ <add>% rm -rf openssl <add>% tar zxf ~/tmp/openssl-1.1.0h.tar.gz <add>% mv openssl-1.1.0h openssl <add>% git add --all openssl <add>% git commit openssl <ide> ```` <ide> <ide> The commit message can be (with the openssl version set to the relevant value): <del>``` <add>```text <ide> deps: upgrade openssl sources to 1.1.0h <ide> <ide> This updates all sources in deps/openssl/openssl by: <ide> This updates all sources in deps/openssl/openssl by: <ide> $ git commit openssl <ide> ``` <ide> <del>### 2. Execute `make` in `deps/openssl/config` directory <add>## 2. Execute `make` in `deps/openssl/config` directory <ide> <ide> Use `make` to regenerate all platform dependent files in <ide> `deps/openssl/config/archs/`: <ide> ```sh <del>$ cd deps/openssl/config; make <add>% cd deps/openssl/config; make <ide> ``` <ide> <del>### 3. Check diffs <add>## 3. Check diffs <ide> <ide> Check diffs if updates are right. Even if no updates in openssl <ide> sources, `buildinf.h` files will be updated for they have a timestamp <ide> data in them. <ide> ```sh <del>$ cd deps/openssl/config <del>$ git diff <add>% cd deps/openssl/config <add>% git diff <ide> ``` <ide> <ide> *Note*: On Windows, OpenSSL Configure generates `makefile` that can be <ide> created. When source files or build options are updated in Windows, <ide> it needs to change these two Makefiles by hand. If you are not sure, <ide> please ask @shigeki for details. <ide> <del>### 4. Commit and make test <add>## 4. 
Commit and make test <ide> <ide> Update all architecture dependent files. Do not forget to git add or remove <ide> files if they are changed before commit: <ide> ```sh <del>$ git add deps/openssl/config/archs <del>$ git add deps/openssl/openssl/crypto/include/internal/bn_conf.h <del>$ git add deps/openssl/openssl/crypto/include/internal/dso_conf.h <del>$ git add deps/openssl/openssl/include/openssl/opensslconf.h <del>$ git commit <add>% git add deps/openssl/config/archs <add>% git add deps/openssl/openssl/crypto/include/internal/bn_conf.h <add>% git add deps/openssl/openssl/crypto/include/internal/dso_conf.h <add>% git add deps/openssl/openssl/include/openssl/opensslconf.h <add>% git commit <ide> ``` <ide> <ide> The commit message can be (with the openssl version set to the relevant value): <del>``` <add>```text <ide> deps: update archs files for OpenSSL-1.1.0 <ide> <ide> After an OpenSSL source update, all the config files need to be regenerated and <ide> The commit message can be (with the openssl version set to the relevant value): <ide> $ git commit <ide> ``` <ide> <del>Finally, build Node and run tests. <add>Finally, build Node.js and run tests.
2
Text
Text
add documentation for project guidelines
90c15b1982dc57d258cf7cb098776e0601cb9a5c
<ide><path>dev/PROJECT_GUIDELINES.md <add><!-- <add> Licensed to the Apache Software Foundation (ASF) under one <add> or more contributor license agreements. See the NOTICE file <add> distributed with this work for additional information <add> regarding copyright ownership. The ASF licenses this file <add> to you under the Apache License, Version 2.0 (the <add> "License"); you may not use this file except in compliance <add> with the License. You may obtain a copy of the License at <add> <add> http://www.apache.org/licenses/LICENSE-2.0 <add> <add> Unless required by applicable law or agreed to in writing, <add> software distributed under the License is distributed on an <add> "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY <add> KIND, either express or implied. See the License for the <add> specific language governing permissions and limitations <add> under the License. <add>--> <add> <add><!-- START doctoc generated TOC please keep comment here to allow auto update --> <add><!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> <add> <add>- [Overview](#overview) <add> - [Adding a Committer or PMC Member](#adding-a-committer-or-pmc-member) <add> - [Airflow Improvement Proposals (AIPs)](#airflow-improvement-proposals-aips) <add> - [Support for Airflow 1.10.x releases](#support-for-airflow-110x-releases) <add> - [Support for Backport Providers](#support-for-backport-providers) <add> - [Release Guidelines](#release-guidelines) <add> <add><!-- END doctoc generated TOC please keep comment here to allow auto update --> <add> <add> <add># Overview <add> <add>This document describes all the guidelines that have been agreed upon by the committers and PMC <add>members on the mailing list. <add> <add>## Adding a Committer or PMC Member <add> <add>[COMMITTERS.rst](../COMMITTERS.rst) contains the guidelines for adding a new Committer and promoting an existing <add>Committer to a PMC member. <add> <add>## Airflow Improvement Proposals (AIPs) <add> <add>When voting on AIPs, both PMC members and committers have a binding vote. <add>([Link](https://lists.apache.org/thread.html/ra22cb7799e62e451fc285dee29f9df1eb17c000535ca2911c322c797%40%3Cdev.airflow.apache.org%3E)) <add> <add>## Support for Airflow 1.10.x releases <add> <add>The Airflow 1.10.x series will be supported for six months (June 17, 2021) from Airflow 2.0.0 <add>release date (Dec 17, 2020). Specifically, only 'critical fixes' defined as fixes to bugs <add>that take down Production systems, will be backported to 1.10.x until June 17, 2021. <add> <add>## Support for Backport Providers <add> <add>Backport providers within 1.10.x, will be supported for critical fixes for three months (March 17, 2021) <add>from Airflow 2.0.0 release date (Dec 17, 2020). <add> <add>## Release Guidelines <add> <add>### Apache Airflow (core) <add> <add>- Follow Semantic Versioning ([SEMVER](https://semver.org/)) <add>- Changing the version of dependency should not count as breaking change
1
Ruby
Ruby
add available transformations to docs [ci skip]
0e883d163974cad2d7d0e0cfdc6cf38f1042571f
<ide><path>activerecord/lib/active_record/connection_adapters/abstract/schema_definitions.rb <ide> def add_column(name, type, options) <ide> # t.date <ide> # t.binary <ide> # t.boolean <add> # t.foreign_key <add> # t.json <add> # t.virtual <ide> # t.remove <ide> # t.remove_references <ide> # t.remove_belongs_to
1
Python
Python
set bigger maxdiff for ogrinspect tests
8eb84081ada2a1a71ca701c8d3f0db0d4c61ae28
<ide><path>django/contrib/gis/tests/inspectapp/tests.py <ide> <ide> <ide> class OGRInspectTest(TestCase): <add> maxDiff = 1024 <add> <ide> def test_poly(self): <ide> shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp') <ide> model_def = ogrinspect(shp_file, 'MyModel')
1
Javascript
Javascript
remove usage of require('util') in `esm/loader.js`
ad661ca209f68e7aa0e7396fe0d04499daeb71a9
<ide><path>lib/internal/modules/esm/loader.js <ide> const translators = require('internal/modules/esm/translators'); <ide> <ide> const FunctionBind = Function.call.bind(Function.prototype.bind); <ide> <del>const debug = require('util').debuglog('esm'); <add>const debug = require('internal/util/debuglog').debuglog('esm'); <ide> <ide> /* A Loader instance is used as the main entry point for loading ES modules. <ide> * Currently, this is a singleton -- there is only one used for loading
1
Javascript
Javascript
change var to const in ./doc/addon-verify
bce27c2cf37ed96abecd7625ff1d22c72c3b99c8
<ide><path>tools/doc/addon-verify.js <ide> ${files[name].replace('Release', "' + common.buildType + '")} <ide> fs.mkdir(dir, function() { <ide> // Ignore errors <ide> <del> var done = once(ondone); <add> const done = once(ondone); <ide> var waiting = files.length; <ide> files.forEach(function(file) { <ide> fs.writeFile(file.path, file.content, function(err) {
1
Text
Text
fix typo in readme.md
46dfe99e44887e40b821b0232dcfe6a52d31c3c4
<ide><path>examples/pytorch/README.md <ide> A few notes on this integration: <ide> ## Distributed training and mixed precision <ide> <ide> All the PyTorch scripts mentioned above work out of the box with distributed training and mixed precision, thanks to <del>the [Trainer API](https://huggingface.co/transformers/main_classes/trainer.html). To launch one of them on _n_ GPUS, <add>the [Trainer API](https://huggingface.co/transformers/main_classes/trainer.html). To launch one of them on _n_ GPUs, <ide> use the following command: <ide> <ide> ```bash <ide> and reply to the questions asked. Then <ide> accelerate test <ide> ``` <ide> <del>that will check everything is ready for training. Finally, you cam launch training with <add>that will check everything is ready for training. Finally, you can launch training with <ide> <ide> ```bash <ide> accelerate launch path_to_script.py --args_to_script <ide> Advanced configuration is possible by setting environment variables: <ide> <ide> | Environment Variable | Value | <ide> |---|---| <del>| WANDB_LOG_MODEL | Log the model as artifact (log the model as artifact at the end of training (`false` by default) | <add>| WANDB_LOG_MODEL | Log the model as artifact (log the model as artifact at the end of training) (`false` by default) | <ide> | WANDB_WATCH | one of `gradients` (default) to log histograms of gradients, `all` to log histograms of both gradients and parameters, or `false` for no histogram logging | <ide> | WANDB_PROJECT | Organize runs by project | <ide>
1
Text
Text
update security release onboarding
dfdfd2471f66dbb70e977410e728eb7302d5132c
<ide><path>doc/contributing/security-steward-on-off-boarding.md <ide> to the project and not to use/disclose to their employer. <ide> * Add them to the security-stewards team in the GitHub nodejs-private <ide> organization. <add>* Add them to the [public website team](https://github.com/orgs/nodejs/teams/website). <ide> * Ensure they have 2FA enabled in H1. <ide> * Add them to the standard team in H1 using this <ide> [page](https://hackerone.com/nodejs/team_members). <ide> <ide> * Remove them from security-stewards team in the GitHub nodejs-private <ide> organization. <add>* Remove them from public website team <ide> * Unless they have access for another reason, remove them from the <ide> standard team in H1 using this <ide> [page](https://hackerone.com/nodejs/team_members).
1
Text
Text
add release notes for 1.7.1
05170bf3716359f81b32bf5113330dd6240a0cf7
<ide><path>CHANGELOG.md <add><a name="1.7.1"></a> <add># 1.7.1 momentum-defiance (2018-06-08) <add> <add> <add>## Bug Fixes <add>- **$compile:** support transcluding multi-element directives <add> ([789db8](https://github.com/angular/angular.js/commit/789db83a8ae0e2db5db13289b2c29e56093d967a), <add> [#15554](https://github.com/angular/angular.js.git/issues/15554), <add> [#15555](https://github.com/angular/angular.js.git/issues/15555)) <add>- **ngModel:** do not throw if view value changes on destroyed scope <add> ([2b6c98](https://github.com/angular/angular.js/commit/2b6c9867369fd3ef1ddb687af1153478ab62ee1b), <add> [#16583](https://github.com/angular/angular.js.git/issues/16583), <add> [#16585](https://github.com/angular/angular.js.git/issues/16585)) <add> <add> <add>## New Features <add>- **$compile:** add one-way collection bindings <add> ([f9d1ca](https://github.com/angular/angular.js/commit/f9d1ca20c38f065f15769fbe23aee5314cb58bd4), <add> [#14039](https://github.com/angular/angular.js.git/issues/14039), <add> [#16553](https://github.com/angular/angular.js.git/issues/16553), <add> [#15874](https://github.com/angular/angular.js.git/issues/15874)) <add>- **ngRef:** add directive to publish controller, or element into scope <add> ([bf841d](https://github.com/angular/angular.js/commit/bf841d35120bf3c4655fde46af4105c85a0f1cdc), <add> [#16511](https://github.com/angular/angular.js.git/issues/16511)) <add>- **errorHandlingConfig:** add option to exclude error params from url <add> ([3d6c45](https://github.com/angular/angular.js/commit/3d6c45d76e30b1b3c4eb9672cf4a93e5251c06b3), <add> [#14744](https://github.com/angular/angular.js.git/issues/14744), <add> [#15707](https://github.com/angular/angular.js.git/issues/15707), <add> [#16283](https://github.com/angular/angular.js.git/issues/16283), <add> [#16299](https://github.com/angular/angular.js.git/issues/16299), <add> [#16591](https://github.com/angular/angular.js.git/issues/16591)) <add>- **ngAria:** add support for ignoring a specific element <add> ([7d9d38](https://github.com/angular/angular.js/commit/7d9d387195292cb5e04984602b752d31853cfea6), <add> [#14602](https://github.com/angular/angular.js.git/issues/14602), <add> [#14672](https://github.com/angular/angular.js.git/issues/14672), <add> [#14833](https://github.com/angular/angular.js.git/issues/14833)) <add>- **ngCookies:** support samesite option <add> ([10a229](https://github.com/angular/angular.js/commit/10a229ce1befdeaf6295d1635dc11391c252a91a), <add> [#16543](https://github.com/angular/angular.js.git/issues/16543), <add> [#16544](https://github.com/angular/angular.js.git/issues/16544)) <add>- **ngMessages:** add support for default message <add> ([a8c263](https://github.com/angular/angular.js/commit/a8c263c1947cc85ee60b4732f7e4bcdc7ba463e8), <add> [#12008](https://github.com/angular/angular.js.git/issues/12008), <add> [#12213](https://github.com/angular/angular.js.git/issues/12213), <add> [#16587](https://github.com/angular/angular.js.git/issues/16587)) <add>- **ngMock, ngMockE2E:** add option to match latest definition for `$httpBackend` request <add> ([773f39](https://github.com/angular/angular.js/commit/773f39c9345479f5f8b6321236ce6ad96f77aa92), <add> [#16251](https://github.com/angular/angular.js.git/issues/16251), <add> [#11637](https://github.com/angular/angular.js.git/issues/11637), <add> [#16560](https://github.com/angular/angular.js.git/issues/16560)) <add>- **$route:** add support for the `reloadOnUrl` configuration option <add> 
([f4f571](https://github.com/angular/angular.js/commit/f4f571efdf86d6acbcd5c6b1de66b4b33a259125), <add> [#7925](https://github.com/angular/angular.js.git/issues/7925), <add> [#15002](https://github.com/angular/angular.js.git/issues/15002)) <add> <add> <ide> <a name="1.7.0"></a> <ide> # 1.7.0 nonexistent-physiology (2018-05-11) <ide>
1
Ruby
Ruby
give higher priority to assets.cache_store
86e2f888e7a3b96f880ab33bfcf4fbf275a40544
<ide><path>actionpack/lib/sprockets/railtie.rb <ide> def asset_environment(app) <ide> env.logger = Rails.logger <ide> <ide> if env.respond_to?(:cache) <del> env.cache = Rails.cache <add> env.cache = assets.cache_store || Rails.cache <ide> end <ide> <ide> if assets.compress
1
Javascript
Javascript
decorate resolve and reject to clear timeout
fa3c6d22f7a5e0a963119109fccb8d86ad981c03
<ide><path>lib/adapters/http.js <ide> var enhanceError = require('../core/enhanceError'); <ide> <ide> /*eslint consistent-return:0*/ <ide> module.exports = function httpAdapter(config) { <del> return new Promise(function dispatchHttpRequest(resolve, reject) { <add> return new Promise(function dispatchHttpRequest(resolvePromise, rejectPromise) { <add> var timer; <add> var resolve = function resolve(value) { <add> clearTimeout(timer); <add> resolvePromise(value); <add> }; <add> var reject = function reject(value) { <add> clearTimeout(timer); <add> rejectPromise(value); <add> }; <ide> var data = config.data; <ide> var headers = config.headers; <del> var timer; <ide> <ide> // Set User-Agent (required by some servers) <ide> // Only set header if it hasn't been set in config <ide> module.exports = function httpAdapter(config) { <ide> var req = transport.request(options, function handleResponse(res) { <ide> if (req.aborted) return; <ide> <del> // Response has been received so kill timer that handles request timeout <del> clearTimeout(timer); <del> timer = null; <del> <ide> // uncompress the response body transparently if required <ide> var stream = res; <ide> switch (res.headers['content-encoding']) { <ide> module.exports = function httpAdapter(config) { <ide> }); <ide> <ide> // Handle request timeout <del> if (config.timeout && !timer) { <add> if (config.timeout) { <ide> timer = setTimeout(function handleRequestTimeout() { <ide> req.abort(); <ide> reject(createError('timeout of ' + config.timeout + 'ms exceeded', config, 'ECONNABORTED', req));
1
Ruby
Ruby
add automatic tap repair
399388d8b9a29333135bd7bf2c4f7f159569d89b
<ide><path>Library/Homebrew/cmd/pull.rb <ide> def pull <ide> ohai 'Patch changed:' <ide> safe_system "git", "diff-tree", "-r", "--stat", revision, "HEAD" <ide> <add> safe_system "brew", "tap", "--repair" if tap_name <add> <ide> if ARGV.include? '--install' <ide> changed_formulae.each do |f| <ide> ohai "Installing #{f.name}"
1
Javascript
Javascript
preserve assets from concatenated module
ab9e9383674ba84cddf4fd48d13c64c73282969a
<ide><path>lib/optimize/ConcatenatedModule.js <ide> class ConcatenatedModule extends Module { <ide> this.dependenciesErrors = []; <ide> this.warnings = []; <ide> this.errors = []; <add> this.assets = {}; <ide> for(const m of modules) { <ide> // populate dependencies <ide> m.dependencies.filter(dep => !modulesSet.has(dep.module)) <ide> class ConcatenatedModule extends Module { <ide> m.warnings.forEach(warning => this.warnings.push(warning)); <ide> // populate errors <ide> m.errors.forEach(error => this.errors.push(error)); <add> <add> Object.assign(this.assets, m.assets); <ide> } <ide> } <ide>
1
Javascript
Javascript
check map.format for transparency in gltf2loader
08d40df55b87e14b1a5b7104cb715eab1b4d9f77
<ide><path>examples/js/loaders/GLTF2Loader.js <ide> THREE.GLTF2Loader = ( function () { <ide> <ide> } <ide> <del> // set transparent true if map baseColorTexture is specified because <del> // A channel values of it can be less than 1.0. <del> if ( materialParams.opacity < 1.0 || materialParams.map !== undefined ) materialParams.transparent = true; <add> if ( materialParams.opacity < 1.0 || <add> ( materialParams.map !== undefined && <add> ( materialParams.map.format === THREE.AlphaFormat || <add> materialParams.map.format === THREE.RGBAFormat || <add> materialParams.map.format === THREE.LuminanceAlphaFormat ) ) ) { <add> <add> materialParams.transparent = true; <add> <add> } <ide> <ide> materialParams.metalness = metallicRoughness.metallicFactor !== undefined ? metallicRoughness.metallicFactor : 1.0; <ide> materialParams.roughness = metallicRoughness.roughnessFactor !== undefined ? metallicRoughness.roughnessFactor : 1.0;
1
PHP
PHP
fix some compiled stuff
6020b7c2937df9cae48b5f4e3ebd0c623d1c79b1
<ide><path>src/Illuminate/Foundation/Console/Optimize/config.php <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Contracts/Auth/UserProvider.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Auth/EloquentUserProvider.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Container/Container.php', <del> $basePath.'/vendor/symfony/http-kernel/Symfony/Component/HttpKernel/HttpKernelInterface.php', <add> $basePath.'/vendor/symfony/http-kernel/HttpKernelInterface.php', <ide> $basePath.'/vendor/symfony/http-kernel/Symfony/Component/HttpKernel/TerminableInterface.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Application.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/EnvironmentDetector.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Bootstrap/LoadConfiguration.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Bootstrap/DetectEnvironment.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Http/Kernel.php', <del> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Auth/AuthenticatesAndRegistersUsers.php', <add> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Auth/AuthenticatesUsers.php', <add> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Auth/RedirectsUsers.php', <add> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Auth/RegistersUsers.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Auth/ResetsPasswords.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Http/Request.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Http/Middleware/FrameGuard.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Http/Middleware/VerifyCsrfToken.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Foundation/Http/Middleware/CheckForMaintenanceMode.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Request.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/ParameterBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/FileBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/ServerBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/HeaderBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Session/SessionInterface.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Session/SessionBagInterface.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Session/Attribute/AttributeBagInterface.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Session/Attribute/AttributeBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Session/Storage/MetadataBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/AcceptHeaderItem.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/AcceptHeader.php', <add> $basePath.'/vendor/symfony/http-foundation/Request.php', <add> $basePath.'/vendor/symfony/http-foundation/ParameterBag.php', <add> $basePath.'/vendor/symfony/http-foundation/FileBag.php', <add> $basePath.'/vendor/symfony/http-foundation/ServerBag.php', <add> 
$basePath.'/vendor/symfony/http-foundation/HeaderBag.php', <add> $basePath.'/vendor/symfony/http-foundation/Session/SessionInterface.php', <add> $basePath.'/vendor/symfony/http-foundation/Session/SessionBagInterface.php', <add> $basePath.'/vendor/symfony/http-foundation/Session/Attribute/AttributeBagInterface.php', <add> $basePath.'/vendor/symfony/http-foundation/Session/Attribute/AttributeBag.php', <add> $basePath.'/vendor/symfony/http-foundation/Session/Storage/MetadataBag.php', <add> $basePath.'/vendor/symfony/http-foundation/AcceptHeaderItem.php', <add> $basePath.'/vendor/symfony/http-foundation/AcceptHeader.php', <ide> $basePath.'/vendor/symfony/debug/Symfony/Component/Debug/ExceptionHandler.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Support/ServiceProvider.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Support/AggregateServiceProvider.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/Router.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/Route.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/RouteCollection.php', <del> $basePath.'/vendor/symfony/routing/Symfony/Component/Routing/CompiledRoute.php', <del> $basePath.'/vendor/symfony/routing/Symfony/Component/Routing/RouteCompilerInterface.php', <del> $basePath.'/vendor/symfony/routing/Symfony/Component/Routing/RouteCompiler.php', <del> $basePath.'/vendor/symfony/routing/Symfony/Component/Routing/Route.php', <add> $basePath.'/vendor/symfony/routing/CompiledRoute.php', <add> $basePath.'/vendor/symfony/routing/RouteCompilerInterface.php', <add> $basePath.'/vendor/symfony/routing/RouteCompiler.php', <add> $basePath.'/vendor/symfony/routing/Route.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/Controller.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/ControllerDispatcher.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Routing/ControllerInspector.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Cookie/CookieJar.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Cookie/Middleware/EncryptCookies.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Cookie/Middleware/AddQueuedCookiesToResponse.php', <add> $basePath.'/vendor/laravel/framework/src/Illuminate/Encryption/BaseEncrypter.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Encryption/Encrypter.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Support/Facades/Log.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Log/LogServiceProvider.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/View/Compilers/CompilerInterface.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/View/Compilers/Compiler.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/View/Compilers/BladeCompiler.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Response.php', <add> $basePath.'/vendor/symfony/http-foundation/Response.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Http/ResponseTrait.php', <ide> $basePath.'/vendor/laravel/framework/src/Illuminate/Http/Response.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/ResponseHeaderBag.php', <del> $basePath.'/vendor/symfony/http-foundation/Symfony/Component/HttpFoundation/Cookie.php', <add> $basePath.'/vendor/symfony/http-foundation/ResponseHeaderBag.php', <add> 
$basePath.'/vendor/symfony/http-foundation/Cookie.php', <ide> $basePath.'/vendor/symfony/finder/Symfony/Component/Finder/SplFileInfo.php', <ide> $basePath.'/vendor/symfony/finder/Symfony/Component/Finder/Expression/Regex.php', <ide> $basePath.'/vendor/symfony/finder/Symfony/Component/Finder/Expression/ValueInterface.php',
1
Ruby
Ruby
fix usage of lambda as a rack endpoint
dbacb95a2187e853c0615f30919a407744162595
<ide><path>actionpack/lib/action_dispatch/routing/mapper.rb <ide> def root(options = {}) <ide> # A pattern can also point to a +Rack+ endpoint i.e. anything that <ide> # responds to +call+: <ide> # <del> # match 'photos/:id', to: lambda {|hash| [200, {}, "Coming soon"] } <add> # match 'photos/:id', to: lambda {|hash| [200, {}, ["Coming soon"]] } <ide> # match 'photos/:id', to: PhotoRackApp <ide> # # Yes, controller actions are just rack endpoints <ide> # match 'photos/:id', to: PhotosController.action(:show) <ide> def root(options = {}) <ide> # +call+ or a string representing a controller's action. <ide> # <ide> # match 'path', to: 'controller#action' <del> # match 'path', to: lambda { |env| [200, {}, "Success!"] } <add> # match 'path', to: lambda { |env| [200, {}, ["Success!"]] } <ide> # match 'path', to: RackApp <ide> # <ide> # [:on] <ide><path>railties/test/application/assets_test.rb <ide> def assert_no_file_exists(filename) <ide> <ide> app_file 'config/routes.rb', <<-RUBY <ide> AppTemplate::Application.routes.draw do <del> get '*path', to: lambda { |env| [200, { "Content-Type" => "text/html" }, "Not an asset"] } <add> get '*path', to: lambda { |env| [200, { "Content-Type" => "text/html" }, ["Not an asset"]] } <ide> end <ide> RUBY <ide> <ide><path>railties/test/railties/engine_test.rb <ide> class Engine < ::Rails::Engine <ide> @plugin.write "lib/bukkits.rb", <<-RUBY <ide> module Bukkits <ide> class Engine < ::Rails::Engine <del> endpoint lambda { |env| [200, {'Content-Type' => 'text/html'}, 'hello'] } <add> endpoint lambda { |env| [200, {'Content-Type' => 'text/html'}, ['hello']] } <ide> end <ide> end <ide> RUBY
3
PHP
PHP
add line break for plain text mails
3dafb5a3840053420d850e1ccb249efc980af94e
<ide><path>src/Illuminate/Notifications/resources/views/email.blade.php <ide> @if (! empty($salutation)) <ide> {{ $salutation }} <ide> @else <del>@lang('Regards'),<br>{{ config('app.name') }} <add>@lang('Regards'),<br> <add>{{ config('app.name') }} <ide> @endif <ide> <ide> {{-- Subcopy --}}
1
Java
Java
improve tcp connection info logging
37b0ed9fcb74c42dd6a8de24834d5ac7e75392f6
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/simp/stomp/ReactorNettyTcpStompClient.java <ide> public void shutdown() { <ide> this.tcpClient.shutdown(); <ide> } <ide> <add> @Override <add> public String toString() { <add> return "ReactorNettyTcpStompClient[" + this.tcpClient + "]"; <add> } <ide> } <ide><path>spring-messaging/src/main/java/org/springframework/messaging/simp/stomp/StompBrokerRelayMessageHandler.java <ide> public class StompBrokerRelayMessageHandler extends AbstractBrokerMessageHandler <ide> <ide> public static final String SYSTEM_SESSION_ID = "_system_"; <ide> <del> // STOMP recommends error of margin for receiving heartbeats <add> /** STOMP recommended error of margin for receiving heartbeats */ <ide> private static final long HEARTBEAT_MULTIPLIER = 3; <ide> <ide> /** <del> * A heartbeat is setup once a CONNECTED frame is received which contains the heartbeat settings <del> * we need. If we don't receive CONNECTED within a minute, the connection is closed proactively. <add> * Heartbeat starts once CONNECTED frame with heartbeat settings is received. <add> * If CONNECTED doesn't arrive within a minute, we'll close the connection. <ide> */ <ide> private static final int MAX_TIME_TO_CONNECTED_FRAME = 60 * 1000; <ide> <ide> protected void startInternal() { <ide> } <ide> <ide> if (logger.isInfoEnabled()) { <del> logger.info("Connecting \"system\" session to " + this.relayHost + ":" + this.relayPort); <add> logger.info("Starting \"system\" session, " + toString()); <ide> } <ide> <ide> StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECT); <ide> else if (StompCommand.DISCONNECT.equals(command)) { <ide> <ide> @Override <ide> public String toString() { <del> return "StompBrokerRelay[" + this.relayHost + ":" + this.relayPort + "]"; <add> return "StompBrokerRelay[" + getTcpClientInfo() + "]"; <add> } <add> <add> private String getTcpClientInfo() { <add> return this.tcpClient != null ? this.tcpClient.toString() : this.relayHost + ":" + this.relayPort; <ide> } <ide> <ide> <ide> public ListenableFuture<Void> forward(Message<?> message, StompHeaderAccessor ac <ide> private static class VoidCallable implements Callable<Void> { <ide> <ide> @Override <del> public Void call() throws Exception { <add> public Void call() { <ide> return null; <ide> } <ide> } <ide> public void incrementDisconnectCount() { <ide> } <ide> <ide> public String toString() { <del> return (connectionHandlers.size() + " sessions, " + relayHost + ":" + relayPort + <add> return (connectionHandlers.size() + " sessions, " + getTcpClientInfo() + <ide> (isBrokerAvailable() ? " (available)" : " (not available)") + <ide> ", processed CONNECT(" + this.connect.get() + ")-CONNECTED(" + <ide> this.connected.get() + ")-DISCONNECT(" + this.disconnect.get() + ")"); <ide><path>spring-messaging/src/main/java/org/springframework/messaging/tcp/reactor/ReactorNettyTcpClient.java <ide> /* <del> * Copyright 2002-2017 the original author or authors. <add> * Copyright 2002-2018 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. 
<ide> import io.netty.channel.group.DefaultChannelGroup; <ide> import io.netty.handler.codec.ByteToMessageDecoder; <ide> import io.netty.util.concurrent.ImmediateEventExecutor; <add>import org.apache.commons.logging.Log; <add>import org.apache.commons.logging.LogFactory; <ide> import org.reactivestreams.Publisher; <ide> import reactor.core.publisher.DirectProcessor; <ide> import reactor.core.publisher.Flux; <ide> */ <ide> public class ReactorNettyTcpClient<P> implements TcpOperations<P> { <ide> <add> private static Log logger = LogFactory.getLog(ReactorNettyTcpClient.class); <add> <ide> private static final int PUBLISH_ON_BUFFER_SIZE = 16; <ide> <ide> <ide> public ListenableFuture<Void> connect(TcpConnectionHandler<P> handler, Reconnect <ide> .doOnNext(updateConnectMono(connectMono)) <ide> .doOnError(updateConnectMono(connectMono)) <ide> .doOnError(handler::afterConnectFailure) // report all connect failures to the handler <del> .flatMap(NettyContext::onClose) // post-connect issues <add> .flatMap(NettyContext::onClose) // post-connect issues <ide> .retryWhen(reconnectFunction(strategy)) <ide> .repeatWhen(reconnectFunction(strategy)) <ide> .subscribe(); <ide> private Mono<Void> stopScheduler() { <ide> }); <ide> } <ide> <add> @Override <add> public String toString() { <add> return "ReactorNettyTcpClient[" + this.tcpClient + "]"; <add> } <add> <ide> <ide> private class ReactorNettyHandler implements BiFunction<NettyInbound, NettyOutbound, Publisher<Void>> { <ide> <ide> private class ReactorNettyHandler implements BiFunction<NettyInbound, NettyOutbo <ide> @Override <ide> @SuppressWarnings("unchecked") <ide> public Publisher<Void> apply(NettyInbound inbound, NettyOutbound outbound) { <add> if (logger.isDebugEnabled()) { <add> logger.debug("Connected to " + inbound.remoteAddress()); <add> } <ide> DirectProcessor<Void> completion = DirectProcessor.create(); <ide> TcpConnection<P> connection = new ReactorNettyTcpConnection<>(inbound, outbound, codec, completion); <ide> scheduler.schedule(() -> connectionHandler.afterConnected(connection)); <ide> public StompMessageDecoder(ReactorNettyCodec<P> codec) { <ide> } <ide> <ide> @Override <del> protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { <add> protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) { <ide> Collection<Message<P>> messages = codec.decode(in); <ide> out.addAll(messages); <ide> } <ide><path>spring-websocket/src/test/java/org/springframework/web/socket/config/MessageBrokerBeanDefinitionParserTests.java <ide> public void stompBrokerRelay() { <ide> assertEquals(5000, messageBroker.getSystemHeartbeatSendInterval()); <ide> assertThat(messageBroker.getDestinationPrefixes(), Matchers.containsInAnyOrder("/topic","/queue")); <ide> <del> List<Class<? extends MessageHandler>> subscriberTypes = <del> Arrays.<Class<? extends MessageHandler>>asList(SimpAnnotationMethodMessageHandler.class, <del> UserDestinationMessageHandler.class, StompBrokerRelayMessageHandler.class); <add> List<Class<? 
extends MessageHandler>> subscriberTypes = Arrays.asList( <add> SimpAnnotationMethodMessageHandler.class, UserDestinationMessageHandler.class, <add> StompBrokerRelayMessageHandler.class); <ide> testChannel("clientInboundChannel", subscriberTypes, 2); <ide> testExecutor("clientInboundChannel", Runtime.getRuntime().availableProcessors() * 2, Integer.MAX_VALUE, 60); <ide> <ide> subscriberTypes = Collections.singletonList(SubProtocolWebSocketHandler.class); <ide> testChannel("clientOutboundChannel", subscriberTypes, 1); <ide> testExecutor("clientOutboundChannel", Runtime.getRuntime().availableProcessors() * 2, Integer.MAX_VALUE, 60); <ide> <del> subscriberTypes = Arrays.<Class<? extends MessageHandler>>asList( <del> StompBrokerRelayMessageHandler.class, UserDestinationMessageHandler.class); <add> subscriberTypes = Arrays.asList(StompBrokerRelayMessageHandler.class, UserDestinationMessageHandler.class); <ide> testChannel("brokerChannel", subscriberTypes, 1); <ide> try { <ide> this.appContext.getBean("brokerChannelExecutor", ThreadPoolTaskExecutor.class);
4
Javascript
Javascript
fix error string in codegennativecomponent
deaaab908b24a0fae9fee73822198cc4972a98f4
<ide><path>Libraries/Utilities/codegenNativeComponent.js <ide> function codegenNativeComponent<Props>( <ide> componentNameInUse = options.paperComponentNameDeprecated; <ide> } else { <ide> throw new Error( <del> 'Failed to find native component for either "::_COMPONENT_NAME_::" or "::_COMPONENT_NAME_DEPRECATED_::"', <add> `Failed to find native component for either ${componentName} or ${options.paperComponentNameDeprecated || <add> '(unknown)'}`, <ide> ); <ide> } <ide> }
1
Python
Python
add missing softirq stat against total cpu usage
6acd6cc6caf2df7e30eb2291b6d3a447e3555271
<ide><path>glances/glances.py <ide> def __update__(self, input_stats): <ide> if hasattr(self.cputime_new, 'irq'): <ide> self.cpu['irq'] = (self.cputime_new.irq - <ide> self.cputime_old.irq) * percent <add> if hasattr(self.cputime_new, 'softirq'): <add> self.cpu['softirq'] = (self.cputime_new.softirq - <add> self.cputime_old.softirq) * percent <ide> self.cputime_old = self.cputime_new <ide> self.cputime_total_old = self.cputime_total_new <ide> except Exception:
1
Java
Java
add mockserverconfigurer to webtestclient
4db0ce12e1df049d651a03894b1307bd002833cf
<ide><path>spring-test/src/main/java/org/springframework/test/web/reactive/server/AbstractMockServerSpec.java <ide> <ide> private final List<WebFilter> filters = new ArrayList<>(4); <ide> <add> private final List<MockServerConfigurer> configurers = new ArrayList<>(4); <add> <ide> <ide> @Override <ide> public <T extends B> T webFilter(WebFilter... filter) { <ide> this.filters.addAll(Arrays.asList(filter)); <ide> return self(); <ide> } <ide> <add> @Override <add> public <T extends B> T apply(MockServerConfigurer configurer) { <add> configurer.afterConfigureAdded(this); <add> this.configurers.add(configurer); <add> return self(); <add> } <add> <ide> @SuppressWarnings("unchecked") <ide> private <T extends B> T self() { <ide> return (T) this; <ide> } <ide> <del> <ide> @Override <ide> public WebTestClient.Builder configureClient() { <ide> WebHttpHandlerBuilder builder = initHttpHandlerBuilder(); <del> builder.filters(currentFilters -> { <del> List<WebFilter> toPrepend = new ArrayList<>(this.filters); <del> Collections.reverse(toPrepend); <del> toPrepend.forEach(filter -> currentFilters.add(0, filter)); <del> }); <add> builder.filters(theFilters -> theFilters.addAll(0, this.filters)); <add> this.configurers.forEach(configurer -> configurer.beforeServerCreated(builder)); <ide> return new DefaultWebTestClientBuilder(builder.build()); <ide> } <ide> <ide> public WebTestClient.Builder configureClient() { <ide> */ <ide> protected abstract WebHttpHandlerBuilder initHttpHandlerBuilder(); <ide> <del> <ide> @Override <ide> public WebTestClient build() { <ide> return configureClient().build(); <ide><path>spring-test/src/main/java/org/springframework/test/web/reactive/server/MockServerConfigurer.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.test.web.reactive.server; <add> <add>import org.springframework.web.server.adapter.WebHttpHandlerBuilder; <add> <add>/** <add> * Contract that frameworks or applications can use to pre-package a set of <add> * customizations to a {@link WebTestClient.MockServerSpec} and expose that <add> * as a shortcut. <add> * <add> * <p>An implementation of this interface can be plugged in via <add> * {@link WebTestClient.MockServerSpec#apply} where instances are likely obtained <add> * via static methods, e.g.: <add> * <add> * <pre class="code"> <add> * import static org.example.ExampleSetup.securitySetup; <add> * <add> * // ... <add> * <add> * WebTestClient.bindToController(new TestController()) <add> * .apply(securitySetup("foo","bar")) <add> * .build(); <add> * </pre> <add> * <add> * @author Rossen Stoyanchev <add> * @since 5.0 <add> */ <add>public interface MockServerConfigurer { <add> <add> /** <add> * Invoked immediately, i.e. before this method returns. 
<add> * @param serverSpec the serverSpec to which the configurer is added <add> */ <add> default void afterConfigureAdded(WebTestClient.MockServerSpec<?> serverSpec) { <add> } <add> <add> /** <add> * Invoked just before the mock server is built. Use this hook to inspect <add> * and/or modify application-declared filtes and exception handlers, <add> * @param builder the builder for the {@code HttpHandler} that will handle <add> * requests (i.e. the mock server) <add> */ <add> default void beforeServerCreated(WebHttpHandlerBuilder builder) { <add> } <add> <add>} <ide><path>spring-test/src/main/java/org/springframework/test/web/reactive/server/WebTestClient.java <ide> static Builder bindToServer() { <ide> */ <ide> <T extends B> T webFilter(WebFilter... filter); <ide> <add> /** <add> * Shortcut for pre-packaged customizations to the mock server setup. <add> * @param configurer the configurer to apply <add> */ <add> <T extends B> T apply(MockServerConfigurer configurer); <add> <ide> /** <ide> * Proceed to configure and build the test client. <ide> */ <ide><path>spring-test/src/test/java/org/springframework/test/web/reactive/server/MockServerSpecTests.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.test.web.reactive.server; <add> <add>import java.nio.charset.StandardCharsets; <add> <add>import org.junit.Test; <add>import reactor.core.publisher.Mono; <add> <add>import org.springframework.core.io.buffer.DataBuffer; <add>import org.springframework.core.io.buffer.DefaultDataBufferFactory; <add>import org.springframework.web.server.ServerWebExchange; <add>import org.springframework.web.server.WebFilter; <add>import org.springframework.web.server.WebFilterChain; <add>import org.springframework.web.server.adapter.WebHttpHandlerBuilder; <add> <add>/** <add> * Unit tests for {@link AbstractMockServerSpec}. 
<add> * @author Rossen Stoyanchev <add> */ <add>public class MockServerSpecTests { <add> <add> private final TestMockServerSpec serverSpec = new TestMockServerSpec(); <add> <add> <add> @Test <add> public void applyFiltersAfterConfigurerAdded() { <add> <add> this.serverSpec.webFilter(new TestWebFilter("A")); <add> <add> this.serverSpec.apply(new MockServerConfigurer() { <add> <add> @Override <add> public void afterConfigureAdded(WebTestClient.MockServerSpec<?> spec) { <add> spec.webFilter(new TestWebFilter("B")); <add> } <add> }); <add> <add> this.serverSpec.build().get().uri("/").exchange().expectBody(String.class) <add> .isEqualTo("{test-attribute=:A:B}"); <add> } <add> <add> @Test <add> public void applyFiltersBeforeServerCreated() { <add> <add> this.serverSpec.webFilter(new TestWebFilter("App-A")); <add> this.serverSpec.webFilter(new TestWebFilter("App-B")); <add> <add> this.serverSpec.apply(new MockServerConfigurer() { <add> <add> @Override <add> public void beforeServerCreated(WebHttpHandlerBuilder builder) { <add> builder.filters(filters -> { <add> filters.add(0, new TestWebFilter("Fwk-A")); <add> filters.add(1, new TestWebFilter("Fwk-B")); <add> }); <add> } <add> }); <add> <add> this.serverSpec.build().get().uri("/").exchange().expectBody(String.class) <add> .isEqualTo("{test-attribute=:Fwk-A:Fwk-B:App-A:App-B}"); <add> } <add> <add> <add> private static class TestMockServerSpec extends AbstractMockServerSpec<TestMockServerSpec> { <add> <add> @Override <add> protected WebHttpHandlerBuilder initHttpHandlerBuilder() { <add> return WebHttpHandlerBuilder.webHandler(exchange -> { <add> DefaultDataBufferFactory factory = new DefaultDataBufferFactory(); <add> String text = exchange.getAttributes().toString(); <add> DataBuffer buffer = factory.wrap(text.getBytes(StandardCharsets.UTF_8)); <add> return exchange.getResponse().writeWith(Mono.just(buffer)); <add> }); <add> } <add> } <add> <add> private static class TestWebFilter implements WebFilter { <add> <add> private final String name; <add> <add> TestWebFilter(String name) { <add> this.name = name; <add> } <add> <add> @Override <add> public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) { <add> String name = "test-attribute"; <add> String value = (String) exchange.getAttribute(name).orElse(""); <add> exchange.getAttributes().put(name, value + ":" + this.name); <add> return chain.filter(exchange); <add> } <add> } <add> <add>}
4
Text
Text
add link to gulpjs
b59a8d94cc5f7253a4b8ad8be8d4b3072faf375e
<ide><path>docs/README.md <ide> $ npm install <ide> <ide> ### Instructions <ide> <del>The site requires React, so first make sure you've built the project (via `grunt`). <add>The site requires React, so first make sure you've built the project (via [`grunt`](http://gruntjs.com/getting-started)). <ide> <ide> Use Jekyll to serve the website locally (by default, at `http://localhost:4000`): <ide>
1
Ruby
Ruby
add source modified time metadata
fab16b83e94cb38d9f90834c87b7f1604f19f14e
<ide><path>Library/Homebrew/tab.rb <ide> def self.clear_cache <ide> CACHE.clear <ide> end <ide> <del> def self.create(formula, compiler, stdlib, build) <add> def self.create(formula, compiler, stdlib, build, source_modified_time) <ide> attributes = { <ide> "used_options" => build.used_options.as_flags, <ide> "unused_options" => build.unused_options.as_flags, <ide> "tabfile" => formula.prefix.join(FILENAME), <ide> "built_as_bottle" => build.bottle?, <ide> "poured_from_bottle" => false, <ide> "time" => Time.now.to_i, <add> "source_modified_time" => source_modified_time.to_i, <ide> "HEAD" => Homebrew.git_head, <ide> "compiler" => compiler, <ide> "stdlib" => stdlib, <ide> def self.from_file(path) <ide> def self.from_file_content(content, path) <ide> attributes = Utils::JSON.load(content) <ide> attributes["tabfile"] = path <add> attributes["source_modified_time"] ||= 0 <ide> attributes["source"] ||= {} <ide> <ide> tapped_from = attributes["tapped_from"] <ide> def self.empty <ide> "built_as_bottle" => false, <ide> "poured_from_bottle" => false, <ide> "time" => nil, <add> "source_modified_time" => 0, <ide> "HEAD" => nil, <ide> "stdlib" => nil, <ide> "compiler" => "clang", <ide> def spec <ide> source["spec"].to_sym <ide> end <ide> <add> def source_modified_time <add> Time.at(super) <add> end <add> <ide> def to_json <ide> attributes = { <ide> "used_options" => used_options.as_flags, <ide> "unused_options" => unused_options.as_flags, <ide> "built_as_bottle" => built_as_bottle, <ide> "poured_from_bottle" => poured_from_bottle, <ide> "time" => time, <add> "source_modified_time" => source_modified_time.to_i, <ide> "HEAD" => self.HEAD, <ide> "stdlib" => (stdlib.to_s if stdlib), <ide> "compiler" => (compiler.to_s if compiler), <ide><path>Library/Homebrew/test/test_tab.rb <ide> def setup <ide> @used = Options.create(%w[--with-foo --without-bar]) <ide> @unused = Options.create(%w[--with-baz --without-qux]) <ide> <del> @tab = Tab.new("used_options" => @used.as_flags, <del> "unused_options" => @unused.as_flags, <del> "built_as_bottle" => false, <del> "poured_from_bottle" => true, <del> "time" => nil, <del> "HEAD" => TEST_SHA1, <del> "compiler" => "clang", <del> "stdlib" => "libcxx", <del> "source" => { <add> @tab = Tab.new("used_options" => @used.as_flags, <add> "unused_options" => @unused.as_flags, <add> "built_as_bottle" => false, <add> "poured_from_bottle" => true, <add> "time" => nil, <add> "source_modified_time" => 0, <add> "HEAD" => TEST_SHA1, <add> "compiler" => "clang", <add> "stdlib" => "libcxx", <add> "source" => { <ide> "tap" => "Homebrew/homebrew", <ide> "path" => nil, <ide> "spec" => "stable"
2
Python
Python
fix an encoding error
835f60d8b4b4948c908b10aaafa9828db3c3071a
<ide><path>utils/build/build.py <ide> def main(argv=None): <ide> sourcemap = sourcemapping = sourcemapargs = '' <ide> <ide> fd, path = tempfile.mkstemp() <del> tmp = open(path, 'w') <add> tmp = open(path, 'w', encoding="utf-8") <ide> sources = [] <ide> <ide> if args.amd: <ide> tmp.write('( function ( root, factory ) {\n\n\tif ( typeof define === \'function\' && define.amd ) {\n\n\t\tdefine( [ \'exports\' ], factory );\n\n\t} else if ( typeof exports === \'object\' ) {\n\n\t\tfactory( exports );\n\n\t} else {\n\n\t\tfactory( root );\n\n\t}\n\n}( this, function ( exports ) {\n\n') <ide> <ide> for include in args.include: <del> with open('includes/' + include + '.json','r') as f: <add> with open('includes/' + include + '.json','r', encoding="utf-8") as f: <ide> files = json.load(f) <ide> for filename in files: <ide> tmp.write('// File:' + filename) <ide> tmp.write('\n\n') <ide> filename = '../../' + filename <ide> sources.append(filename) <del> with open(filename, 'r') as f: <add> with open(filename, 'r', encoding="utf-8") as f: <ide> if filename.endswith(".glsl"): <ide> tmp.write('THREE.ShaderChunk[ \'' + os.path.splitext(os.path.basename(filename))[0] + '\'] = "') <ide> tmp.write(f.read().replace('\n','\\n')) <ide> def main(argv=None): <ide> else: <ide> backup = '' <ide> if os.path.exists(output): <del> with open(output,'r') as f: backup = f.read() <add> with open(output, 'r', encoding="utf-8") as f: backup = f.read() <ide> os.remove(output) <ide> <ide> externs = ' --externs '.join(args.externs) <ide> def main(argv=None): <ide> # header <ide> <ide> if os.path.exists(output): <del> with open(output,'r') as f: text = f.read() <del> with open(output,'w') as f: f.write('// threejs.org/license\n' + text + sourcemapping) <add> with open(output, 'r', encoding="utf-8") as f: text = f.read() <add> with open(output, 'w', encoding="utf-8") as f: f.write('// threejs.org/license\n' + text + sourcemapping) <ide> else: <ide> print("Minification with Closure compiler failed. Check your Java runtime version.") <del> with open(output,'w') as f: f.write(backup) <add> with open(output, 'w', encoding="utf-8") as f: f.write(backup) <ide> <ide> os.close(fd) <ide> os.remove(path)
1
PHP
PHP
fix vulnerability in encrypter mac address
65d9a061d9209694e8addd9c6ce94dcf5bfe7180
<ide><path>src/Illuminate/Encryption/Encrypter.php <ide> public function encrypt($value) <ide> // Once we have the encrypted value we will go ahead base64_encode the input <ide> // vector and create the MAC for the encrypted value so we can verify its <ide> // authenticity. Then, we'll JSON encode the data in a "payload" array. <del> $iv = base64_encode($iv); <del> <del> $mac = $this->hash($value); <add> $mac = $this->hash($iv = base64_encode($iv), $value); <ide> <ide> return base64_encode(json_encode(compact('iv', 'value', 'mac'))); <ide> } <ide> protected function getJsonPayload($payload) <ide> // to decrypt the given value. We'll also check the MAC for this encryption. <ide> if ( ! $payload or $this->invalidPayload($payload)) <ide> { <del> throw new DecryptException("Invalid data passed to encrypter."); <add> throw new DecryptException("Invalid data."); <ide> } <ide> <del> if ($payload['mac'] != $this->hash($payload['value'])) <add> if ($payload['mac'] !== $this->hash($payload['iv'], $payload['value'])) <ide> { <del> throw new DecryptException("MAC for payload is invalid."); <add> throw new DecryptException("MAC is invalid."); <ide> } <ide> <ide> return $payload; <ide> protected function getJsonPayload($payload) <ide> /** <ide> * Create a MAC for the given value. <ide> * <add> * @param stirng $iv <ide> * @param string $value <ide> * @return string <ide> */ <del> protected function hash($value) <add> protected function hash($iv, $value) <ide> { <del> return hash_hmac('sha256', $value, $this->key); <add> return hash_hmac('sha256', $iv.$value, $this->key); <ide> } <ide> <ide> /**
1
Javascript
Javascript
fix collectionview.arraydidchange documentation
d7bcbf0628d6b177ea854ba7a9b274b4295caff1
<ide><path>packages/ember-views/lib/views/collection_view.js <ide> Ember.CollectionView = Ember.ContainerView.extend( <ide> This array observer is added in `contentDidChange`. <ide> <ide> @method arrayDidChange <del> @param {Array} addedObjects the objects that were added to the content <del> @param {Array} removedObjects the objects that were removed from the content <del> @param {Number} changeIndex the index at which the changes occurred <add> @param {Array} content the managed collection of objects <add> @param {Number} start the index at which the changes occurred <add> @param {Number} removed number of object removed from content <add> @param {Number} added number of object added to content <ide> */ <ide> arrayDidChange: function(content, start, removed, added) { <ide> var itemViewClass = get(this, 'itemViewClass'),
1
Go
Go
remove unused mock code and use gotest.tools
746c51b54f0d891333016012c3d04e6deb724757
<ide><path>registry/auth_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/api/types" <ide> registrytypes "github.com/docker/docker/api/types/registry" <add> "gotest.tools/v3/assert" <ide> ) <ide> <ide> func buildAuthConfigs() map[string]types.AuthConfig { <ide> func TestResolveAuthConfigIndexServer(t *testing.T) { <ide> } <ide> <ide> resolved := ResolveAuthConfig(authConfigs, officialIndex) <del> assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") <add> assert.Equal(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") <ide> <ide> resolved = ResolveAuthConfig(authConfigs, privateIndex) <del> assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") <add> assert.Check(t, resolved != indexConfig, "Expected ResolveAuthConfig to not return IndexServer") <ide> } <ide> <ide> func TestResolveAuthConfigFullURL(t *testing.T) { <ide><path>registry/registry_mock_test.go <ide> package registry // import "github.com/docker/docker/registry" <ide> import ( <ide> "encoding/json" <ide> "errors" <del> "fmt" <ide> "io" <ide> "net" <ide> "net/http" <ide> "net/http/httptest" <del> "net/url" <del> "strconv" <del> "strings" <ide> "testing" <del> "time" <ide> <del> "github.com/docker/distribution/reference" <ide> registrytypes "github.com/docker/docker/api/types/registry" <ide> "github.com/gorilla/mux" <del> <ide> "github.com/sirupsen/logrus" <add> "gotest.tools/v3/assert" <ide> ) <ide> <ide> var ( <ide> testHTTPServer *httptest.Server <ide> testHTTPSServer *httptest.Server <del> testLayers = map[string]map[string]string{ <del> "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { <del> "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", <del> "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", <del> "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, <del> "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, <del> "Tty":false,"OpenStdin":false,"StdinOnce":false, <del> "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, <del> "VolumesFrom":"","Entrypoint":null},"Size":424242}`, <del> "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", <del> "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", <del> "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, <del> "layer": string([]byte{ <del> 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, <del> 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, <del> 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, <del> 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, <del> 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, <del> 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, <del> 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, <del> 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, <del> 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, <del> 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, <del> 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, <del> }), <del> }, <del> 
"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { <del> "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", <del> "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", <del> "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", <del> "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, <del> "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, <del> "Tty":false,"OpenStdin":false,"StdinOnce":false, <del> "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, <del> "VolumesFrom":"","Entrypoint":null},"Size":424242}`, <del> "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", <del> "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", <del> "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", <del> "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, <del> "layer": string([]byte{ <del> 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, <del> 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, <del> 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, <del> 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, <del> 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, <del> 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, <del> 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, <del> 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, <del> 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, <del> 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, <del> 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, <del> }), <del> }, <del> } <del> testRepositories = map[string]map[string]string{ <del> "foo42/bar": { <del> "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", <del> "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", <del> }, <del> } <del> mockHosts = map[string][]net.IP{ <del> "": {net.ParseIP("0.0.0.0")}, <del> "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, <del> "example.com": {net.ParseIP("42.42.42.42")}, <del> "other.com": {net.ParseIP("43.43.43.43")}, <del> } <ide> ) <ide> <ide> func init() { <ide> r := mux.NewRouter() <ide> <ide> // /v1/ <ide> r.HandleFunc("/v1/_ping", handlerGetPing).Methods(http.MethodGet) <del> r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods(http.MethodGet) <del> r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods(http.MethodPut) <del> r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods(http.MethodGet, http.MethodDelete) <del> r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods(http.MethodGet) <del> r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods(http.MethodPut) <del> r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods(http.MethodGet, http.MethodPost, http.MethodPut) <del> r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods(http.MethodGet, 
http.MethodPut, http.MethodDelete) <del> r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods(http.MethodPut) <ide> r.HandleFunc("/v1/search", handlerSearch).Methods(http.MethodGet) <ide> <ide> // /v2/ <ide> func init() { <ide> // I believe in future Go versions this will fail, so let's fix it later <ide> return net.LookupIP(host) <ide> } <add> mockHosts := map[string][]net.IP{ <add> "": {net.ParseIP("0.0.0.0")}, <add> "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, <add> "example.com": {net.ParseIP("42.42.42.42")}, <add> "other.com": {net.ParseIP("43.43.43.43")}, <add> } <ide> for h, addrs := range mockHosts { <ide> if host == h { <ide> return addrs, nil <ide> func init() { <ide> <ide> func handlerAccessLog(handler http.Handler) http.Handler { <ide> logHandler := func(w http.ResponseWriter, r *http.Request) { <del> logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) <add> logrus.Debugf(`%s "%s %s"`, r.RemoteAddr, r.Method, r.URL) <ide> handler.ServeHTTP(w, r) <ide> } <ide> return http.HandlerFunc(logHandler) <ide> func writeResponse(w http.ResponseWriter, message interface{}, code int) { <ide> w.Write(body) <ide> } <ide> <del>func readJSON(r *http.Request, dest interface{}) error { <del> body, err := io.ReadAll(r.Body) <del> if err != nil { <del> return err <del> } <del> return json.Unmarshal(body, dest) <del>} <del> <del>func apiError(w http.ResponseWriter, message string, code int) { <del> body := map[string]string{ <del> "error": message, <del> } <del> writeResponse(w, body, code) <del>} <del> <del>func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { <del> if a == b { <del> return <del> } <del> if len(message) == 0 { <del> message = fmt.Sprintf("%v != %v", a, b) <del> } <del> t.Fatal(message) <del>} <del> <del>func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { <del> if a != b { <del> return <del> } <del> if len(message) == 0 { <del> message = fmt.Sprintf("%v == %v", a, b) <del> } <del> t.Fatal(message) <del>} <del> <del>// Similar to assertEqual, but does not stop test <del>func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { <del> if a == b { <del> return <del> } <del> message := fmt.Sprintf("%v != %v", a, b) <del> if len(messagePrefix) != 0 { <del> message = messagePrefix + ": " + message <del> } <del> t.Error(message) <del>} <del> <del>// Similar to assertNotEqual, but does not stop test <del>func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { <del> if a != b { <del> return <del> } <del> message := fmt.Sprintf("%v == %v", a, b) <del> if len(messagePrefix) != 0 { <del> message = messagePrefix + ": " + message <del> } <del> t.Error(message) <del>} <del> <del>func requiresAuth(w http.ResponseWriter, r *http.Request) bool { <del> writeCookie := func() { <del> value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) <del> cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} <del> http.SetCookie(w, cookie) <del> // FIXME(sam): this should be sent only on Index routes <del> value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) <del> w.Header().Add("X-Docker-Token", value) <del> } <del> if len(r.Cookies()) > 0 { <del> writeCookie() <del> return true <del> } <del> if len(r.Header.Get("Authorization")) > 0 { <del> writeCookie() <del> return true <del> } <del> w.Header().Add("WWW-Authenticate", "token") <del> apiError(w, "Wrong auth", http.StatusUnauthorized) <del> return false <del>} <del> <ide> func 
handlerGetPing(w http.ResponseWriter, r *http.Request) { <ide> writeResponse(w, true, http.StatusOK) <ide> } <ide> <del>func handlerGetImage(w http.ResponseWriter, r *http.Request) { <del> if !requiresAuth(w, r) { <del> return <del> } <del> vars := mux.Vars(r) <del> layer, exists := testLayers[vars["image_id"]] <del> if !exists { <del> http.NotFound(w, r) <del> return <del> } <del> writeHeaders(w) <del> layerSize := len(layer["layer"]) <del> w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) <del> io.WriteString(w, layer[vars["action"]]) <del>} <del> <del>func handlerPutImage(w http.ResponseWriter, r *http.Request) { <del> if !requiresAuth(w, r) { <del> return <del> } <del> vars := mux.Vars(r) <del> imageID := vars["image_id"] <del> action := vars["action"] <del> layer, exists := testLayers[imageID] <del> if !exists { <del> if action != "json" { <del> http.NotFound(w, r) <del> return <del> } <del> layer = make(map[string]string) <del> testLayers[imageID] = layer <del> } <del> if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { <del> if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { <del> apiError(w, "Wrong checksum", http.StatusBadRequest) <del> return <del> } <del> } <del> body, err := io.ReadAll(r.Body) <del> if err != nil { <del> apiError(w, fmt.Sprintf("Error: %s", err), http.StatusInternalServerError) <del> return <del> } <del> layer[action] = string(body) <del> writeResponse(w, true, http.StatusOK) <del>} <del> <del>func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { <del> if !requiresAuth(w, r) { <del> return <del> } <del> repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) <del> if err != nil { <del> apiError(w, "Could not parse repository", http.StatusBadRequest) <del> return <del> } <del> tags, exists := testRepositories[repositoryName.String()] <del> if !exists { <del> apiError(w, "Repository not found", http.StatusNotFound) <del> return <del> } <del> if r.Method == http.MethodDelete { <del> delete(testRepositories, repositoryName.String()) <del> writeResponse(w, true, http.StatusOK) <del> return <del> } <del> writeResponse(w, tags, http.StatusOK) <del>} <del> <del>func handlerGetTag(w http.ResponseWriter, r *http.Request) { <del> if !requiresAuth(w, r) { <del> return <del> } <del> vars := mux.Vars(r) <del> repositoryName, err := reference.WithName(vars["repository"]) <del> if err != nil { <del> apiError(w, "Could not parse repository", http.StatusBadRequest) <del> return <del> } <del> tagName := vars["tag"] <del> tags, exists := testRepositories[repositoryName.String()] <del> if !exists { <del> apiError(w, "Repository not found", http.StatusNotFound) <del> return <del> } <del> tag, exists := tags[tagName] <del> if !exists { <del> apiError(w, "Tag not found", http.StatusNotFound) <del> return <del> } <del> writeResponse(w, tag, http.StatusOK) <del>} <del> <del>func handlerPutTag(w http.ResponseWriter, r *http.Request) { <del> if !requiresAuth(w, r) { <del> return <del> } <del> vars := mux.Vars(r) <del> repositoryName, err := reference.WithName(vars["repository"]) <del> if err != nil { <del> apiError(w, "Could not parse repository", http.StatusBadRequest) <del> return <del> } <del> tagName := vars["tag"] <del> tags, exists := testRepositories[repositoryName.String()] <del> if !exists { <del> tags = make(map[string]string) <del> testRepositories[repositoryName.String()] = tags <del> } <del> tagValue := "" <del> readJSON(r, tagValue) <del> tags[tagName] = tagValue <del> writeResponse(w, true, 
http.StatusOK) <del>} <del> <del>func handlerUsers(w http.ResponseWriter, r *http.Request) { <del> code := http.StatusOK <del> if r.Method == http.MethodPost { <del> code = http.StatusCreated <del> } else if r.Method == http.MethodPut { <del> code = http.StatusNoContent <del> } <del> writeResponse(w, "", code) <del>} <del> <del>func handlerImages(w http.ResponseWriter, r *http.Request) { <del> u, _ := url.Parse(testHTTPServer.URL) <del> w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) <del> w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) <del> if r.Method == http.MethodPut { <del> if strings.HasSuffix(r.URL.Path, "images") { <del> writeResponse(w, "", http.StatusNoContent) <del> return <del> } <del> writeResponse(w, "", http.StatusOK) <del> return <del> } <del> if r.Method == http.MethodDelete { <del> writeResponse(w, "", http.StatusNoContent) <del> return <del> } <del> var images []map[string]string <del> for imageID, layer := range testLayers { <del> image := make(map[string]string) <del> image["id"] = imageID <del> image["checksum"] = layer["checksum_tarsum"] <del> image["Tag"] = "latest" <del> images = append(images, image) <del> } <del> writeResponse(w, images, http.StatusOK) <del>} <del> <del>func handlerAuth(w http.ResponseWriter, r *http.Request) { <del> writeResponse(w, "OK", http.StatusOK) <del>} <del> <ide> func handlerSearch(w http.ResponseWriter, r *http.Request) { <ide> result := &registrytypes.SearchResults{ <ide> Query: "fakequery", <ide> func TestPing(t *testing.T) { <ide> if err != nil { <ide> t.Fatal(err) <ide> } <del> assertEqual(t, res.StatusCode, http.StatusOK, "") <del> assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", <del> "This is not a Mocked Registry") <add> assert.Equal(t, res.StatusCode, http.StatusOK, "") <add> assert.Equal(t, res.Header.Get("X-Docker-Registry-Config"), "mock", "This is not a Mocked Registry") <ide> } <del> <del>/* Uncomment this to test Mocked Registry locally with curl <del> * WARNING: Don't push on the repos uncommented, it'll block the tests <del> * <del>func TestWait(t *testing.T) { <del> logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) <del> c := make(chan int) <del> <-c <del>} <del> <del>//*/ <ide><path>registry/registry_test.go <ide> import ( <ide> "github.com/docker/docker/api/types" <ide> registrytypes "github.com/docker/docker/api/types/registry" <ide> "gotest.tools/v3/assert" <add> is "gotest.tools/v3/assert/cmp" <ide> "gotest.tools/v3/skip" <ide> ) <ide> <ide> func TestPingRegistryEndpoint(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) <add> assert.Equal(t, regInfo.Standalone, expectedStandalone, assertMessage) <ide> } <ide> <ide> testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") <ide> func TestEndpoint(t *testing.T) { <ide> assertInsecureIndex := func(index *registrytypes.IndexInfo) { <ide> index.Secure = true <ide> _, err := NewV1Endpoint(index, "", nil) <del> assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") <del> assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") <add> assert.ErrorContains(t, err, "insecure-registry", index.Name+": Expected insecure-registry error for insecure index") <ide> index.Secure = false <ide> } <ide> <ide> assertSecureIndex := func(index 
*registrytypes.IndexInfo) { <ide> index.Secure = true <ide> _, err := NewV1Endpoint(index, "", nil) <del> assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") <del> assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") <add> assert.ErrorContains(t, err, "certificate signed by unknown authority", index.Name+": Expected cert error for secure index") <ide> index.Secure = false <ide> } <ide> <ide> index := &registrytypes.IndexInfo{} <ide> index.Name = makeURL("/v1/") <ide> endpoint := expandEndpoint(index) <del> assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) <add> assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) <ide> assertInsecureIndex(index) <ide> <ide> index.Name = makeURL("") <ide> endpoint = expandEndpoint(index) <del> assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") <add> assert.Equal(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") <ide> assertInsecureIndex(index) <ide> <ide> httpURL := makeURL("") <ide> index.Name = strings.SplitN(httpURL, "://", 2)[1] <ide> endpoint = expandEndpoint(index) <del> assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") <add> assert.Equal(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") <ide> assertInsecureIndex(index) <ide> <ide> index.Name = makeHTTPSURL("/v1/") <ide> endpoint = expandEndpoint(index) <del> assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) <add> assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) <ide> assertSecureIndex(index) <ide> <ide> index.Name = makeHTTPSURL("") <ide> endpoint = expandEndpoint(index) <del> assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") <add> assert.Equal(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") <ide> assertSecureIndex(index) <ide> <ide> httpsURL := makeHTTPSURL("") <ide> index.Name = strings.SplitN(httpsURL, "://", 2)[1] <ide> endpoint = expandEndpoint(index) <del> assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") <add> assert.Equal(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") <ide> assertSecureIndex(index) <ide> <ide> badEndpoints := []string{ <ide> func TestEndpoint(t *testing.T) { <ide> for _, address := range badEndpoints { <ide> index.Name = address <ide> _, err := NewV1Endpoint(index, "", nil) <del> checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") <add> assert.Check(t, err != nil, "Expected error while expanding bad endpoint: %s", address) <ide> } <ide> } <ide> <ide> func TestParseRepositoryInfo(t *testing.T) { <ide> if err != nil { <ide> t.Error(err) <ide> } else { <del> checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) <del> checkEqual(t, reference.Path(repoInfo.Name), expectedRepoInfo.RemoteName, reposName) <del> checkEqual(t, reference.FamiliarName(repoInfo.Name), expectedRepoInfo.LocalName, reposName) <del> checkEqual(t, repoInfo.Name.Name(), expectedRepoInfo.CanonicalName, reposName) <del> checkEqual(t, repoInfo.Index.Official, 
expectedRepoInfo.Index.Official, reposName) <del> checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) <add> assert.Check(t, is.Equal(repoInfo.Index.Name, expectedRepoInfo.Index.Name), reposName) <add> assert.Check(t, is.Equal(reference.Path(repoInfo.Name), expectedRepoInfo.RemoteName), reposName) <add> assert.Check(t, is.Equal(reference.FamiliarName(repoInfo.Name), expectedRepoInfo.LocalName), reposName) <add> assert.Check(t, is.Equal(repoInfo.Name.Name(), expectedRepoInfo.CanonicalName), reposName) <add> assert.Check(t, is.Equal(repoInfo.Index.Official, expectedRepoInfo.Index.Official), reposName) <add> assert.Check(t, is.Equal(repoInfo.Official, expectedRepoInfo.Official), reposName) <ide> } <ide> } <ide> } <ide> func TestNewIndexInfo(t *testing.T) { <ide> if err != nil { <ide> t.Fatal(err) <ide> } else { <del> checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") <del> checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") <del> checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") <del> checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") <add> assert.Check(t, is.Equal(index.Name, expectedIndexInfo.Name), indexName+" name") <add> assert.Check(t, is.Equal(index.Official, expectedIndexInfo.Official), indexName+" is official") <add> assert.Check(t, is.Equal(index.Secure, expectedIndexInfo.Secure), indexName+" is secure") <add> assert.Check(t, is.Equal(len(index.Mirrors), len(expectedIndexInfo.Mirrors)), indexName+" mirrors") <ide> } <ide> } <ide> } <ide> func TestSearchRepositories(t *testing.T) { <ide> if results == nil { <ide> t.Fatal("Expected non-nil SearchResults object") <ide> } <del> assertEqual(t, results.NumResults, 1, "Expected 1 search results") <del> assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") <del> assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") <add> assert.Equal(t, results.NumResults, 1, "Expected 1 search results") <add> assert.Equal(t, results.Query, "fakequery", "Expected 'fakequery' as query") <add> assert.Equal(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") <ide> } <ide> <ide> func TestTrustedLocation(t *testing.T) { <ide> func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { <ide> reqFrom.Header.Add("Authorization", "super_secret") <ide> reqTo, _ := http.NewRequest(http.MethodGet, urls[1], nil) <ide> <del> addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) <add> _ = addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) <ide> <ide> if len(reqTo.Header) != 1 { <ide> t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) <ide> func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { <ide> reqFrom.Header.Add("Authorization", "super_secret") <ide> reqTo, _ := http.NewRequest(http.MethodGet, urls[1], nil) <ide> <del> addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) <add> _ = addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) <ide> <ide> if len(reqTo.Header) != 2 { <ide> t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
3
Python
Python
fix edge cases of custom object deserialization
7e870a97eca4c8a3301a4686a04fc522997b68e9
<ide><path>keras/utils/generic_utils.py <ide> def deserialize_keras_object(identifier, module_objects=None, <ide> ': ' + class_name) <ide> if hasattr(cls, 'from_config'): <ide> arg_spec = inspect.getargspec(cls.from_config) <add> custom_objects = custom_objects or {} <ide> if 'custom_objects' in arg_spec.args: <del> custom_objects = custom_objects or {} <ide> return cls.from_config(config['config'], <ide> custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) + <ide> list(custom_objects.items()))) <del> return cls.from_config(config['config']) <add> with CustomObjectScope(custom_objects): <add> return cls.from_config(config['config']) <ide> else: <ide> # Then `cls` may be a function returning a class. <ide> # in this case by convention `config` holds <ide> # the kwargs of the function. <del> return cls(**config['config']) <add> custom_objects = custom_objects or {} <add> with CustomObjectScope(custom_objects): <add> return cls(**config['config']) <ide> elif isinstance(identifier, six.string_types): <ide> function_name = identifier <ide> if custom_objects and function_name in custom_objects: <ide><path>tests/test_model_saving.py <ide> import numpy as np <ide> from numpy.testing import assert_allclose <ide> <add>from keras import backend as K <ide> from keras.models import Model, Sequential <ide> from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed <ide> from keras.layers import Input <ide> def test_saving_lambda_custom_objects(): <ide> assert_allclose(out, out2, atol=1e-05) <ide> <ide> <add>@keras_test <add>def test_saving_custom_activation_function(): <add> x = Input(shape=(3,)) <add> output = Dense(3, activation=K.cos)(x) <add> <add> model = Model(x, output) <add> model.compile(loss=losses.MSE, <add> optimizer=optimizers.RMSprop(lr=0.0001), <add> metrics=[metrics.categorical_accuracy]) <add> x = np.random.random((1, 3)) <add> y = np.random.random((1, 3)) <add> model.train_on_batch(x, y) <add> <add> out = model.predict(x) <add> _, fname = tempfile.mkstemp('.h5') <add> save_model(model, fname) <add> <add> model = load_model(fname, custom_objects={'cos': K.cos}) <add> os.remove(fname) <add> <add> out2 = model.predict(x) <add> assert_allclose(out, out2, atol=1e-05) <add> <ide> if __name__ == '__main__': <ide> pytest.main([__file__])
2
Javascript
Javascript
add compatibility for htmlelement's dataset (#945)
d6e4607fd039c0f2ccb7c863d048cd4d9235ec9d
<ide><path>web/compatibility.js <ide> }); <ide> })(); <ide> <add>// HTMLElement dataset property <add>(function checkDatasetProperty() { <add> var div = document.createElement('div'); <add> if ('dataset' in div) <add> return; // dataset property exists <add> Object.defineProperty(HTMLElement.prototype, 'dataset', { <add> get: function htmlElementDatasetGetter() { <add> // adding dataset field to the actual object <add> return (this.dataset = {}); <add> } <add> }); <add>})();
1
Javascript
Javascript
add e2e tests, bug fixes for testids
9fdbf6029eead8e80a285350293b1e3bf164c440
<ide><path>Libraries/Components/DatePicker/DatePickerIOS.ios.js <ide> class DatePickerIOS extends React.Component<Props> { <ide> return ( <ide> <View style={props.style}> <ide> <RCTDatePickerIOS <add> testID={props.testID} <ide> ref={picker => { <ide> this._picker = picker; <ide> }} <ide><path>Libraries/Components/Picker/PickerIOS.ios.js <ide> type RCTPickerIOSType = Class< <ide> onStartShouldSetResponder: () => boolean, <ide> selectedIndex: number, <ide> style?: ?TextStyleProp, <add> testID?: ?string, <ide> |}>, <ide> >, <ide> >; <ide> class PickerIOS extends React.Component<Props, State> { <ide> ref={picker => { <ide> this._picker = picker; <ide> }} <add> testID={this.props.testID} <ide> style={[styles.pickerIOS, this.props.itemStyle]} <ide> items={this.state.items} <ide> selectedIndex={this.state.selectedIndex} <ide><path>RNTester/e2e/__tests__/DatePickerIOS-test.js <add>/** <add> * Copyright (c) Facebook, Inc. and its affiliates. <add> * <add> * This source code is licensed under the MIT license found in the <add> * LICENSE file in the root directory of this source tree. <add> * <add> * @emails oncall+react_native <add> * @format <add> */ <add> <add>/* global element, by, expect */ <add> <add>describe('DatePickerIOS', () => { <add> beforeAll(async () => { <add> await element(by.id('explorer_search')).replaceText('<DatePickerIOS>'); <add> await element( <add> by.label( <add> '<DatePickerIOS> Select dates and times using the native UIDatePicker.', <add> ), <add> ).tap(); <add> }); <add> <add> afterAll(async () => { <add> await element(by.label('Back')).tap(); <add> }); <add> <add> it('Should change indicator with datetime picker', async () => { <add> const testID = 'date-and-time'; <add> const indicatorID = 'date-and-time-indicator'; <add> <add> const testElement = await element( <add> by.type('UIPickerView').withAncestor(by.id(testID)), <add> ); <add> const indicator = await element(by.id(indicatorID)); <add> <add> await expect(testElement).toBeVisible(); <add> await expect(indicator).toBeVisible(); <add> <add> await testElement.setColumnToValue(0, 'Dec 4'); <add> await testElement.setColumnToValue(1, '4'); <add> await testElement.setColumnToValue(2, '10'); <add> await testElement.setColumnToValue(3, 'AM'); <add> <add> await expect(indicator).toHaveText('12/4/2005 4:10 AM'); <add> }); <add> <add> it('Should change indicator with date-only picker', async () => { <add> const testID = 'date-only'; <add> const indicatorID = 'date-and-time-indicator'; <add> <add> const testElement = await element( <add> by.type('UIPickerView').withAncestor(by.id(testID)), <add> ); <add> const indicator = await element(by.id(indicatorID)); <add> <add> await expect(testElement).toBeVisible(); <add> await expect(indicator).toBeVisible(); <add> <add> await testElement.setColumnToValue(0, 'November'); <add> await testElement.setColumnToValue(1, '3'); <add> await testElement.setColumnToValue(2, '2006'); <add> <add> await expect(indicator).toHaveText('11/3/2006 4:10 AM'); <add> }); <add>}); <ide><path>RNTester/e2e/__tests__/Picker-test.js <add>/** <add> * Copyright (c) Facebook, Inc. and its affiliates. <add> * <add> * This source code is licensed under the MIT license found in the <add> * LICENSE file in the root directory of this source tree. 
<add> * <add> * @emails oncall+react_native <add> * @format <add> */ <add> <add>/* global element, by, expect */ <add> <add>describe('Picker', () => { <add> beforeAll(async () => { <add> await element(by.id('explorer_search')).replaceText('<Picker>'); <add> await element( <add> by.label( <add> '<Picker> Provides multiple options to choose from, using either a dropdown menu or a dialog.', <add> ), <add> ).tap(); <add> }); <add> <add> afterAll(async () => { <add> await element(by.label('Back')).tap(); <add> }); <add> <add> it('should be selectable by ID', async () => { <add> await expect(element(by.id('basic-picker'))).toBeVisible(); <add> }); <add>}); <ide><path>RNTester/js/DatePickerIOSExample.js <ide> class DatePickerExample extends React.Component< <ide> return ( <ide> <View> <ide> <WithLabel label="Value:"> <del> <Text> <add> <Text testID="date-and-time-indicator"> <ide> {this.state.date.toLocaleDateString() + <ide> ' ' + <del> this.state.date.toLocaleTimeString()} <add> this.state.date.toLocaleTimeString([], { <add> hour: '2-digit', <add> minute: '2-digit', <add> })} <ide> </Text> <ide> </WithLabel> <ide> <WithLabel label="Timezone:"> <ide> class DatePickerExample extends React.Component< <ide> </WithLabel> <ide> <Heading label="Date + time picker" /> <ide> <DatePickerIOS <add> testID="date-and-time" <ide> date={this.state.date} <ide> mode="datetime" <ide> timeZoneOffsetInMinutes={this.state.timeZoneOffsetInHours * 60} <ide> onDateChange={this.onDateChange} <ide> /> <ide> <Heading label="Date picker" /> <ide> <DatePickerIOS <add> testID="date-only" <ide> date={this.state.date} <ide> mode="date" <ide> timeZoneOffsetInMinutes={this.state.timeZoneOffsetInHours * 60} <ide> onDateChange={this.onDateChange} <ide> /> <ide> <Heading label="Time picker, 10-minute interval" /> <ide> <DatePickerIOS <add> testID="time-only" <ide> date={this.state.date} <ide> mode="time" <ide> timeZoneOffsetInMinutes={this.state.timeZoneOffsetInHours * 60} <ide><path>RNTester/js/PickerExample.js <ide> class PickerExample extends React.Component<{}, $FlowFixMeState> { <ide> <RNTesterPage title="<Picker>"> <ide> <RNTesterBlock title="Basic Picker"> <ide> <Picker <add> testID="basic-picker" <ide> style={styles.picker} <ide> selectedValue={this.state.selected1} <ide> onValueChange={this.onValueChange.bind(this, 'selected1')}>
6
Python
Python
update boto3 to <1.19
5f07675db75024513e1f0042107838b64e5c777f
<ide><path>setup.py <ide> def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version <ide> 'oss2>=2.14.0', <ide> ] <ide> amazon = [ <del> 'boto3>=1.15.0,<1.18.0', <add> 'boto3>=1.15.0,<1.19.0', <ide> 'watchtower~=1.0.6', <ide> 'jsonpath_ng>=1.5.3', <ide> ]
1
Mixed
Ruby
add ssl support for postgresql database tasks
4497abb4a9026bf98475824de3fb15ca9f549c13
<ide><path>activerecord/CHANGELOG.md <add>* Add ssl support for postgresql database tasks <add> <add> Add `PGSSLMODE`, `PGSSLCERT`, `PGSSLKEY` and `PGSSLROOTCERT` to pg_env from database config <add> when running postgresql database tasks. <add> <add> ```yaml <add> # config/database.yml <add> <add> production: <add> sslmode: verify-full <add> sslcert: client.crt <add> sslkey: client.key <add> sslrootcert: ca.crt <add> ``` <add> <add> Environment variables <add> <add> ``` <add> PGSSLMODE=verify-full <add> PGSSLCERT=client.crt <add> PGSSLKEY=client.key <add> PGSSLROOTCERT=ca.crt <add> ``` <add> <add> Fixes #42994 <add> <add> *Michael Bayucot* <add> <ide> * Add config option for ignoring tables when dumping the schema cache. <ide> <ide> Applications can now be configured to ignore certain tables when dumping the schema cache. <ide><path>activerecord/lib/active_record/tasks/postgresql_database_tasks.rb <ide> def establish_master_connection <ide> <ide> def psql_env <ide> {}.tap do |env| <del> env["PGHOST"] = db_config.host if db_config.host <del> env["PGPORT"] = configuration_hash[:port].to_s if configuration_hash[:port] <del> env["PGPASSWORD"] = configuration_hash[:password].to_s if configuration_hash[:password] <del> env["PGUSER"] = configuration_hash[:username].to_s if configuration_hash[:username] <add> env["PGHOST"] = db_config.host if db_config.host <add> env["PGPORT"] = configuration_hash[:port].to_s if configuration_hash[:port] <add> env["PGPASSWORD"] = configuration_hash[:password].to_s if configuration_hash[:password] <add> env["PGUSER"] = configuration_hash[:username].to_s if configuration_hash[:username] <add> env["PGSSLMODE"] = configuration_hash[:sslmode].to_s if configuration_hash[:sslmode] <add> env["PGSSLCERT"] = configuration_hash[:sslcert].to_s if configuration_hash[:sslcert] <add> env["PGSSLKEY"] = configuration_hash[:sslkey].to_s if configuration_hash[:sslkey] <add> env["PGSSLROOTCERT"] = configuration_hash[:sslrootcert].to_s if configuration_hash[:sslrootcert] <ide> end <ide> end <ide> <ide><path>activerecord/test/cases/tasks/postgresql_rake_test.rb <ide> def test_structure_dump_with_env <ide> end <ide> end <ide> <add> def test_structure_dump_with_ssl_env <add> expected_env = { "PGSSLMODE" => "verify-full", "PGSSLCERT" => "client.crt", "PGSSLKEY" => "client.key", "PGSSLROOTCERT" => "root.crt" } <add> expected_command = [expected_env, "pg_dump", "--schema-only", "--no-privileges", "--no-owner", "--file", @filename, "my-app-db"] <add> <add> assert_called_with(Kernel, :system, expected_command, returns: true) do <add> ActiveRecord::Tasks::DatabaseTasks.structure_dump( <add> @configuration.merge(sslmode: "verify-full", sslcert: "client.crt", sslkey: "client.key", sslrootcert: "root.crt"), <add> @filename <add> ) <add> end <add> end <add> <ide> def test_structure_dump_with_extra_flags <ide> expected_command = [{}, "pg_dump", "--schema-only", "--no-privileges", "--no-owner", "--file", @filename, "--noop", "my-app-db"] <ide> <ide> def test_structure_load_with_env <ide> end <ide> end <ide> <add> def test_structure_load_with_ssl_env <add> filename = "awesome-file.sql" <add> expected_env = { "PGSSLMODE" => "verify-full", "PGSSLCERT" => "client.crt", "PGSSLKEY" => "client.key", "PGSSLROOTCERT" => "root.crt" } <add> expected_command = [expected_env, "psql", "--set", "ON_ERROR_STOP=1", "--quiet", "--no-psqlrc", "--file", filename, "--noop", @configuration["database"]] <add> <add> assert_called_with(Kernel, :system, expected_command, returns: true) do <add> 
with_structure_load_flags(["--noop"]) do <add> ActiveRecord::Tasks::DatabaseTasks.structure_load( <add> @configuration.merge(sslmode: "verify-full", sslcert: "client.crt", sslkey: "client.key", sslrootcert: "root.crt"), <add> filename <add> ) <add> end <add> end <add> end <add> <ide> def test_structure_load_with_hash_extra_flags_for_a_different_driver <ide> filename = "awesome-file.sql" <ide> expected_command = [{}, "psql", "--set", "ON_ERROR_STOP=1", "--quiet", "--no-psqlrc", "--file", filename, @configuration["database"]]
3
PHP
PHP
fix issue with rendering elements inside blocks
6c902a19b3b436d9f6d436ba193c85e15dccb53d
<ide><path>lib/Cake/View/View.php <ide> protected function _render($viewFile, $data = array()) { <ide> $data = $this->viewVars; <ide> } <ide> $this->_current = $viewFile; <add> $initialBlocks = count($this->Blocks->unclosed()); <ide> <ide> $this->getEventManager()->dispatch(new CakeEvent('View.beforeRenderFile', $this, array($viewFile))); <ide> $content = $this->_evaluate($viewFile, $data); <del> if ($this->Blocks->active()) { <del> throw new CakeException(__d('cake_dev', 'The "%s" block was left open.', $this->Blocks->active())); <del> } <ide> $afterEvent = new CakeEvent('View.afterRenderFile', $this, array($viewFile, $content)); <ide> //TODO: For BC puporses, set extra info in the event object. Remove when appropriate <ide> $afterEvent->modParams = 1; <ide> protected function _render($viewFile, $data = array()) { <ide> $this->_stack[] = $this->fetch('content'); <ide> $this->assign('content', $content); <ide> <del> $content = $this->_render($this->_parents[$viewFile], $data); <del> <add> $content = $this->_render($this->_parents[$viewFile]); <ide> $this->assign('content', array_pop($this->_stack)); <ide> } <ide> <add> $remainingBlocks = count($this->Blocks->unclosed()); <add> <add> if ($initialBlocks !== $remainingBlocks) { <add> throw new CakeException(__d('cake_dev', 'The "%s" block was left open. Blocks are not allowed to cross files.', $this->Blocks->active())); <add> } <add> <ide> return $content; <ide> } <ide> <ide><path>lib/Cake/View/ViewBlock.php <ide> public function keys() { <ide> public function active() { <ide> return end($this->_active); <ide> } <add> <add>/** <add> * Get the names of the unclosed/active blocks. <add> * <add> * @return array An array of unclosed blocks. <add> */ <add> public function unclosed() { <add> return $this->_active; <add> } <ide> }
2
Ruby
Ruby
Catch insecure Bitbucket
64bcb5a07f74384fbd66daba7d03f1d271d02b68
<ide><path>Library/Homebrew/cmd/audit.rb <ide> def audit_homepage <ide> %r{^http://[^/.]+\.ietf\.org}, <ide> %r{^http://[^/.]+\.tools\.ietf\.org}, <ide> %r{^http://www\.gnu\.org/}, <del> %r{^http://code\.google\.com/} <add> %r{^http://code\.google\.com/}, <add> %r{^http://bitbucket\.org/} <ide> problem "Please use https:// for #{homepage}" <ide> end <ide> <ide> def audit_urls <ide> %r{^http://([^/]*\.|)bintray\.com/}, <ide> %r{^http://tools\.ietf\.org/}, <ide> %r{^http://www\.mirrorservice\.org/}, <del> %r{^http://launchpad\.net/} <add> %r{^http://launchpad\.net/}, <add> %r{^http://bitbucket\.org/} <ide> problem "Please use https:// for #{p}" <ide> when %r{^http://search\.mcpan\.org/CPAN/(.*)}i <ide> problem "#{p} should be `https://cpan.metacpan.org/#{$1}`"
1
Javascript
Javascript
Simplify dateGetter() within the date filter
3eb0c8bc67644412f34e55945f4b538f87bbb003
<ide><path>src/filters.js <ide> function padNumber(num, digits, trim) { <ide> } <ide> function dateGetter(name, size, offset, trim) { <ide> return function(date) { <del> var value = date['get' + name].call(date); <add> var value = date['get' + name](); <ide> if (offset > 0 || value > -offset) <ide> value += offset; <ide> if (value === 0 && offset == -12 ) value = 12;
1
Javascript
Javascript
Move timers to misc/timers
3761be3d99fc63858a5cf6f23f2374922d41be4b
<ide><path>benchmark/misc/timers.js <add>var common = require('../common.js'); <add> <add>var bench = common.createBenchmark(main, { <add> thousands: [500], <add> type: ['depth', 'breadth'] <add>}); <add> <add>function main(conf) { <add> var n = +conf.thousands * 1e3; <add> if (conf.type === 'breadth') <add> breadth(n); <add> else <add> depth(n); <add>} <add> <add>function depth(N) { <add> var n = 0; <add> bench.start(); <add> setTimeout(cb); <add> function cb() { <add> n++; <add> if (n === N) <add> bench.end(N / 1e3); <add> else <add> setTimeout(cb); <add> } <add>} <add> <add>function breadth(N) { <add> var n = 0; <add> bench.start(); <add> function cb() { <add> n++; <add> if (n === N) <add> bench.end(N / 1e3); <add> } <add> for (var i = 0; i < N; i++) { <add> setTimeout(cb); <add> } <add>} <ide><path>benchmark/timers.js <del>function next (i) { <del> if (i <= 0) return; <del> setTimeout(function () { next(i-1); }, 1); <del>} <del>next(700);
2
Javascript
Javascript
Improve error wording
d1864fa43613bac18b55aaa8b2970bd411bb6657
<ide><path>packager/react-packager/src/Resolver/polyfills/require.js <ide> mod.module.hot.acceptCallback(); <ide> } else { <ide> console.log( <del> '[HMR] Module `' + id + '` cannot be accepted. ' + <del> 'Please reload bundle to get the updates.' <add> '[HMR] Module `' + id + '` can\'t be hot reloaded because it ' + <add> 'doesn\'t provide accept callback hook. Reload the app to get the updates.' <ide> ); <ide> } <ide> }
1
Ruby
Ruby
Update the tip formatter to handle multiline tips
e3dbe66e196069fde7d3ff118bd25a03184915fa
<ide><path>railties/guides/rails_guides/textile_extensions.rb <ide> def notestuff(body) <ide> end <ide> <ide> def tip(body) <del> body.gsub!(/^TIP[.:](.*)$/) do |m| <add> body.gsub!(/^TIP[.:](.*?)(\n\Z|\n\n+|\Z)/m) do |m| <ide> result = "<div class='info'><p>" <ide> result << $1.strip <ide> result << '</p></div>' <add> result << $2 if $2 <ide> result <ide> end <ide> end <ide><path>railties/test/guides/textile_extionsions_test.rb <add>require 'isolation/abstract_unit' <add>require 'guides/rails_guides/textile_extensions' <add> <add>class TextileExtensionsTest < Test::Unit::TestCase <add> include ActiveSupport::Testing::Isolation <add> include RailsGuides::TextileExtensions <add> <add> test "tips can handle a single line" do <add> expected_output = "<div class='info'><p>this is a single line tip</p></div>" <add> assert_equal expected_output, tip('TIP. this is a single line tip') <add> end <add> <add> def setup <add> @multi_line_tip = "This is a multi-line tip.\n" + <add> "Isn't it fantastic?" <add> end <add> <add> test "tips can handle a multi-line tip" do <add> expected_output = "<div class='info'><p>#{@multi_line_tip}</p></div>" <add> <add> assert_equal expected_output, tip("TIP. #{@multi_line_tip}") <add> end <add> <add> test "muli-line tips handles text before and after the tip" do <add> pre_line = "This is text before hand.\n\n" <add> post_line = "\n\nThis is some text after" <add> input_text = pre_line + <add> "TIP. #{@multi_line_tip}" + <add> post_line <add> <add> expected_output = pre_line + <add> "<div class='info'><p>#{@multi_line_tip}</p></div>" + <add> post_line <add> <add> assert_equal expected_output, tip(input_text) <add> end <add>end <ide>\ No newline at end of file
2
Ruby
Ruby
Move loop invariants out of loop
9bab0869676e5c7edfa904090df52ef736f2dfda
<ide><path>Library/Homebrew/language/python.rb <ide> def self.setup_install_args(prefix) <ide> end <ide> <ide> def self.rewrite_python_shebang(python_path) <add> regex = %r{^#! ?/usr/bin/(env )?python([23](\.\d{1,2})?)$} <add> maximum_regex_length = 28 # the length of "#! /usr/bin/env pythonx.yyy$" <ide> Pathname(".").find do |f| <del> regex = %r{^#! ?/usr/bin/(env )?python([23](\.\d{1,2})?)$} <del> maximum_regex_length = "#! /usr/bin/env pythonx.yyy$".length <ide> next unless f.file? <ide> next unless regex.match?(f.read(maximum_regex_length)) <ide>
1
Mixed
Javascript
Use TrackballControls instead of OrbitControls
e95a9252bcd683bd6d58e92f4b214526ca4aa799
<ide><path>threejs/lessons/resources/threejs-primitives.js <ide> function main() { <ide> return addElem(parent, 'div', className); <ide> } <ide> <del> document.querySelectorAll('[data-primitive]').forEach(createPrimitiveDOM); <del> document.querySelectorAll('[data-primitive-diagram]').forEach(createPrimitiveDiagram); <add> const renderFuncs = [ <add> ...[...document.querySelectorAll('[data-primitive]')].map(createPrimitiveDOM), <add> ...[...document.querySelectorAll('[data-primitive-diagram]')].map(createPrimitiveDiagram), <add> ]; <ide> <ide> function createPrimitiveDOM(base) { <ide> const name = base.dataset.primitive; <ide> function main() { <ide> } <ide> addDiv(right, '.note').innerHTML = text; <ide> <del> createPrimitive(elem, info); <add> return createPrimitive(elem, info); <ide> } <ide> <ide> function createPrimitiveDiagram(base) { <ide> function main() { <ide> if (!info) { <ide> throw new Error(`no primitive ${name}`); <ide> } <del> createPrimitive(base, info); <add> return createPrimitive(base, info); <ide> } <ide> <ide> function createPrimitive(elem, info) { <ide> function main() { <ide> const camera = new THREE.PerspectiveCamera(fov, aspect, zNear, zFar); <ide> camera.position.z = 15; <ide> <del> const controls = new THREE.OrbitControls(camera, elem); <del> controls.enableZoom = false; <del> controls.enablePan = false; <add> const controls = new THREE.TrackballControls(camera, elem); <add> controls.noZoom = true; <add> controls.noPan = true; <ide> <ide> promise.then((geometryInfo) => { <ide> if (geometryInfo instanceof THREE.BufferGeometry || <ide> function main() { <ide> } <ide> }); <ide> <del> Object.assign(info, { <del> scene, <del> root, <del> elem, <del> camera, <del> }); <add> let oldWidth = -1; <add> let oldHeight = -1; <add> <add> function render(renderer, time) { <add> root.rotation.x = time * .1; <add> root.rotation.y = time * .11; <add> <add> const rect = elem.getBoundingClientRect(); <add> if (rect.bottom < 0 || rect.top > renderer.domElement.clientHeight || <add> rect.right < 0 || rect.left > renderer.domElement.clientWidth) { <add> return; <add> } <add> <add> const width = (rect.right - rect.left) * pixelRatio; <add> const height = (rect.bottom - rect.top) * pixelRatio; <add> const left = rect.left * pixelRatio; <add> const top = rect.top * pixelRatio; <add> <add> if (width !== oldWidth || height !== oldHeight) { <add> controls.handleResize(); <add> } <add> controls.update(); <add> <add> const aspect = width / height; <add> const targetFov = THREE.Math.degToRad(60); <add> const fov = aspect >= 1 <add> ? targetFov <add> : (2 * Math.atan(Math.tan(targetFov * .5) / aspect)); <add> <add> camera.fov = THREE.Math.radToDeg(fov); <add> camera.aspect = aspect; <add> camera.updateProjectionMatrix(); <add> <add> renderer.setViewport(left, top, width, height); <add> renderer.setScissor(left, top, width, height); <add> renderer.render(scene, camera); <add> } <add> <add> return render; <ide> } <ide> <ide> const pixelRatio = 2; // even on low-res we want hi-res rendering <ide> function main() { <ide> resizeRendererToDisplaySize(renderer); <ide> <ide> renderer.setScissorTest(false); <del> // renderer.clear(); <ide> <ide> // Three r93 needs to render at least once for some reason. 
<ide> renderer.render(scene, camera); <ide> <ide> renderer.setScissorTest(true); <ide> <del> for (const info of Object.values(primitives)) { <del> const {root, scene, camera, elem} = info; <del> root.rotation.x = time * .1; <del> root.rotation.y = time * .11; <del> <del> const rect = elem.getBoundingClientRect(); <del> if (rect.bottom < 0 || rect.top > renderer.domElement.clientHeight || <del> rect.right < 0 || rect.left > renderer.domElement.clientWidth) { <del> continue; <del> } <del> <del> const width = (rect.right - rect.left) * pixelRatio; <del> const height = (rect.bottom - rect.top) * pixelRatio; <del> const left = rect.left * pixelRatio; <del> const top = rect.top * pixelRatio; <del> <del> const aspect = width / height; <del> const targetFov = THREE.Math.degToRad(60); <del> const fov = aspect >= 1 <del> ? targetFov <del> : (2 * Math.atan(Math.tan(targetFov * .5) / aspect)); <del> <del> camera.fov = THREE.Math.radToDeg(fov); <del> camera.aspect = aspect; <del> camera.updateProjectionMatrix(); <del> <del> renderer.setViewport(left, top, width, height); <del> renderer.setScissor(left, top, width, height); <del> renderer.render(scene, camera); <del> } <add> renderFuncs.forEach((fn) => { <add> fn(renderer, time); <add> }); <ide> <ide> requestAnimationFrame(render); <ide> } <ide><path>threejs/lessons/threejs-primitives.md <ide> to use it](threejs-scenegraph.html). <ide> <ide> <canvas id="c"></canvas> <ide> <script src="../resources/threejs/r93/three.min.js"></script> <del><script src="../resources/threejs/r93/js/controls/OrbitControls.js"></script> <add><script src="../resources/threejs/r93/js/controls/TrackballControls.js"></script> <ide> <script src="resources/threejs-primitives.js"></script> <ide> <style> <ide> .spread {
2
Text
Text
Add myself to contributors
5c7c08c2e3acf49673d6e3d914b4259328792b12
<ide><path>.github/contributors/jerbob92.md <add># spaCy contributor agreement <add> <add>This spaCy Contributor Agreement (**"SCA"**) is based on the <add>[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). <add>The SCA applies to any contribution that you make to any product or project <add>managed by us (the **"project"**), and sets out the intellectual property rights <add>you grant to us in the contributed materials. The term **"us"** shall mean <add>[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term <add>**"you"** shall mean the person or entity identified below. <add> <add>If you agree to be bound by these terms, fill in the information requested <add>below and include the filled-in version with your first pull request, under the <add>folder [`.github/contributors/`](/.github/contributors/). The name of the file <add>should be your GitHub username, with the extension `.md`. For example, the user <add>example_user would create the file `.github/contributors/example_user.md`. <add> <add>Read this agreement carefully before signing. These terms and conditions <add>constitute a binding legal agreement. <add> <add>## Contributor Agreement <add> <add>1. The term "contribution" or "contributed materials" means any source code, <add>object code, patch, tool, sample, graphic, specification, manual, <add>documentation, or any other material posted or submitted by you to the project. <add> <add>2. With respect to any worldwide copyrights, or copyright applications and <add>registrations, in your contribution: <add> <add> * you hereby assign to us joint ownership, and to the extent that such <add> assignment is or becomes invalid, ineffective or unenforceable, you hereby <add> grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, <add> royalty-free, unrestricted license to exercise all rights under those <add> copyrights. This includes, at our option, the right to sublicense these same <add> rights to third parties through multiple levels of sublicensees or other <add> licensing arrangements; <add> <add> * you agree that each of us can do all things in relation to your <add> contribution as if each of us were the sole owners, and if one of us makes <add> a derivative work of your contribution, the one who makes the derivative <add> work (or has it made will be the sole owner of that derivative work; <add> <add> * you agree that you will not assert any moral rights in your contribution <add> against us, our licensees or transferees; <add> <add> * you agree that we may register a copyright in your contribution and <add> exercise all ownership rights associated with it; and <add> <add> * you agree that neither of us has any duty to consult with, obtain the <add> consent of, pay or render an accounting to the other for any use or <add> distribution of your contribution. <add> <add>3. 
With respect to any patents you own, or that you can license without payment <add>to any third party, you hereby grant to us a perpetual, irrevocable, <add>non-exclusive, worldwide, no-charge, royalty-free license to: <add> <add> * make, have made, use, sell, offer to sell, import, and otherwise transfer <add> your contribution in whole or in part, alone or in combination with or <add> included in any product, work or materials arising out of the project to <add> which your contribution was submitted, and <add> <add> * at our option, to sublicense these same rights to third parties through <add> multiple levels of sublicensees or other licensing arrangements. <add> <add>4. Except as set out above, you keep all right, title, and interest in your <add>contribution. The rights that you grant to us under these terms are effective <add>on the date you first submitted a contribution to us, even if your submission <add>took place before the date you sign these terms. <add> <add>5. You covenant, represent, warrant and agree that: <add> <add> * Each contribution that you submit is and shall be an original work of <add> authorship and you can legally grant the rights set out in this SCA; <add> <add> * to the best of your knowledge, each contribution will not violate any <add> third party's copyrights, trademarks, patents, or other intellectual <add> property rights; and <add> <add> * each contribution shall be in compliance with U.S. export control laws and <add> other applicable export and import laws. You agree to notify us if you <add> become aware of any circumstance which would make any of the foregoing <add> representations inaccurate in any respect. We may publicly disclose your <add> participation in the project, including the fact that you have signed the SCA. <add> <add>6. This SCA is governed by the laws of the State of California and applicable <add>U.S. Federal law. Any choice of law rules will not apply. <add> <add>7. Please place an “x” on one of the applicable statement below. Please do NOT <add>mark both statements: <add> <add> * [x] I am signing on behalf of myself as an individual and no other person <add> or entity, including my employer, has or will have rights with respect to my <add> contributions. <add> <add> * [ ] I am signing on behalf of my employer or a legal entity and I have the <add> actual authority to contractually bind that entity. <add> <add>## Contributor Details <add> <add>| Field | Entry | <add>|------------------------------- | -------------------- | <add>| Name | Jeroen Bobbeldijk | <add>| Company name (if applicable) | | <add>| Title or role (if applicable) | | <add>| Date | 22-10-2017 | <add>| GitHub username | jerbob92 | <add>| Website (optional) | |
1
Python
Python
Add spare space for growth for .npy files
15cb486664e110dfe73ed70a3b9c68915a69bd88
<ide><path>numpy/lib/format.py <ide> MAGIC_LEN = len(MAGIC_PREFIX) + 2 <ide> ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 <ide> BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes <add># allow growth within the address space of a 64 bit machine along one axis <add>GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype <ide> <ide> # difference between version 1.0 and 2.0 is a 4 byte (I) header length <ide> # instead of 2 bytes (H) allowing storage of large structured arrays <ide> def _write_array_header(fp, d, version=None): <ide> header.append("'%s': %s, " % (key, repr(value))) <ide> header.append("}") <ide> header = "".join(header) <add> <add> # Add some spare space so that the array header can be modified in-place <add> # when changing the array size, e.g. when growing it by appending data at <add> # the end. <add> shape = d['shape'] <add> header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( <add> shape[-1 if d['fortran_order'] else 0] <add> ))) if len(shape) > 0 else 0) <add> <ide> if version is None: <ide> header = _wrap_header_guess_version(header) <ide> else: <ide><path>numpy/lib/tests/test_format.py <ide> def test_bad_magic_args(): <ide> <ide> def test_large_header(): <ide> s = BytesIO() <del> d = {'a': 1, 'b': 2} <add> d = {'shape': tuple(), 'fortran_order': False, 'descr': '<i8'} <ide> format.write_array_header_1_0(s, d) <ide> <ide> s = BytesIO() <del> d = {'a': 1, 'b': 2, 'c': 'x'*256*256} <add> d['descr'] = [('x'*256*256, '<i8')] <ide> assert_raises(ValueError, format.write_array_header_1_0, s, d) <ide> <ide> <ide> def test_bad_header(): <ide> assert_raises(ValueError, format.read_array_header_1_0, s) <ide> <ide> # headers without the exact keys required should fail <del> d = {"shape": (1, 2), <del> "descr": "x"} <del> s = BytesIO() <del> format.write_array_header_1_0(s, d) <add> # d = {"shape": (1, 2), <add> # "descr": "x"} <add> s = BytesIO( <add> b"\x93NUMPY\x01\x006\x00{'descr': 'x', 'shape': (1, 2), }" + <add> b" \n" <add> ) <ide> assert_raises(ValueError, format.read_array_header_1_0, s) <ide> <ide> d = {"shape": (1, 2), <ide> def test_unicode_field_names(tmpdir): <ide> with assert_warns(UserWarning): <ide> format.write_array(f, arr, version=None) <ide> <add>def test_header_growth_axis(): <add> for is_fortran_array, dtype_space, expected_header_length in [ <add> [False, 22, 128], [False, 23, 192], [True, 23, 128], [True, 24, 192] <add> ]: <add> for size in [10**i for i in range(format.GROWTH_AXIS_MAX_DIGITS)]: <add> fp = BytesIO() <add> format.write_array_header_1_0(fp, { <add> 'shape': (2, size) if is_fortran_array else (size, 2), <add> 'fortran_order': is_fortran_array, <add> 'descr': np.dtype([(' '*dtype_space, int)]) <add> }) <add> <add> assert len(fp.getvalue()) == expected_header_length <ide> <ide> @pytest.mark.parametrize('dt, fail', [ <ide> (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
2
Python
Python
Add tests for np.sctype2char
0412b2a59cd94ff58da5631f373526f36c4fef60
<ide><path>numpy/core/tests/test_numerictypes.py <ide> def test_other(self, t): <ide> assert_equal(np.maximum_sctype(t), t) <ide> <ide> <add>class Test_sctype2char(object): <add> # This function is old enough that we're really just documenting the quirks <add> # at this point. <add> <add> def test_scalar_type(self): <add> assert_equal(np.sctype2char(np.double), 'd') <add> assert_equal(np.sctype2char(np.int_), 'l') <add> assert_equal(np.sctype2char(np.unicode_), 'U') <add> assert_equal(np.sctype2char(np.bytes_), 'S') <add> <add> def test_other_type(self): <add> assert_equal(np.sctype2char(float), 'd') <add> assert_equal(np.sctype2char(list), 'O') <add> assert_equal(np.sctype2char(np.ndarray), 'O') <add> <add> def test_third_party_scalar_type(self): <add> from numpy.core._rational_tests import rational <add> assert_raises(KeyError, np.sctype2char, rational) <add> assert_raises(KeyError, np.sctype2char, rational(1)) <add> <add> def test_array_instance(self): <add> assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd') <add> <add> def test_abstract_type(self): <add> assert_raises(KeyError, np.sctype2char, np.floating) <add> <add> def test_non_type(self): <add> assert_raises(ValueError, np.sctype2char, 1) <add> <add> <ide> @pytest.mark.skipif(sys.flags.optimize > 1, <ide> reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") <ide> class TestDocStrings(object):
1
Python
Python
Add benchmark for sorting random array
b728205741edbf770bda94be203fb7b24039c2ee
<ide><path>benchmarks/benchmarks/bench_function_base.py <ide> class Sort(Benchmark): <ide> ['quick', 'merge', 'heap'], <ide> ['float64', 'int64', 'uint64'], <ide> [ <add> ('random',), <ide> ('ordered',), <ide> ('reversed',), <ide> ('uniform',),
1
Python
Python
Fix TypeError when raising TypeError
8894b81b166f6b0c200dde77a70fb360e95b0f7c
<ide><path>numpy/lib/stride_tricks.py <ide> def broadcast_arrays(*args, **kwargs): <ide> subok = kwargs.pop('subok', False) <ide> if kwargs: <ide> raise TypeError('broadcast_arrays() got an unexpected keyword ' <del> 'argument {}'.format(kwargs.pop())) <add> 'argument {!r}'.format(kwargs.keys()[0])) <ide> args = [np.array(_m, copy=False, subok=subok) for _m in args] <ide> <ide> shape = _broadcast_shape(*args)
1
Ruby
Ruby
Require action_dispatch, not action_controller
2675ba9ef4ae1839f721aff77c255ee1cb8ac7ac
<ide><path>railties/lib/rails/commands/server.rb <del>require 'action_controller' <add>require 'action_dispatch' <ide> <ide> require 'fileutils' <ide> require 'optparse'
1
Text
Text
Add Zijian Liu to collaborators
ecb78188a36f529e9095e9d21d4637d68e3e325c
<ide><path>README.md <ide> For information about the governance of the Node.js project, see <ide> **Luigi Pinca** &lt;[email protected]&gt; (he/him) <ide> * [lundibundi](https://github.com/lundibundi) - <ide> **Denys Otrishko** &lt;[email protected]&gt; (he/him) <add>* [Lxxyx](https://github.com/Lxxyx) - <add>**Zijian Liu** &lt;[email protected]&gt; (he/him) <ide> * [mafintosh](https://github.com/mafintosh) - <ide> **Mathias Buus** &lt;[email protected]&gt; (he/him) <ide> * [mcollina](https://github.com/mcollina) -
1
Javascript
Javascript
Fix renderPage in production mode
8711a7d14e064604078a5cfd29eb90a1ab1f18c7
<ide><path>client/next-dev.js <ide> import 'react-hot-loader/patch' <add>import initNext, * as next from './' <ide> import initOnDemandEntries from './on-demand-entries-client' <ide> import initWebpackHMR from './webpack-hot-middleware-client' <ide> <del>const next = window.next = require('./') <add>window.next = next <ide> <del>next.default() <add>initNext() <ide> .then((emitter) => { <ide> initOnDemandEntries() <ide> initWebpackHMR() <ide><path>client/next.js <del>import next from './' <add>import initNext, * as next from './' <ide> <del>next() <add>window.next = next <add> <add>initNext() <ide> .catch((err) => { <ide> console.error(`${err.message}\n${err.stack}`) <ide> })
2
Go
Go
Add socket activation for Go apps
cfb7711a74dc4b54d879f79bc2f6435ed71163b9
<ide><path>pkg/socketactivation/activation.go <add>/* <add> Package to allow go applications to immediately start <add> listening on a socket, unix, tcp, udp but hold connections <add> until the application has booted and is ready to accept them <add>*/ <add>package socketactivation <add> <add>import ( <add> "fmt" <add> "net" <add> "time" <add>) <add> <add>// NewActivationListener returns a listener listening on addr with the protocol. It sets the <add>// timeout to wait on first connection before an error is returned <add>func NewActivationListener(proto, addr string, activate chan struct{}, timeout time.Duration) (net.Listener, error) { <add> wrapped, err := net.Listen(proto, addr) <add> if err != nil { <add> return nil, err <add> } <add> <add> return &defaultListener{ <add> wrapped: wrapped, <add> activate: activate, <add> timeout: timeout, <add> }, nil <add>} <add> <add>type defaultListener struct { <add> wrapped net.Listener // the real listener to wrap <add> ready bool // is the listner ready to start accpeting connections <add> activate chan struct{} <add> timeout time.Duration // how long to wait before we consider this an error <add>} <add> <add>func (l *defaultListener) Close() error { <add> return l.wrapped.Close() <add>} <add> <add>func (l *defaultListener) Addr() net.Addr { <add> return l.wrapped.Addr() <add>} <add> <add>func (l *defaultListener) Accept() (net.Conn, error) { <add> // if the listen has been told it is ready then we can go ahead and <add> // start returning connections <add> if l.ready { <add> return l.wrapped.Accept() <add> } <add> <add> select { <add> case <-time.After(l.timeout): <add> // close the connection so any clients are disconnected <add> l.Close() <add> return nil, fmt.Errorf("timeout (%s) reached waiting for listener to become ready", l.timeout.String()) <add> case <-l.activate: <add> l.ready = true <add> return l.Accept() <add> } <add> panic("unreachable") <add>}
1
Javascript
Javascript
Fix bad unittest
944fa78369653c25262c6e4d325efdd2a1f967b9
<ide><path>test/NormalModule.unittest.js <ide> describe("NormalModule", function() { <ide> resource, <ide> parser <ide> ); <add> normalModule.cacheable = true; <ide> }); <ide> describe("#identifier", function() { <ide> it("returns an identifier for this module", function() {
1
Javascript
Javascript
Add `action` support to Ember.TextField
da4aa8531f337811c69e86300d81352fc1dab241
<ide><path>packages/ember-handlebars/lib/controls/text_field.js <ide> Ember.TextField = Ember.View.extend(Ember.TextSupport, <ide> @type String <ide> @default null <ide> */ <del> size: null <add> size: null, <add> <add> /** <add> The action to be sent when the user presses the return key. <add> <add> This is similar to the `{{action}}` helper, but is fired when <add> the user presses the return key when editing a text field, and sends <add> the value of the field as the context. <add> <add> @property action <add> @type String <add> @default null <add> */ <add> action: null, <add> <add> insertNewline: function() { <add> var controller = get(this, 'controller'), <add> action = get(this, 'action'); <add> <add> if (action) { <add> controller.send(action, get(this, 'value')); <add> } <add> <add> return false; <add> } <ide> }); <ide><path>packages/ember-handlebars/tests/controls/text_field_test.js <ide> test("should call the cancel method when escape key is pressed", function() { <ide> ok(wasCalled, "invokes cancel method"); <ide> }); <ide> <add>test("should sent an action if one is defined when the return key is pressed", function() { <add> expect(2); <add> <add> var StubController = Ember.Object.extend({ <add> send: function(actionName, arg1) { <add> equal(actionName, 'didTriggerAction', "text field sent correct action name"); <add> equal(arg1, "textFieldValue", "text field sent its current value as first argument"); <add> } <add> }); <add> <add> textField.set('action', 'didTriggerAction'); <add> textField.set('value', "textFieldValue"); <add> textField.set('controller', StubController.create()); <add> <add> Ember.run(function() { textField.append(); }); <add> <add> var event = { <add> keyCode: 13 <add> }; <add> <add> textField.trigger('keyUp', event); <add>}); <add> <ide> // test("listens for focus and blur events", function() { <ide> // var focusCalled = 0; <ide> // var blurCalled = 0;
2
PHP
PHP
Update PHPDoc comments to match signature
8cd28a3a78eaf0097e7bcac29309646310cd277d
<ide><path>src/Illuminate/Log/LogManager.php <ide> public function extend($driver, Closure $callback) <ide> /** <ide> * Unset the given channel instance. <ide> * <del> * @param string|null $name <add> * @param string|null $driver <ide> * @return $this <ide> */ <ide> public function forgetChannel($driver = null) <ide><path>src/Illuminate/Queue/CallQueuedClosure.php <ide> public function __construct(SerializableClosure $closure) <ide> /** <ide> * Create a new job instance. <ide> * <del> * @param \Closure $closure <add> * @param \Closure $job <ide> * @return self <ide> */ <ide> public static function create(Closure $job)
2
Text
Text
Changing the word "instalaciones" to "instalación"
abf91a30ed703a4acad6072f87363c136cfe75e6
<ide><path>guide/spanish/r/index.md <ide> localeTitle: R <ide> <ide> R es un lenguaje de programación de código abierto y un entorno de software para computación estadística y gráficos. Es uno de los idiomas principales utilizados por los científicos de datos y estadísticos por igual. Es compatible con la R Foundation for Statistical Computing y una gran comunidad de desarrolladores de código abierto. Dado que R utilizó una interfaz de línea de comandos, puede haber una curva de aprendizaje pronunciada para algunas personas que están acostumbradas a usar programas centrados en GUI como SPSS y SAS, por lo que las extensiones a R como RStudio pueden ser muy beneficiosas. Dado que R es un programa de código abierto y está disponible gratuitamente, puede haber una gran atracción para académicos cuyo acceso a programas estadísticos está regulado a través de su asociación a varios colegios o universidades. <ide> <del>## Instalaciones <add>## Instalación <ide> <ide> Lo primero que debe comenzar con R es descargarlo de su [sitio oficial de](https://www.r-project.org/) acuerdo con su sistema operativo. Ahora instálalo en tu computadora. Para obtener ayuda sobre la instalación, consulte la sección de referencia a continuación. <ide> <ide> Lo primero que debe comenzar con R es descargarlo de su [sitio oficial de](https <ide> <ide> * [Instalando R en Windows](http://youtu.be/Ohnk9hcxf9M) <ide> <del>* [Instalando R en Mac](https://youtu.be/uxuuWXU-7UQ) <ide>\ No newline at end of file <add>* [Instalando R en Mac](https://youtu.be/uxuuWXU-7UQ)
1
Ruby
Ruby
Use index_by and index_with wherever possible
d3599d8affa90b60b4e9ab24fdbd35477b5ac06d
<ide><path>actionview/lib/action_view/renderer/partial_renderer/collection_caching.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActionView <ide> module CollectionCaching # :nodoc: <ide> extend ActiveSupport::Concern <ide> def expanded_cache_key(key, view, template, digest_path) <ide> # If the partial is not already cached it will also be <ide> # written back to the underlying cache store. <ide> def fetch_or_cache_partial(cached_partials, template, order_by:) <del> order_by.each_with_object({}) do |cache_key, hash| <del> hash[cache_key] = <del> if content = cached_partials[cache_key] <del> build_rendered_template(content, template) <del> else <del> yield.tap do |rendered_partial| <del> collection_cache.write(cache_key, rendered_partial.body) <del> end <del> end <add> order_by.index_with do |cache_key| <add> if content = cached_partials[cache_key] <add> build_rendered_template(content, template) <add> else <add> yield.tap do |rendered_partial| <add> collection_cache.write(cache_key, rendered_partial.body) <add> end <ide> end <add> end <ide> end <ide> end <ide> end <ide><path>actionview/test/template/form_options_helper_test.rb <ide> # frozen_string_literal: true <ide> <ide> require "abstract_unit" <add>require "active_support/core_ext/enumerable" <ide> <ide> class Map < Hash <ide> def category <ide> class << base <ide> ActiveSupport::TimeZone.prepend FakeZones <ide> <ide> setup do <del> ActiveSupport::TimeZone.fake_zones = %w(A B C D E).map do |id| <del> [ id, FakeZones::FakeZone.new(id) ] <del> end.to_h <add> ActiveSupport::TimeZone.fake_zones = %w(A B C D E).index_with do |id| <add> FakeZones::FakeZone.new(id) <add> end <ide> <ide> @fake_timezones = ActiveSupport::TimeZone.all <ide> end <ide><path>activerecord/lib/active_record/associations/preloader.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActiveRecord <ide> module Associations <ide> # Implements the details of eager loading of Active Record associations. 
<ide> def preloaded_records <ide> end <ide> <ide> def records_by_owner <del> @records_by_owner ||= owners.each_with_object({}) do |owner, result| <del> result[owner] = Array(owner.association(reflection.name).target) <add> @records_by_owner ||= owners.index_with do |owner| <add> Array(owner.association(reflection.name).target) <ide> end <ide> end <ide> <ide><path>activerecord/lib/active_record/attribute_methods.rb <ide> # frozen_string_literal: true <ide> <ide> require "mutex_m" <add>require "active_support/core_ext/enumerable" <ide> <ide> module ActiveRecord <ide> # = Active Record Attribute Methods <ide> def attribute_method?(attr_name) <ide> end <ide> <ide> def attributes_with_values(attribute_names) <del> attribute_names.each_with_object({}) do |name, attrs| <del> attrs[name] = _read_attribute(name) <add> attribute_names.index_with do |name| <add> _read_attribute(name) <ide> end <ide> end <ide> <ide><path>activerecord/lib/active_record/core.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <ide> require "active_support/core_ext/hash/indifferent_access" <ide> require "active_support/core_ext/string/filters" <ide> require "active_support/parameter_filter" <ide> def find_by(*args) # :nodoc: <ide> keys = hash.keys <ide> <ide> statement = cached_find_by_statement(keys) { |params| <del> wheres = keys.each_with_object({}) { |param, o| <del> o[param] = params.bind <del> } <add> wheres = keys.index_with { params.bind } <ide> where(wheres).limit(1) <ide> } <ide> begin <ide><path>activerecord/lib/active_record/insert_all.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActiveRecord <ide> class InsertAll # :nodoc: <ide> attr_reader :model, :connection, :inserts, :keys <ide> def extract_types_from_columns_on(table_name, keys:) <ide> unknown_column = (keys - columns.keys).first <ide> raise UnknownAttributeError.new(model.new, unknown_column) if unknown_column <ide> <del> keys.map { |key| [ key, connection.lookup_cast_type_from_column(columns[key]) ] }.to_h <add> keys.index_with { |key| connection.lookup_cast_type_from_column(columns[key]) } <ide> end <ide> <ide> def format_columns(columns) <ide><path>activerecord/lib/active_record/relation/calculations.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActiveRecord <ide> module Calculations <ide> # Count the records. 
<ide> def execute_grouped_calculation(operation, column_name, distinct) #:nodoc: <ide> if association <ide> key_ids = calculated_data.collect { |row| row[group_aliases.first] } <ide> key_records = association.klass.base_class.where(association.klass.base_class.primary_key => key_ids) <del> key_records = Hash[key_records.map { |r| [r.id, r] }] <add> key_records = key_records.index_by(&:id) <ide> end <ide> <ide> Hash[calculated_data.map do |row| <ide><path>activerecord/lib/active_record/test_fixtures.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActiveRecord <ide> module TestFixtures <ide> extend ActiveSupport::Concern <ide> def setup_shared_connection_pool <ide> end <ide> <ide> def load_fixtures(config) <del> fixtures = ActiveRecord::FixtureSet.create_fixtures(fixture_path, fixture_table_names, fixture_class_names, config) <del> Hash[fixtures.map { |f| [f.name, f] }] <add> ActiveRecord::FixtureSet.create_fixtures(fixture_path, fixture_table_names, fixture_class_names, config).index_by(&:name) <ide> end <ide> <ide> def instantiate_fixtures <ide><path>activerecord/test/cases/arel/support/fake_record.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> require "date" <ide> module FakeRecord <ide> class Column < Struct.new(:name, :type) <ide> def initialize(visitor = nil) <ide> ] <ide> } <ide> @columns_hash = { <del> "users" => Hash[@columns["users"].map { |x| [x.name, x] }], <del> "products" => Hash[@columns["products"].map { |x| [x.name, x] }] <add> "users" => @columns["users"].index_by(&:name), <add> "products" => @columns["products"].index_by(&:name) <ide> } <ide> @primary_keys = { <ide> "users" => "id", <ide><path>activerecord/test/cases/attribute_methods/read_test.rb <ide> # frozen_string_literal: true <ide> <ide> require "cases/helper" <add>require "active_support/core_ext/enumerable" <ide> <ide> module ActiveRecord <ide> module AttributeMethods <ide> def self.columns <ide> end <ide> <ide> def self.columns_hash <del> Hash[attribute_names.map { |name| <del> [name, FakeColumn.new(name)] <del> }] <add> attribute_names.index_with { |name| <add> FakeColumn.new(name) <add> } <ide> end <ide> end <ide> end <ide><path>activerecord/test/cases/base_test.rb <ide> require "models/car" <ide> require "models/bulb" <ide> require "concurrent/atomic/count_down_latch" <add>require "active_support/core_ext/enumerable" <ide> <ide> class FirstAbstractClass < ActiveRecord::Base <ide> self.abstract_class = true <ide> def test_attributes_on_dummy_time_with_invalid_time <ide> def test_attributes <ide> category = Category.new(name: "Ruby") <ide> <del> expected_attributes = category.attribute_names.map do |attribute_name| <del> [attribute_name, category.public_send(attribute_name)] <del> end.to_h <add> expected_attributes = category.attribute_names.index_with do |attribute_name| <add> category.public_send(attribute_name) <add> end <ide> <ide> assert_instance_of Hash, category.attributes <ide> assert_equal expected_attributes, category.attributes <ide><path>activerecord/test/cases/counter_cache_test.rb <ide> require "models/subscriber" <ide> require "models/subscription" <ide> require "models/book" <add>require "active_support/core_ext/enumerable" <ide> <ide> class CounterCacheTest < ActiveRecord::TestCase <ide> fixtures :topics, :categories, :categorizations, :cars, :dogs, :dog_lovers, :people, :friendships, :subscribers, :subscriptions, :books <ide> class ::SpecialReply < ::Reply <ide> <ide> 
private <ide> def assert_touching(record, *attributes) <del> record.update_columns attributes.map { |attr| [ attr, 5.minutes.ago ] }.to_h <del> touch_times = attributes.map { |attr| [ attr, record.public_send(attr) ] }.to_h <add> record.update_columns attributes.index_with(5.minutes.ago) <add> touch_times = attributes.index_with { |attr| record.public_send(attr) } <ide> <ide> yield <ide> <ide><path>activesupport/lib/active_support/cache.rb <ide> require "zlib" <ide> require "active_support/core_ext/array/extract_options" <ide> require "active_support/core_ext/array/wrap" <add>require "active_support/core_ext/enumerable" <ide> require "active_support/core_ext/module/attribute_accessors" <ide> require "active_support/core_ext/numeric/bytes" <ide> require "active_support/core_ext/numeric/time" <ide> def fetch_multi(*names) <ide> instrument :read_multi, names, options do |payload| <ide> reads = read_multi_entries(names, **options) <ide> writes = {} <del> ordered = names.each_with_object({}) do |name, hash| <del> hash[name] = reads.fetch(name) { writes[name] = yield(name) } <add> ordered = names.index_with do |name| <add> reads.fetch(name) { writes[name] = yield(name) } <ide> end <ide> <ide> payload[:hits] = reads.keys <ide><path>activesupport/lib/active_support/cache/mem_cache_store.rb <ide> raise e <ide> end <ide> <add>require "active_support/core_ext/enumerable" <ide> require "active_support/core_ext/marshal" <ide> require "active_support/core_ext/array/extract_options" <ide> <ide> def write_entry(key, entry, **options) <ide> <ide> # Reads multiple entries from the cache implementation. <ide> def read_multi_entries(names, **options) <del> keys_to_names = Hash[names.map { |name| [normalize_key(name, options), name] }] <add> keys_to_names = names.index_by { |name| normalize_key(name, options) } <ide> <ide> raw_values = @data.with { |c| c.get_multi(keys_to_names.keys) } <ide> values = {} <ide><path>activesupport/lib/active_support/current_attributes.rb <ide> # frozen_string_literal: true <ide> <ide> require "active_support/callbacks" <add>require "active_support/core_ext/enumerable" <ide> <ide> module ActiveSupport <ide> # Abstract super class that provides a thread-isolated attributes singleton, which resets automatically <ide> def assign_attributes(new_attributes) <ide> end <ide> <ide> def compute_attributes(keys) <del> keys.collect { |key| [ key, public_send(key) ] }.to_h <add> keys.index_with { |key| public_send(key) } <ide> end <ide> end <ide> end <ide><path>activesupport/lib/active_support/testing/assertions.rb <ide> # frozen_string_literal: true <ide> <add>require "active_support/core_ext/enumerable" <add> <ide> module ActiveSupport <ide> module Testing <ide> module Assertions <ide> def assert_difference(expression, *args, &block) <ide> else <ide> difference = args[0] || 1 <ide> message = args[1] <del> Hash[Array(expression).map { |e| [e, difference] }] <add> Array(expression).index_with(difference) <ide> end <ide> <ide> exps = expressions.keys.map { |e| <ide><path>railties/lib/rails/generators.rb <ide> <ide> require "active_support/core_ext/kernel/singleton_class" <ide> require "active_support/core_ext/array/extract_options" <add>require "active_support/core_ext/enumerable" <ide> require "active_support/core_ext/hash/deep_merge" <ide> require "active_support/core_ext/module/attribute_accessors" <ide> require "active_support/core_ext/string/indent" <ide> def find_by_namespace(name, base = nil, context = nil) #:nodoc: <ide> <ide> lookup(lookups) <ide> <del> namespaces = 
Hash[subclasses.map { |klass| [klass.namespace, klass] }] <add> namespaces = subclasses.index_by(&:namespace) <ide> lookups.each do |namespace| <ide> klass = namespaces[namespace] <ide> return klass if klass
17
Python
Python
Remove unused import
ece9a009deb0202f4344881607426d651d89da38
<ide><path>keras/backend/tensorflow_backend.py <ide> from tensorflow.python.ops import control_flow_ops <ide> from tensorflow.python.ops import functional_ops <ide> from tensorflow.python.ops import ctc_ops as ctc <del>from tensorflow.python.ops import variables as tf_variables <ide> from tensorflow.python.client import device_lib <ide> <ide> from collections import defaultdict
1
Ruby
Ruby
Add tests for set_sequence_name etc
fd7ca98bb6218de42b821d48db083ea8c0e97d67
<ide><path>activerecord/lib/active_record/base.rb <ide> def inheritance_column <ide> if self == Base <ide> 'type' <ide> else <del> defined?(@inheritance_column) ? @inheritance_column : superclass.inheritance_column <add> (@inheritance_column ||= nil) || superclass.inheritance_column <ide> end <ide> end <ide> <ide><path>activerecord/test/cases/base_test.rb <ide> def test_set_inheritance_column_with_block <ide> assert_equal "type_id", k.inheritance_column <ide> end <ide> <add> def test_set_sequence_name_with_value <add> k = Class.new( ActiveRecord::Base ) <add> k.sequence_name = "foo" <add> assert_equal "foo", k.sequence_name <add> <add> k.set_sequence_name "bar" <add> assert_equal "bar", k.sequence_name <add> end <add> <add> def test_set_sequence_name_with_block <add> k = Class.new( ActiveRecord::Base ) <add> k.table_name = "projects" <add> orig_name = k.sequence_name <add> <add> if orig_name <add> k.set_sequence_name { original_sequence_name + "_lol" } <add> assert_equal orig_name + "_lol", k.sequence_name <add> else <add> skip "sequences not supported by db" <add> end <add> end <add> <ide> def test_count_with_join <ide> res = Post.count_by_sql "SELECT COUNT(*) FROM posts LEFT JOIN comments ON posts.id=comments.post_id WHERE posts.#{QUOTED_TYPE} = 'Post'" <ide>
2
Mixed
Java
Allow headless JS tasks to retry
ac7ec4602fd4d8ec033a084df6a92437ea2d3a8a
<ide><path>Libraries/ReactNative/AppRegistry.js <ide> const createPerformanceLogger = require('../Utilities/createPerformanceLogger'); <ide> import type {IPerformanceLogger} from '../Utilities/createPerformanceLogger'; <ide> <ide> import NativeHeadlessJsTaskSupport from './NativeHeadlessJsTaskSupport'; <add>import HeadlessJsTaskError from './HeadlessJsTaskError'; <ide> <ide> type Task = (taskData: any) => Promise<void>; <ide> type TaskProvider = () => Task; <ide> const AppRegistry = { <ide> }) <ide> .catch(reason => { <ide> console.error(reason); <del> if (NativeHeadlessJsTaskSupport) { <del> NativeHeadlessJsTaskSupport.notifyTaskFinished(taskId); <add> <add> if ( <add> NativeHeadlessJsTaskSupport && <add> reason instanceof HeadlessJsTaskError <add> ) { <add> NativeHeadlessJsTaskSupport.notifyTaskRetry(taskId).then( <add> retryPosted => { <add> if (!retryPosted) { <add> NativeHeadlessJsTaskSupport.notifyTaskFinished(taskId); <add> } <add> }, <add> ); <ide> } <ide> }); <ide> }, <ide><path>Libraries/ReactNative/HeadlessJsTaskError.js <add>/** <add> * Copyright (c) Facebook, Inc. and its affiliates. <add> * <add> * This source code is licensed under the MIT license found in the <add> * LICENSE file in the root directory of this source tree. <add> * <add> * @flow <add> * @format <add> */ <add>'use strict'; <add> <add>export default class HeadlessJsTaskError extends Error {} <ide><path>Libraries/ReactNative/NativeHeadlessJsTaskSupport.js <ide> import * as TurboModuleRegistry from '../TurboModule/TurboModuleRegistry'; <ide> <ide> export interface Spec extends TurboModule { <ide> +notifyTaskFinished: (taskId: number) => void; <add> +notifyTaskRetry: (taskId: number) => Promise<boolean>; <ide> } <ide> <ide> export default TurboModuleRegistry.get<Spec>('HeadlessJsTaskSupport'); <ide><path>ReactAndroid/src/androidTest/java/com/facebook/react/tests/core/WritableNativeMapTest.java <ide> import androidx.test.runner.AndroidJUnit4; <ide> import com.facebook.react.bridge.NoSuchKeyException; <ide> import com.facebook.react.bridge.UnexpectedNativeTypeException; <add>import com.facebook.react.bridge.WritableArray; <add>import com.facebook.react.bridge.WritableMap; <ide> import com.facebook.react.bridge.WritableNativeArray; <ide> import com.facebook.react.bridge.WritableNativeMap; <ide> import org.junit.Assert; <ide> @RunWith(AndroidJUnit4.class) <ide> public class WritableNativeMapTest { <ide> <add> private static final String ARRAY = "array"; <add> private static final String MAP = "map"; <ide> private WritableNativeMap mMap; <ide> <ide> @Before <ide> public void setup() { <ide> mMap.putDouble("double", 1.2); <ide> mMap.putInt("int", 1); <ide> mMap.putString("string", "abc"); <del> mMap.putMap("map", new WritableNativeMap()); <del> mMap.putArray("array", new WritableNativeArray()); <add> mMap.putMap(MAP, new WritableNativeMap()); <add> mMap.putArray(ARRAY, new WritableNativeArray()); <ide> mMap.putBoolean("dvacca", true); <ide> } <ide> <ide> public void testErrorMessageContainsKey() { <ide> assertThat(e.getMessage()).contains(key); <ide> } <ide> } <add> <add> @Test <add> public void testCopy() { <add> final WritableMap copy = mMap.copy(); <add> <add> assertThat(copy).isNotSameAs(mMap); <add> assertThat(copy.getMap(MAP)).isNotSameAs(mMap.getMap(MAP)); <add> assertThat(copy.getArray(ARRAY)).isNotSameAs(mMap.getArray(ARRAY)); <add> } <add> <add> @Test <add> public void testCopyModification() { <add> final WritableMap copy = mMap.copy(); <add> copy.putString("string", "foo"); <add> <add> 
assertThat(copy.getString("string")).isEqualTo("foo"); <add> assertThat(mMap.getString("string")).isEqualTo("abc"); <add> } <ide> } <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/JavaOnlyMap.java <ide> public void merge(@Nonnull ReadableMap source) { <ide> mBackingMap.putAll(((JavaOnlyMap) source).mBackingMap); <ide> } <ide> <add> @Override <add> public WritableMap copy() { <add> final JavaOnlyMap target = new JavaOnlyMap(); <add> target.merge(this); <add> return target; <add> } <add> <ide> @Override <ide> public void putArray(@Nonnull String key, @Nullable WritableArray value) { <ide> mBackingMap.put(key, value); <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/UiThreadUtil.java <ide> public static void assertNotOnUiThread() { <ide> * Runs the given {@code Runnable} on the UI thread. <ide> */ <ide> public static void runOnUiThread(Runnable runnable) { <add> runOnUiThread(runnable, 0); <add> } <add> <add> /** <add> * Runs the given {@code Runnable} on the UI thread with the specified delay. <add> */ <add> public static void runOnUiThread(Runnable runnable, long delayInMs) { <ide> synchronized (UiThreadUtil.class) { <ide> if (sMainHandler == null) { <ide> sMainHandler = new Handler(Looper.getMainLooper()); <ide> } <ide> } <del> sMainHandler.post(runnable); <add> sMainHandler.postDelayed(runnable, delayInMs); <ide> } <ide> } <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/WritableMap.java <ide> public interface WritableMap extends ReadableMap { <ide> void putMap(@Nonnull String key, @Nullable WritableMap value); <ide> <ide> void merge(@Nonnull ReadableMap source); <add> WritableMap copy(); <ide> } <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/WritableNativeMap.java <ide> public void merge(@Nonnull ReadableMap source) { <ide> mergeNativeMap((ReadableNativeMap) source); <ide> } <ide> <add> @Override <add> public WritableMap copy() { <add> final WritableNativeMap target = new WritableNativeMap(); <add> target.merge(this); <add> return target; <add> } <add> <ide> public WritableNativeMap() { <ide> super(initHybrid()); <ide> } <ide><path>ReactAndroid/src/main/java/com/facebook/react/jstasks/HeadlessJsTaskConfig.java <ide> public class HeadlessJsTaskConfig { <ide> private final WritableMap mData; <ide> private final long mTimeout; <ide> private final boolean mAllowedInForeground; <add> private final HeadlessJsTaskRetryPolicy mRetryPolicy; <ide> <ide> /** <ide> * Create a HeadlessJsTaskConfig. Equivalent to calling <ide> public HeadlessJsTaskConfig( <ide> WritableMap data, <ide> long timeout, <ide> boolean allowedInForeground) { <add> this(taskKey, data, timeout, allowedInForeground, NoRetryPolicy.INSTANCE); <add> } <add> <add> /** <add> * Create a HeadlessJsTaskConfig. <add> * <add> * @param taskKey the key for the JS task to execute. This is the same key that you call {@code <add> * AppRegistry.registerTask} with in JS. <add> * @param data a map of parameters passed to the JS task executor. <add> * @param timeout the amount of time (in ms) after which the React instance should be terminated <add> * regardless of whether the task has completed or not. This is meant as a safeguard against <add> * accidentally keeping the device awake for long periods of time because JS crashed or some <add> * request timed out. A value of 0 means no timeout (should only be used for long-running tasks <add> * such as music playback). <add> * @param allowedInForeground whether to allow this task to run while the app is in the foreground <add> * (i.e. 
there is a host in resumed mode for the current ReactContext). Only set this to true if <add> * you really need it. Note that tasks run in the same JS thread as UI code, so doing expensive <add> * operations would degrade user experience. <add> * @param retryPolicy the number of times & delays the task should be retried on error. <add> */ <add> public HeadlessJsTaskConfig( <add> String taskKey, <add> WritableMap data, <add> long timeout, <add> boolean allowedInForeground, <add> HeadlessJsTaskRetryPolicy retryPolicy) { <ide> mTaskKey = taskKey; <ide> mData = data; <ide> mTimeout = timeout; <ide> mAllowedInForeground = allowedInForeground; <add> mRetryPolicy = retryPolicy; <add> } <add> <add> public HeadlessJsTaskConfig(HeadlessJsTaskConfig source) { <add> mTaskKey = source.mTaskKey; <add> mData = source.mData.copy(); <add> mTimeout = source.mTimeout; <add> mAllowedInForeground = source.mAllowedInForeground; <add> <add> final HeadlessJsTaskRetryPolicy retryPolicy = source.mRetryPolicy; <add> if (retryPolicy != null) { <add> mRetryPolicy = retryPolicy.copy(); <add> } else { <add> mRetryPolicy = null; <add> } <ide> } <ide> <ide> /* package */ String getTaskKey() { <ide> public HeadlessJsTaskConfig( <ide> /* package */ boolean isAllowedInForeground() { <ide> return mAllowedInForeground; <ide> } <add> <add> /* package */ HeadlessJsTaskRetryPolicy getRetryPolicy() { <add> return mRetryPolicy; <add> } <ide> } <ide><path>ReactAndroid/src/main/java/com/facebook/react/jstasks/HeadlessJsTaskContext.java <ide> <ide> package com.facebook.react.jstasks; <ide> <del>import java.lang.ref.WeakReference; <del>import java.util.Set; <del>import java.util.WeakHashMap; <del>import java.util.concurrent.CopyOnWriteArraySet; <del>import java.util.concurrent.atomic.AtomicInteger; <del> <ide> import android.os.Handler; <ide> import android.util.SparseArray; <ide> <ide> import com.facebook.react.common.LifecycleState; <ide> import com.facebook.react.modules.appregistry.AppRegistry; <ide> <add>import java.lang.ref.WeakReference; <add>import java.util.Map; <add>import java.util.Set; <add>import java.util.WeakHashMap; <add>import java.util.concurrent.ConcurrentHashMap; <add>import java.util.concurrent.CopyOnWriteArraySet; <add>import java.util.concurrent.atomic.AtomicInteger; <add> <ide> /** <ide> * Helper class for dealing with JS tasks. Handles per-ReactContext active task tracking, starting / <ide> * stopping tasks and notifying listeners. <ide> public static HeadlessJsTaskContext getInstance(ReactContext context) { <ide> private final AtomicInteger mLastTaskId = new AtomicInteger(0); <ide> private final Handler mHandler = new Handler(); <ide> private final Set<Integer> mActiveTasks = new CopyOnWriteArraySet<>(); <add> private final Map<Integer, HeadlessJsTaskConfig> mActiveTaskConfigs = new ConcurrentHashMap<>(); <ide> private final SparseArray<Runnable> mTaskTimeouts = new SparseArray<>(); <ide> <ide> private HeadlessJsTaskContext(ReactContext reactContext) { <ide> public boolean hasActiveTasks() { <ide> * @return a unique id representing this task instance. <ide> */ <ide> public synchronized int startTask(final HeadlessJsTaskConfig taskConfig) { <add> final int taskId = mLastTaskId.incrementAndGet(); <add> startTask(taskConfig, taskId); <add> return taskId; <add> } <add> <add> /** <add> * Start a JS task the provided task id. Handles invoking {@link AppRegistry#startHeadlessTask} <add> * and notifying listeners. 
<add> */ <add> private synchronized void startTask(final HeadlessJsTaskConfig taskConfig, int taskId) { <ide> UiThreadUtil.assertOnUiThread(); <ide> ReactContext reactContext = Assertions.assertNotNull( <ide> mReactContext.get(), <ide> public synchronized int startTask(final HeadlessJsTaskConfig taskConfig) { <ide> "Tried to start task " + taskConfig.getTaskKey() + <ide> " while in foreground, but this is not allowed."); <ide> } <del> final int taskId = mLastTaskId.incrementAndGet(); <ide> mActiveTasks.add(taskId); <add> mActiveTaskConfigs.put(taskId, new HeadlessJsTaskConfig(taskConfig)); <ide> reactContext.getJSModule(AppRegistry.class) <ide> .startHeadlessTask(taskId, taskConfig.getTaskKey(), taskConfig.getData()); <ide> if (taskConfig.getTimeout() > 0) { <ide> public synchronized int startTask(final HeadlessJsTaskConfig taskConfig) { <ide> for (HeadlessJsTaskEventListener listener : mHeadlessJsTaskEventListeners) { <ide> listener.onHeadlessJsTaskStart(taskId); <ide> } <del> return taskId; <add> } <add> <add> /** <add> * Retry a running JS task with a delay. Invokes <add> * {@link HeadlessJsTaskContext#startTask(HeadlessJsTaskConfig, int)} as long as the process does <add> * not get killed. <add> * <add> * @return true if a retry attempt has been posted. <add> */ <add> public synchronized boolean retryTask(final int taskId) { <add> final HeadlessJsTaskConfig sourceTaskConfig = mActiveTaskConfigs.get(taskId); <add> Assertions.assertCondition( <add> sourceTaskConfig != null, <add> "Tried to retrieve non-existent task config with id " + taskId + "."); <add> <add> final HeadlessJsTaskRetryPolicy retryPolicy = sourceTaskConfig.getRetryPolicy(); <add> if (!retryPolicy.canRetry()) { <add> return false; <add> } <add> <add> removeTimeout(taskId); <add> final HeadlessJsTaskConfig taskConfig = new HeadlessJsTaskConfig( <add> sourceTaskConfig.getTaskKey(), <add> sourceTaskConfig.getData(), <add> sourceTaskConfig.getTimeout(), <add> sourceTaskConfig.isAllowedInForeground(), <add> retryPolicy.update() <add> ); <add> <add> final Runnable retryAttempt = new Runnable() { <add> @Override <add> public void run() { <add> startTask(taskConfig, taskId); <add> } <add> }; <add> <add> UiThreadUtil.runOnUiThread(retryAttempt, retryPolicy.getDelay()); <add> return true; <ide> } <ide> <ide> /** <ide> public synchronized void finishTask(final int taskId) { <ide> Assertions.assertCondition( <ide> mActiveTasks.remove(taskId), <ide> "Tried to finish non-existent task with id " + taskId + "."); <del> Runnable timeout = mTaskTimeouts.get(taskId); <del> if (timeout != null) { <del> mHandler.removeCallbacks(timeout); <del> mTaskTimeouts.remove(taskId); <del> } <add> Assertions.assertCondition( <add> mActiveTaskConfigs.remove(taskId) != null, <add> "Tried to remove non-existent task config with id " + taskId + "."); <add> removeTimeout(taskId); <ide> UiThreadUtil.runOnUiThread(new Runnable() { <ide> @Override <ide> public void run() { <ide> public void run() { <ide> }); <ide> } <ide> <add> private void removeTimeout(int taskId) { <add> Runnable timeout = mTaskTimeouts.get(taskId); <add> if (timeout != null) { <add> mHandler.removeCallbacks(timeout); <add> mTaskTimeouts.remove(taskId); <add> } <add> } <add> <ide> /** <ide> * Check if a given task is currently running. A task is stopped if either {@link #finishTask} is <ide> * called or it times out. 
<ide><path>ReactAndroid/src/main/java/com/facebook/react/jstasks/HeadlessJsTaskRetryPolicy.java <add>package com.facebook.react.jstasks; <add> <add>import javax.annotation.CheckReturnValue; <add> <add>public interface HeadlessJsTaskRetryPolicy { <add> <add> boolean canRetry(); <add> <add> int getDelay(); <add> <add> @CheckReturnValue <add> HeadlessJsTaskRetryPolicy update(); <add> <add> HeadlessJsTaskRetryPolicy copy(); <add> <add>} <ide><path>ReactAndroid/src/main/java/com/facebook/react/jstasks/LinearCountingRetryPolicy.java <add>package com.facebook.react.jstasks; <add> <add>public class LinearCountingRetryPolicy implements HeadlessJsTaskRetryPolicy { <add> <add> private final int mRetryAttempts; <add> private final int mDelayBetweenAttemptsInMs; <add> <add> public LinearCountingRetryPolicy(int retryAttempts, int delayBetweenAttemptsInMs) { <add> mRetryAttempts = retryAttempts; <add> mDelayBetweenAttemptsInMs = delayBetweenAttemptsInMs; <add> } <add> <add> @Override <add> public boolean canRetry() { <add> return mRetryAttempts > 0; <add> } <add> <add> @Override <add> public int getDelay() { <add> return mDelayBetweenAttemptsInMs; <add> } <add> <add> @Override <add> public HeadlessJsTaskRetryPolicy update() { <add> final int remainingRetryAttempts = mRetryAttempts - 1; <add> <add> if (remainingRetryAttempts > 0) { <add> return new LinearCountingRetryPolicy(remainingRetryAttempts, mDelayBetweenAttemptsInMs); <add> } else { <add> return NoRetryPolicy.INSTANCE; <add> } <add> } <add> <add> @Override <add> public HeadlessJsTaskRetryPolicy copy() { <add> return new LinearCountingRetryPolicy(mRetryAttempts, mDelayBetweenAttemptsInMs); <add> } <add>} <ide><path>ReactAndroid/src/main/java/com/facebook/react/jstasks/NoRetryPolicy.java <add>package com.facebook.react.jstasks; <add> <add>public class NoRetryPolicy implements HeadlessJsTaskRetryPolicy { <add> <add> public static final NoRetryPolicy INSTANCE = new NoRetryPolicy(); <add> <add> private NoRetryPolicy() { <add> } <add> <add> @Override <add> public boolean canRetry() { <add> return false; <add> } <add> <add> @Override <add> public int getDelay() { <add> throw new IllegalStateException("Should not retrieve delay as canRetry is: " + canRetry()); <add> } <add> <add> @Override <add> public HeadlessJsTaskRetryPolicy update() { <add> throw new IllegalStateException("Should not update as canRetry is: " + canRetry()); <add> } <add> <add> @Override <add> public HeadlessJsTaskRetryPolicy copy() { <add> // Class is immutable so no need to copy <add> return this; <add> } <add>} <ide><path>ReactAndroid/src/main/java/com/facebook/react/modules/core/HeadlessJsTaskSupportModule.java <ide> package com.facebook.react.modules.core; <ide> <ide> import com.facebook.common.logging.FLog; <add>import com.facebook.react.bridge.Promise; <ide> import com.facebook.react.bridge.ReactApplicationContext; <ide> import com.facebook.react.bridge.ReactContextBaseJavaModule; <ide> import com.facebook.react.bridge.ReactMethod; <ide> public String getName() { <ide> return NAME; <ide> } <ide> <add> @ReactMethod <add> public void notifyTaskRetry(int taskId, Promise promise) { <add> HeadlessJsTaskContext headlessJsTaskContext = <add> HeadlessJsTaskContext.getInstance(getReactApplicationContext()); <add> if (headlessJsTaskContext.isTaskRunning(taskId)) { <add> final boolean retryPosted = headlessJsTaskContext.retryTask(taskId); <add> promise.resolve(retryPosted); <add> } else { <add> FLog.w( <add> HeadlessJsTaskSupportModule.class, <add> "Tried to retry non-active task with id %d. 
Did it time out?", <add> taskId); <add> promise.resolve(false); <add> } <add> } <add> <ide> @ReactMethod <ide> public void notifyTaskFinished(int taskId) { <ide> HeadlessJsTaskContext headlessJsTaskContext =
14
Ruby
Ruby
add homebrew-binary to search
583dd38b0887f8121cdd3ac65449665542987f6a
<ide><path>Library/Homebrew/cmd/search.rb <ide> def search <ide> %w{Homebrew science}, <ide> %w{Homebrew completions}, <ide> %w{Homebrew x11}, <add> %w{Homebrew binary}, <ide> ] <ide> <ide> def query_regexp(query)
1
Python
Python
add some more test for verify_hostname
01117eead5425c429c2ee3ba0dba1b4080871c67
<ide><path>libcloud/test/test_httplib_ssl.py <ide> def test_custom_ca_path_using_env_var_exist(self): <ide> self.assertEqual(libcloud.security.CA_CERTS_PATH, [file_path]) <ide> <ide> def test_verify_hostname(self): <add> # commonName <ide> cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', <ide> 'subject': ((('countryName', 'US'),), <ide> (('stateOrProvinceName', 'Delaware'),), <ide> def test_verify_hostname(self): <ide> (('organizationalUnitName', 'SSL'),), <ide> (('commonName', 'somemachine.python.org'),))} <ide> <add> # commonName <ide> cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', <ide> 'subject': ((('countryName', 'US'),), <ide> (('stateOrProvinceName', 'Delaware'),), <ide> def test_verify_hostname(self): <ide> 'subjectAltName': ((('DNS', 'foo.alt.name')), <ide> (('DNS', 'foo.alt.name.1')))} <ide> <add> # commonName <ide> cert3 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', <ide> 'subject': ((('countryName', 'US'),), <ide> (('stateOrProvinceName', 'Delaware'),), <ide> def test_verify_hostname(self): <ide> (('organizationalUnitName', 'SSL'),), <ide> (('commonName', 'python.org'),))} <ide> <add> # wildcard commonName <ide> cert4 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', <ide> 'subject': ((('countryName', 'US'),), <ide> (('stateOrProvinceName', 'Delaware'),), <ide> def test_verify_hostname(self): <ide> hostname='us-east-1.api.joyentcloud.com', cert=cert4)) <ide> self.assertTrue(self.httplib_object._verify_hostname( <ide> hostname='useast-1.api.joyentcloud.com', cert=cert4)) <add> self.assertFalse(self.httplib_object._verify_hostname( <add> hostname='t1.useast-1.api.joyentcloud.com', cert=cert4)) <add> self.assertFalse(self.httplib_object._verify_hostname( <add> hostname='ponies.useast-1.api.joyentcloud.com', cert=cert4)) <add> self.assertFalse(self.httplib_object._verify_hostname( <add> hostname='api.useast-1.api.joyentcloud.com', cert=cert4)) <ide> <ide> def test_get_subject_alt_names(self): <ide> cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT',
1
Java
Java
fix concat to allow multiple observers
21f7d526ad38506c0635576f54cc710c2e78a7f7
<ide><path>rxjava-core/src/main/java/rx/operators/OperationConcat.java <ide> public static <T> OnSubscribeFunc<T> concat(final Iterable<? extends Observable< <ide> } <ide> <ide> public static <T> OnSubscribeFunc<T> concat(final Observable<? extends Observable<? extends T>> sequences) { <del> return new Concat<T>(sequences); <add> return new OnSubscribeFunc<T>() { <add> @Override <add> public Subscription onSubscribe(Observer<? super T> t1) { <add> return new Concat<T>(sequences).onSubscribe(t1); <add> } <add> }; <ide> } <ide> <ide> private static class Concat<T> implements OnSubscribeFunc<T> { <ide> public void onNext(Observable<? extends T> nextSequence) { <ide> @Override <ide> public void onError(Throwable e) { <ide> if (completedOrErred.compareAndSet(false, true)) { <del> if (innerSubscription != null) { <del> innerSubscription.unsubscribe(); <add> Subscription q; <add> synchronized (nextSequences) { <add> q = innerSubscription; <add> } <add> if (q != null) { <add> q.unsubscribe(); <ide> } <ide> observer.onError(e); <ide> } <ide> public void onError(Throwable e) { <ide> @Override <ide> public void onCompleted() { <ide> allSequencesReceived.set(true); <del> if (innerSubscription == null) { <add> Subscription q; <add> synchronized (nextSequences) { <add> q = innerSubscription; <add> } <add> if (q == null) { <ide> // We are not subscribed to any sequence, and none are coming anymore <ide> if (completedOrErred.compareAndSet(false, true)) { <ide> observer.onCompleted(); <ide> public void onCompleted() { <ide> return new Subscription() { <ide> @Override <ide> public void unsubscribe() { <add> Subscription q; <ide> synchronized (nextSequences) { <del> if (innerSubscription != null) <del> innerSubscription.unsubscribe(); <del> outerSubscription.unsubscribe(); <add> q = innerSubscription; <add> } <add> if (q != null) { <add> q.unsubscribe(); <ide> } <add> outerSubscription.unsubscribe(); <ide> } <ide> }; <ide> } <ide><path>rxjava-core/src/test/java/rx/operators/OperationConcatTest.java <ide> package rx.operators; <ide> <ide> import static org.junit.Assert.*; <del>import static org.mockito.Matchers.*; <ide> import static org.mockito.Mockito.*; <ide> import static rx.operators.OperationConcat.*; <ide> <ide> import rx.Observable; <ide> import rx.Observer; <ide> import rx.Subscription; <add>import rx.concurrency.TestScheduler; <ide> import rx.subscriptions.BooleanSubscription; <ide> <ide> public class OperationConcatTest { <ide> public void run() { <ide> return s; <ide> } <ide> } <add> @Test <add> public void testMultipleObservers() { <add> Observer<Object> o1 = mock(Observer.class); <add> Observer<Object> o2 = mock(Observer.class); <add> <add> TestScheduler s = new TestScheduler(); <add> <add> Observable<Long> timer = Observable.interval(500, TimeUnit.MILLISECONDS, s).take(2); <add> Observable<Long> o = Observable.concat(timer, timer); <add> <add> o.subscribe(o1); <add> o.subscribe(o2); <add> <add> InOrder inOrder1 = inOrder(o1); <add> InOrder inOrder2 = inOrder(o2); <add> <add> s.advanceTimeBy(500, TimeUnit.MILLISECONDS); <add> <add> inOrder1.verify(o1, times(1)).onNext(0L); <add> inOrder2.verify(o2, times(1)).onNext(0L); <add> <add> s.advanceTimeBy(500, TimeUnit.MILLISECONDS); <add> <add> inOrder1.verify(o1, times(1)).onNext(1L); <add> inOrder2.verify(o2, times(1)).onNext(1L); <add> <add> s.advanceTimeBy(500, TimeUnit.MILLISECONDS); <add> <add> inOrder1.verify(o1, times(1)).onNext(0L); <add> inOrder2.verify(o2, times(1)).onNext(0L); <add> <add> s.advanceTimeBy(500, TimeUnit.MILLISECONDS); <add> 
<add> inOrder1.verify(o1, times(1)).onNext(1L); <add> inOrder2.verify(o2, times(1)).onNext(1L); <add> <add> inOrder1.verify(o1, times(1)).onCompleted(); <add> inOrder2.verify(o2, times(1)).onCompleted(); <add> <add> verify(o1, never()).onError(any(Throwable.class)); <add> verify(o2, never()).onError(any(Throwable.class)); <add> } <ide> }
2
Javascript
Javascript
use weakmaps instead of mutating assets
bfc74252fd42a6134ddf2fe588fe614a29d736ab
<ide><path>lib/SourceMapDevToolPlugin.js <ide> const basename = name => { <ide> return name.substr(name.lastIndexOf("/") + 1); <ide> }; <ide> <add>const assetsCache = new WeakMap(); <add> <ide> const getTaskForFile = (file, chunk, options, compilation) => { <ide> const asset = compilation.assets[file]; <del> if (asset.__SourceMapDevToolFile === file && asset.__SourceMapDevToolData) { <del> const data = asset.__SourceMapDevToolData; <del> for (const cachedFile in data) { <del> compilation.assets[cachedFile] = data[cachedFile]; <add> const cache = assetsCache.get(asset); <add> if (cache && cache.file === file) { <add> for (const cachedFile in cache.assets) { <add> compilation.assets[cachedFile] = cache.assets[cachedFile]; <ide> if (cachedFile !== file) chunk.files.push(cachedFile); <ide> } <ide> return; <ide> class SourceMapDevToolPlugin { <ide> task.file, <ide> "attach SourceMap" <ide> ); <add> const assets = Object.create(null); <ide> const chunk = task.chunk; <ide> const file = task.file; <ide> const asset = task.asset; <ide> class SourceMapDevToolPlugin { <ide> } <ide> sourceMap.sourceRoot = options.sourceRoot || ""; <ide> sourceMap.file = file; <del> asset.__SourceMapDevToolFile = file; <del> asset.__SourceMapDevToolData = {}; <add> assetsCache.set(asset, { file, assets }); <ide> let currentSourceMappingURLComment = sourceMappingURLComment; <ide> if ( <ide> currentSourceMappingURLComment !== false && <ide> class SourceMapDevToolPlugin { <ide> .relative(path.dirname(file), sourceMapFile) <ide> .replace(/\\/g, "/"); <ide> if (currentSourceMappingURLComment !== false) { <del> asset.__SourceMapDevToolData[file] = compilation.assets[ <del> file <del> ] = new ConcatSource( <add> assets[file] = compilation.assets[file] = new ConcatSource( <ide> new RawSource(source), <ide> currentSourceMappingURLComment.replace( <ide> /\[url\]/g, <ide> sourceMapUrl <ide> ) <ide> ); <ide> } <del> asset.__SourceMapDevToolData[sourceMapFile] = compilation.assets[ <add> assets[sourceMapFile] = compilation.assets[ <ide> sourceMapFile <ide> ] = new RawSource(sourceMapString); <ide> chunk.files.push(sourceMapFile); <ide> } else { <del> asset.__SourceMapDevToolData[file] = compilation.assets[ <del> file <del> ] = new ConcatSource( <add> assets[file] = compilation.assets[file] = new ConcatSource( <ide> new RawSource(source), <ide> currentSourceMappingURLComment <ide> .replace(/\[map\]/g, () => sourceMapString)
1
PHP
PHP
improve comments in db\connector
00fcaf2be9dbdb9de5ae474754eaf60770c71739
<ide><path>system/db/connector.php <ide> public static function connect($config) <ide> { <ide> // ----------------------------------------------------- <ide> // Check the application/db directory first. <add> // <add> // If the database doesn't exist there, maybe the full <add> // path was specified as the database name? <ide> // ----------------------------------------------------- <ide> if (file_exists($path = APP_PATH.'db/'.$config->database.'.sqlite')) <ide> { <ide> return new \PDO('sqlite:'.$path, null, null, static::$options); <ide> } <del> // ----------------------------------------------------- <del> // Is the database name the full path? <del> // ----------------------------------------------------- <ide> elseif (file_exists($config->database)) <ide> { <ide> return new \PDO('sqlite:'.$config->database, null, null, static::$options);
1
Javascript
Javascript
improve coverage of stream.readable
fc6e7e93e81aa70d55b374bd25a0d46f0f0523e3
<ide><path>test/parallel/test-streams-highwatermark.js <ide> const { inspect } = require('util'); <ide> readable._read = common.mustCall(); <ide> readable.read(0); <ide> } <add> <add>{ <add> // Parse size as decimal integer <add> ['1', '1.0', 1].forEach((size) => { <add> const readable = new stream.Readable({ <add> read: common.mustCall(), <add> highWaterMark: 0, <add> }); <add> readable.read(size); <add> <add> assert.strictEqual(readable._readableState.highWaterMark, Number(size)); <add> }); <add>}
1
Javascript
Javascript
change some constructs to es2015
be40f6da65da88ec876d6258ab353af832372d65
<ide><path>lib/EnvironmentPlugin.js <ide> const DefinePlugin = require("./DefinePlugin"); <ide> <ide> class EnvironmentPlugin { <ide> constructor(keys) { <del> this.defaultValues = {}; <del> <ide> if(Array.isArray(keys)) { <ide> this.keys = keys; <add> this.defaultValues = {}; <ide> } else if(keys && typeof keys === "object") { <ide> this.keys = Object.keys(keys); <ide> this.defaultValues = keys; <ide> } else { <ide> this.keys = Array.prototype.slice.call(arguments); <add> this.defaultValues = {}; <ide> } <ide> } <ide> <ide> apply(compiler) { <del> const definitions = this.keys.reduce(function(defs, key) { <add> const definitions = this.keys.reduce((defs, key) => { <ide> const value = process.env[key] || this.defaultValues[key]; <ide> <ide> if(value === undefined) { <del> compiler.plugin("this-compilation", function(compilation) { <add> compiler.plugin("this-compilation", compilation => { <ide> const error = new Error( <del> "EnvironmentPlugin - " + key + " environment variable is undefined. \n\n" + <del> "You can pass an object with default values to suppress this warning. \n" + <del> "See https://webpack.js.org/plugins/environment-plugin for example." <add> `EnvironmentPlugin - ${key} environment variable is undefined.\n\n` + <add> `You can pass an object with default values to suppress this warning.\n` + <add> `See https://webpack.js.org/plugins/environment-plugin for example.` <ide> ); <ide> <ide> error.name = "EnvVariableNotDefinedError"; <ide> compilation.warnings.push(error); <ide> }); <ide> } <ide> <del> defs["process.env." + key] = value === undefined ? "undefined" : JSON.stringify(value); <add> defs[`process.env.${key}`] = value === undefined ? "undefined" : JSON.stringify(value); <ide> <ide> return defs; <del> }.bind(this), {}); <add> }, {}); <ide> <ide> compiler.apply(new DefinePlugin(definitions)); <ide> }
1
Ruby
Ruby
remove redundant check for is_a?(string)
4c4d5c7e6c028c6a29701abb0f64231ff284a466
<ide><path>actionpack/lib/action_view/path_set.rb <ide> def find(*args) <ide> end <ide> <ide> def find_all(path, prefixes = [], *args) <del> prefixes = Array.wrap(prefixes) if String === prefixes <add> prefixes = Array.wrap(prefixes) <ide> prefixes.each do |prefix| <ide> each do |resolver| <ide> templates = resolver.find_all(path, prefix, *args) <ide><path>actionpack/lib/action_view/template/error.rb <ide> class MissingTemplate < ActionViewError #:nodoc: <ide> <ide> def initialize(paths, path, prefixes, partial, details, *) <ide> @path = path <del> prefixes = Array.wrap(prefixes) if String === prefixes <add> prefixes = Array.wrap(prefixes) <ide> display_paths = paths.compact.map{ |p| p.to_s.inspect }.join(", ") <ide> template_type = if partial <ide> "partial"
2
Ruby
Ruby
remove dead code
6ef13ec5d29b3ec66900c60bcfdf1ac5be8d20ac
<ide><path>lib/arel/table.rb <ide> class << self; attr_accessor :engine; end <ide> <ide> def initialize(name, as: nil, type_caster: nil) <ide> @name = name.to_s <del> @columns = nil <ide> @type_caster = type_caster <ide> <ide> # Sometime AR sends an :as parameter to table, to let the table know <ide> def able_to_type_cast? <ide> protected <ide> <ide> attr_reader :type_caster <del> <del> private <del> <del> def attributes_for columns <del> return nil unless columns <del> <del> columns.map do |column| <del> Attributes.for(column).new self, column.name.to_sym <del> end <del> end <del> <ide> end <ide> end
1
Javascript
Javascript
improve getdisplayname util
4d8e9cacdd5b3833ae4a52bfe1ae9ad0a6a3104d
<ide><path>lib/utils.js <ide> export function printAndExit (message, code = 1) { <ide> } <ide> <ide> export function getDisplayName (Component) { <add> if (typeof Component === 'string') { <add> return Component <add> } <add> <ide> return Component.displayName || Component.name || 'Unknown' <ide> } <ide> <ide><path>test/unit/getDisplayName.test.js <add>/* global describe, it, expect */ <add>import { Component } from 'react' <add>import { getDisplayName } from '../../dist/lib/utils' <add> <add>describe('getDisplayName', () => { <add> it('gets the proper display name of a component', () => { <add> class ComponentOne extends Component { <add> render () { <add> return null <add> } <add> } <add> <add> class ComponentTwo extends Component { <add> static displayName = 'CustomDisplayName' <add> render () { <add> return null <add> } <add> } <add> <add> function FunctionalComponent () { <add> return null <add> } <add> <add> expect(getDisplayName(ComponentOne)).toBe('ComponentOne') <add> expect(getDisplayName(ComponentTwo)).toBe('CustomDisplayName') <add> expect(getDisplayName(FunctionalComponent)).toBe('FunctionalComponent') <add> expect(getDisplayName(() => null)).toBe('Unknown') <add> expect(getDisplayName('div')).toBe('div') <add> }) <add>})
2
Text
Text
note the system requirements for v8 tests
d14d9f9edb38fc3140e829da3b3d62c6556179f4
<ide><path>doc/guides/maintaining-V8.md <ide> to be cherry-picked in the Node.js repository and V8-CI must test the change. <ide> * Run the Node.js [V8 CI][] in addition to the [Node.js CI][]. <ide> The CI uses the `test-v8` target in the `Makefile`, which uses <ide> `tools/make-v8.sh` to reconstruct a git tree in the `deps/v8` directory to <del> run V8 tests. <add> run V8 tests<sup>2</sup>. <ide> <ide> The [`git-node`][] tool can be used to simplify this task. Run <ide> `git node v8 backport <sha>` to cherry-pick a commit. <ide> This would require some tooling to: <ide> <sup>1</sup>Node.js 0.12 and older are intentionally omitted from this document <ide> as their support has ended. <ide> <add><sup>2</sup>The V8 tests still require Python 2. To run these tests locally, <add>you can run `PYTHON2 ./configure.py` before running `make test-v8`, in the root <add>of this repository. On macOS, this also requires a full Xcode install, <add>not just the "command line tools" for Xcode. <add> <ide> [ChromiumReleaseCalendar]: https://www.chromium.org/developers/calendar <ide> [Node.js CI]: https://ci.nodejs.org/job/node-test-pull-request/ <ide> [Node.js `canary` branch]: https://github.com/nodejs/node-v8/tree/canary
1
PHP
PHP
add one more test for a reported problem
74c76d48acd35a1a00b9d49201b589ceffadfa23
<ide><path>tests/TestCase/ORM/TableTest.php <ide> class TableTest extends TestCase <ide> { <ide> <ide> public $fixtures = [ <del> 'core.users', 'core.categories', 'core.articles', 'core.authors', <del> 'core.tags', 'core.articles_tags' <add> 'core.comments', <add> 'core.users', <add> 'core.categories', <add> 'core.articles', <add> 'core.authors', <add> 'core.tags', <add> 'core.articles_tags' <ide> ]; <ide> <ide> /** <ide> public function testHasMany() <ide> $this->assertSame($table, $hasMany->source()); <ide> } <ide> <add> /** <add> * testHasManyWithClassName <add> * <add> * @return void <add> */ <add> public function testHasManyWithClassName() <add> { <add> $table = TableRegistry::get('Articles'); <add> $table->hasMany('Comments', [ <add> 'className' => 'Comments', <add> 'conditions' => ['published' => 'Y'], <add> ]); <add> <add> $table->hasMany('UnapprovedComments', [ <add> 'className' => 'Comments', <add> 'conditions' => ['published' => 'N'], <add> 'propertyName' => 'unaproved_comments' <add> ]); <add> <add> $expected = [ <add> 'id' => 1, <add> 'title' => 'First Article', <add> 'unaproved_comments' => [ <add> [ <add> 'id' => 4, <add> 'article_id' => 1, <add> 'comment' => 'Fourth Comment for First Article' <add> ] <add> ], <add> 'comments' => [ <add> [ <add> 'id' => 1, <add> 'article_id' => 1, <add> 'comment' => 'First Comment for First Article' <add> ], <add> [ <add> 'id' => 2, <add> 'article_id' => 1, <add> 'comment' => 'Second Comment for First Article' <add> ], <add> [ <add> 'id' => 3, <add> 'article_id' => 1, <add> 'comment' => 'Third Comment for First Article' <add> ] <add> ] <add> ]; <add> $result = $table->find() <add> ->select(['id', 'title']) <add> ->contain([ <add> 'Comments' => ['fields' => ['id', 'article_id', 'comment']], <add> 'UnapprovedComments' => ['fields' => ['id', 'article_id', 'comment']] <add> ]) <add> ->where(['id' => 1]) <add> ->first(); <add> <add> $this->assertSame($expected, $result->toArray()); <add> } <add> <ide> /** <ide> * Ensure associations use the plugin-prefixed model <ide> *
1
PHP
PHP
add a session helper
60065df712ec06aefdd2b6639318af2ae99d9104
<ide><path>src/Illuminate/Foundation/helpers.php <ide> function secure_url($path, $parameters = array()) <ide> } <ide> } <ide> <add>if ( ! function_exists('session')) <add>{ <add> /** <add> * Get a value from the session. <add> * <add> * @param string $name <add> * @param mixed|null $default <add> * @return mixed <add> */ <add> function session($name, $default = null) <add> { <add> return app('session')->get($name, $default); <add> } <add>} <add> <ide> if ( ! function_exists('storage_path')) <ide> { <ide> /**
1
Python
Python
remove duplicate decorator in wrong place
7d59e50d87d260c9459cbc890e3bce0592dd5f99
<ide><path>celery/bin/events.py <ide> def _run_evtop(app): <ide> raise click.UsageError("The curses module is required for this command.") <ide> <ide> <del>@handle_preload_options <ide> @click.command(cls=CeleryDaemonCommand) <ide> @click.option('-d', <ide> '--dump',
1
Go
Go
use string concatenation instead of sprintf
44c9ccc069c1bfda25ad6fb5932eeeceaa8de491
<ide><path>client/client.go <ide> func (cli *Client) getAPIPath(p string, query url.Values) string { <ide> var apiPath string <ide> if cli.version != "" { <ide> v := strings.TrimPrefix(cli.version, "v") <del> apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) <add> apiPath = cli.basePath + "/v" + v + p <ide> } else { <del> apiPath = fmt.Sprintf("%s%s", cli.basePath, p) <add> apiPath = cli.basePath + p <ide> } <ide> <ide> u := &url.URL{ <ide><path>client/container_copy.go <ide> func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri <ide> query := url.Values{} <ide> query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. <ide> <del> urlStr := fmt.Sprintf("/containers/%s/archive", containerID) <add> urlStr := "/containers/" + containerID + "/archive" <ide> response, err := cli.head(ctx, urlStr, query, nil) <ide> if err != nil { <ide> return types.ContainerPathStat{}, err <ide> func (cli *Client) CopyToContainer(ctx context.Context, container, path string, <ide> query.Set("copyUIDGID", "true") <ide> } <ide> <del> apiPath := fmt.Sprintf("/containers/%s/archive", container) <add> apiPath := "/containers/" + container + "/archive" <ide> <ide> response, err := cli.putRaw(ctx, apiPath, query, content, nil) <ide> if err != nil { <ide> func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath str <ide> query := make(url.Values, 1) <ide> query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. <ide> <del> apiPath := fmt.Sprintf("/containers/%s/archive", container) <add> apiPath := "/containers/" + container + "/archive" <ide> response, err := cli.get(ctx, apiPath, query, nil) <ide> if err != nil { <ide> return nil, types.ContainerPathStat{}, err <ide><path>client/ping.go <ide> package client <ide> <ide> import ( <del> "fmt" <del> <ide> "github.com/docker/docker/api/types" <ide> "golang.org/x/net/context" <ide> ) <ide> <ide> // Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers <ide> func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { <ide> var ping types.Ping <del> req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) <add> req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil) <ide> if err != nil { <ide> return ping, err <ide> } <ide><path>client/plugin_upgrade.go <ide> package client <ide> <ide> import ( <del> "fmt" <ide> "io" <ide> "net/url" <ide> <ide> func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types <ide> <ide> func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { <ide> headers := map[string][]string{"X-Registry-Auth": {registryAuth}} <del> return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) <add> return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) <ide> }
4
Javascript
Javascript
fix comparison in _handlelinkedcheckchange
91ef878ca864a74571f525dd3b14445ef059fec4
<ide><path>src/dom/components/LinkedValueUtils.js <ide> function _handleLinkedValueChange(e) { <ide> */ <ide> function _handleLinkedCheckChange(e) { <ide> /*jshint validthis:true */ <del> this.props.checkedLink.requestChange(e.target.checked === 'true'); <add> this.props.checkedLink.requestChange(e.target.checked); <ide> } <ide> <ide> /**
1
Python
Python
add import for textlexer.
e704dd2e404b31f77831eebbb44aa6042a42b332
<ide><path>rest_framework/compat.py <ide> def apply_markdown(text): <ide> <ide> try: <ide> import pygments <del> from pygments.lexers import get_lexer_by_name <add> from pygments.lexers import get_lexer_by_name, TextLexer <ide> from pygments.formatters import HtmlFormatter <ide> <ide> def pygments_highlight(text, lang, style):
1
Mixed
Python
fix none uuid foreignkey serialization
20d1fdba697a590307ef65626bfb574a0c92bc96
<ide><path>docs/topics/release-notes.md <ide> You can determine your currently installed version using `pip freeze`: <ide> **Unreleased** <ide> <ide> * Dropped support for EOL Django 1.7 ([#3933][gh3933]) <add>* Fixed null foreign keys targeting UUIDField primary keys. ([#3936][gh3936]) <ide> <ide> ### 3.3.2 <ide> <ide><path>rest_framework/fields.py <ide> def to_internal_value(self, data): <ide> return data <ide> <ide> def to_representation(self, value): <add> if value is None: <add> return None <ide> if self.uuid_format == 'hex_verbose': <ide> return str(value) <ide> else: <ide><path>tests/models.py <ide> from __future__ import unicode_literals <ide> <add>import uuid <add> <ide> from django.db import models <ide> from django.utils.translation import ugettext_lazy as _ <ide> <ide> class ForeignKeyTarget(RESTFrameworkModel): <ide> name = models.CharField(max_length=100) <ide> <ide> <add>class UUIDForeignKeyTarget(RESTFrameworkModel): <add> uuid = models.UUIDField(primary_key=True, default=uuid.uuid4) <add> name = models.CharField(max_length=100) <add> <add> <ide> class ForeignKeySource(RESTFrameworkModel): <ide> name = models.CharField(max_length=100) <ide> target = models.ForeignKey(ForeignKeyTarget, related_name='sources', <ide> class NullableForeignKeySource(RESTFrameworkModel): <ide> on_delete=models.CASCADE) <ide> <ide> <add>class NullableUUIDForeignKeySource(RESTFrameworkModel): <add> name = models.CharField(max_length=100) <add> target = models.ForeignKey(ForeignKeyTarget, null=True, blank=True, <add> related_name='nullable_sources', <add> verbose_name='Optional target object', <add> on_delete=models.CASCADE) <add> <add> <ide> # OneToOne <ide> class OneToOneTarget(RESTFrameworkModel): <ide> name = models.CharField(max_length=100) <ide><path>tests/test_relations_pk.py <ide> from rest_framework import serializers <ide> from tests.models import ( <ide> ForeignKeySource, ForeignKeyTarget, ManyToManySource, ManyToManyTarget, <del> NullableForeignKeySource, NullableOneToOneSource, OneToOneTarget <add> NullableForeignKeySource, NullableOneToOneSource, <add> NullableUUIDForeignKeySource, OneToOneTarget, UUIDForeignKeyTarget <ide> ) <ide> <ide> <ide> class Meta: <ide> fields = ('id', 'name', 'target') <ide> <ide> <add># Nullable UUIDForeignKey <add>class NullableUUIDForeignKeySourceSerializer(serializers.ModelSerializer): <add> target = serializers.PrimaryKeyRelatedField( <add> pk_field=serializers.UUIDField(), <add> queryset=UUIDForeignKeyTarget.objects.all(), <add> allow_empty=True) <add> <add> class Meta: <add> model = NullableUUIDForeignKeySource <add> fields = ('id', 'name', 'target') <add> <add> <ide> # Nullable OneToOne <ide> class NullableOneToOneTargetSerializer(serializers.ModelSerializer): <ide> class Meta: <ide> def test_foreign_key_update_with_valid_emptystring(self): <ide> ] <ide> self.assertEqual(serializer.data, expected) <ide> <add> def test_null_uuid_foreign_key_serializes_as_none(self): <add> source = NullableUUIDForeignKeySource(name='Source') <add> serializer = NullableUUIDForeignKeySourceSerializer(source) <add> data = serializer.data <add> self.assertEqual(data["target"], None) <add> <ide> <ide> class PKNullableOneToOneTests(TestCase): <ide> def setUp(self):
4
Javascript
Javascript
fix segfault in pummel/test-tls-ci-reneg-attack
c492d43f4817eb65cd251749bd074df7bb4ebc28
<ide><path>lib/tls.js <ide> function onhandshakestart() { <ide> // Defer the error event to the next tick. We're being called from OpenSSL's <ide> // state machine and OpenSSL is not re-entrant. We cannot allow the user's <ide> // callback to destroy the connection right now, it would crash and burn. <del> process.nextTick(function() { <add> setImmediate(function() { <ide> var err = new Error('TLS session renegotiation attack detected.'); <ide> if (self.cleartext) self.cleartext.emit('error', err); <ide> });
1
Mixed
Javascript
add split chunks example
5930cf54b92e6a839bf75f42b1eb32ace793b916
<ide><path>examples/many-pages/README.md <add># Info <add> <add>This example illustrates webpack's algorithm for automatic deduplication using `optimization.splitChunks`. <add> <add>This example application contains 7 pages, each of them importing 1-3 modules from the `node_modules` folder (vendor libs) and 0-3 modules from the `stuff` folder (application modules). In reality an application is probably more complex, but the same mechanisms apply. <add> <add>The following configuration is used: <add> <add>* `optimization.splitChunks.chunks: "all"` - This opts in to automatic splitting of initial chunks, which is off by default <add>* `optimization.splitChunks.maxInitial/AsyncRequests: 20` - This opts in to an HTTP2-optimized splitting mode by increasing the allowed number of requests. Browsers only support 6 requests in parallel for HTTP1.1. <add> <add># Interpreting the result <add> <add>* `pageA.js` the normal output file for the entrypoint `pageA` <add>* `vendors~pageD~pageE~pageF~pageG.js` vendor libs shared by these pages, extracted into a separate output file when larger than the threshold in size <add>* `vendors~pageA.js` vendors only used by a single page but larger than the threshold in size <add>* `pageA~pageD~pageF.js` application modules shared by these pages and larger than the threshold in size <add> <add>The threshold here is 40 bytes, but by default (in a real application) it is 30kb. <add> <add>Some modules are intentionally duplicated, i.e. `./stuff/s4.js` is shared by `pageA` and `pageC`, but it's the only shared module, so no separate output file is created because it would be smaller than the threshold. A separate request (which comes with an overhead and worsens gzipping) is not worth the extra bytes. <add> <add>Note: decreasing `maxInitial/AsyncRequests` will increase duplication further to reduce the number of requests. Duplication doesn't affect initial page load; it only affects download size of navigations to other pages of the application.
<add> <add>## webpack.config.js <add> <add>``` <add>module.exports = { <add> // mode: "development || "production", <add> entry: { <add> pageA: "./pages/a", <add> pageB: "./pages/b", <add> pageC: "./pages/c", <add> pageD: "./pages/d", <add> pageE: "./pages/e", <add> pageF: "./pages/f", <add> pageG: "./pages/g" <add> }, <add> optimization: { <add> splitChunks: { <add> chunks: "all", <add> maxInitialRequests: 20, // for HTTP2 <add> maxAsyncRequests: 20, // for HTTP2 <add> minSize: 40 // for example only: choosen to match 2 modules <add> // omit minSize in real use case to use the default of 30kb <add> } <add> } <add>}; <add>``` <add> <add>## Production mode <add> <add>``` <add>Hash: 0a1b2c3d4e5f6a7b8c9d <add>Version: webpack 4.8.3 <add> Asset Size Chunks Chunk Names <add> pageG.js 1.15 KiB 7 [emitted] pageG <add>vendors~pageD~pageE~pageF~pageG.js 119 bytes 0 [emitted] vendors~pageD~pageE~pageF~pageG <add> vendors~pageD~pageE~pageF.js 178 bytes 2 [emitted] vendors~pageD~pageE~pageF <add> vendors~pageA~pageB~pageC.js 180 bytes 3 [emitted] vendors~pageA~pageB~pageC <add> vendors~pageC.js 121 bytes 4 [emitted] vendors~pageC <add> vendors~pageB.js 121 bytes 5 [emitted] vendors~pageB <add> vendors~pageA.js 121 bytes 6 [emitted] vendors~pageA <add> pageA~pageD~pageF.js 156 bytes 1 [emitted] pageA~pageD~pageF <add> pageF.js 1.18 KiB 8 [emitted] pageF <add> pageE.js 1.17 KiB 9 [emitted] pageE <add> pageD.js 1.18 KiB 10 [emitted] pageD <add> pageC.js 1.27 KiB 11 [emitted] pageC <add> pageB.js 1.27 KiB 12 [emitted] pageB <add> pageA.js 1.18 KiB 13 [emitted] pageA <add>Entrypoint pageA = vendors~pageA~pageB~pageC.js vendors~pageA.js pageA~pageD~pageF.js pageA.js <add>Entrypoint pageB = vendors~pageA~pageB~pageC.js vendors~pageB.js pageB.js <add>Entrypoint pageC = vendors~pageA~pageB~pageC.js vendors~pageC.js pageC.js <add>Entrypoint pageD = vendors~pageD~pageE~pageF~pageG.js vendors~pageD~pageE~pageF.js pageA~pageD~pageF.js pageD.js <add>Entrypoint pageE = vendors~pageD~pageE~pageF~pageG.js vendors~pageD~pageE~pageF.js pageE.js <add>Entrypoint pageF = vendors~pageD~pageE~pageF~pageG.js vendors~pageD~pageE~pageF.js pageA~pageD~pageF.js pageF.js <add>Entrypoint pageG = vendors~pageD~pageE~pageF~pageG.js pageG.js <add>chunk {0} vendors~pageD~pageE~pageF~pageG.js (vendors~pageD~pageE~pageF~pageG) 43 bytes ={1}= ={10}= ={2}= ={7}= ={8}= ={9}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageD~pageE~pageF~pageG) <add> > ./pages/d pageD <add> > ./pages/e pageE <add> > ./pages/f pageF <add> > ./pages/g pageG <add> 1 module <add>chunk {1} pageA~pageD~pageF.js (pageA~pageD~pageF) 62 bytes ={0}= ={10}= ={13}= ={2}= ={3}= ={6}= ={8}= [initial] [rendered] split chunk (cache group: default) (name: pageA~pageD~pageF) <add> > ./pages/a pageA <add> > ./pages/d pageD <add> > ./pages/f pageF <add> [6] ./stuff/s3.js 31 bytes {1} [built] <add> [7] ./stuff/s2.js 31 bytes {1} [built] <add>chunk {2} vendors~pageD~pageE~pageF.js (vendors~pageD~pageE~pageF) 86 bytes ={0}= ={1}= ={10}= ={8}= ={9}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageD~pageE~pageF) <add> > ./pages/d pageD <add> > ./pages/e pageE <add> > ./pages/f pageF <add> 2 modules <add>chunk {3} vendors~pageA~pageB~pageC.js (vendors~pageA~pageB~pageC) 86 bytes ={1}= ={11}= ={12}= ={13}= ={4}= ={5}= ={6}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageA~pageB~pageC) <add> > ./pages/a pageA <add> > ./pages/b pageB <add> > ./pages/c pageC <add> 2 modules <add>chunk {4} vendors~pageC.js 
(vendors~pageC) 43 bytes ={11}= ={3}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageC) <add> > ./pages/c pageC <add> 1 module <add>chunk {5} vendors~pageB.js (vendors~pageB) 43 bytes ={12}= ={3}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageB) <add> > ./pages/b pageB <add> 1 module <add>chunk {6} vendors~pageA.js (vendors~pageA) 43 bytes ={1}= ={13}= ={3}= [initial] [rendered] split chunk (cache group: vendors) (name: vendors~pageA) <add> > ./pages/a pageA <add> 1 module <add>chunk {7} pageG.js (pageG) 70 bytes ={0}= [entry] [rendered] <add> > ./pages/g pageG <add> [0] ./stuff/s1.js 31 bytes {7} {8} {10} {12} [built] <add> [10] ./pages/g.js 39 bytes {7} [built] <add>chunk {8} pageF.js (pageF) 144 bytes ={0}= ={1}= ={2}= [entry] [rendered] <add> > ./pages/f pageF <add> [0] ./stuff/s1.js 31 bytes {7} {8} {10} {12} [built] <add> [11] ./pages/f.js 113 bytes {8} [built] <add>chunk {9} pageE.js (pageE) 98 bytes ={0}= ={2}= [entry] [rendered] <add> > ./pages/e pageE <add> [4] ./stuff/s7.js 31 bytes {9} {12} [built] <add> [12] ./pages/e.js 67 bytes {9} [built] <add>chunk {10} pageD.js (pageD) 144 bytes ={0}= ={1}= ={2}= [entry] [rendered] <add> > ./pages/d pageD <add> [0] ./stuff/s1.js 31 bytes {7} {8} {10} {12} [built] <add> [13] ./pages/d.js 113 bytes {10} [built] <add>chunk {11} pageC.js (pageC) 206 bytes ={3}= ={4}= [entry] [rendered] <add> > ./pages/c pageC <add> [5] ./stuff/s4.js 31 bytes {11} {13} [built] <add> [14] ./stuff/s6.js 31 bytes {11} [built] <add> [15] ./stuff/s5.js 31 bytes {11} [built] <add> [17] ./pages/c.js 113 bytes {11} [built] <add>chunk {12} pageB.js (pageB) 206 bytes ={3}= ={5}= [entry] [rendered] <add> > ./pages/b pageB <add> [0] ./stuff/s1.js 31 bytes {7} {8} {10} {12} [built] <add> [4] ./stuff/s7.js 31 bytes {9} {12} [built] <add> [18] ./stuff/s8.js 31 bytes {12} [built] <add> [20] ./pages/b.js 113 bytes {12} [built] <add>chunk {13} pageA.js (pageA) 144 bytes ={1}= ={3}= ={6}= [entry] [rendered] <add> > ./pages/a pageA <add> [5] ./stuff/s4.js 31 bytes {11} {13} [built] <add> [22] ./pages/a.js 113 bytes {13} [built] <add>``` <ide>\ No newline at end of file <ide><path>examples/many-pages/build.js <add>global.NO_TARGET_ARGS = true; <add>global.NO_REASONS = true; <add>require("../build-common"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m1.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m2.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m3.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m4.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m5.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m6.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m7.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/node_modules/m8.js <add>console.log("a module installed from npm"); <ide>\ No newline at end of file <ide><path>examples/many-pages/pages/a.js <add>import "m1"; <add>import "m2"; <add>import "m3"; <add> <add>import "../stuff/s2"; <add>import 
"../stuff/s3"; <add>import "../stuff/s4"; <ide><path>examples/many-pages/pages/b.js <add>import "m1"; <add>import "m2"; <add>import "m4"; <add> <add>import "../stuff/s1"; <add>import "../stuff/s7"; <add>import "../stuff/s8"; <ide><path>examples/many-pages/pages/c.js <add>import "m1"; <add>import "m2"; <add>import "m5"; <add> <add>import "../stuff/s4"; <add>import "../stuff/s5"; <add>import "../stuff/s6"; <ide><path>examples/many-pages/pages/d.js <add>import "m6"; <add>import "m7"; <add>import "m8"; <add> <add>import "../stuff/s1"; <add>import "../stuff/s2"; <add>import "../stuff/s3"; <ide><path>examples/many-pages/pages/e.js <add>import "m6"; <add>import "m7"; <add>import "m8"; <add> <add>import "../stuff/s7"; <ide><path>examples/many-pages/pages/f.js <add>import "m6"; <add>import "m7"; <add>import "m8"; <add> <add>import "../stuff/s1"; <add>import "../stuff/s2"; <add>import "../stuff/s3"; <ide><path>examples/many-pages/pages/g.js <add>import "m6"; <add> <add>import "../stuff/s1"; <ide><path>examples/many-pages/stuff/s1.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s2.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s3.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s4.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s5.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s6.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s7.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/stuff/s8.js <add>console.log("some own module"); <ide>\ No newline at end of file <ide><path>examples/many-pages/template.md <add># Info <add> <add>This example illustrates webpack's algorthim for automatic deduplication using `optimization.splitChunks`. <add> <add>This example application contains 7 pages, each of them importing 1-3 modules from the `node_modules` folder (vendor libs) and 0-3 modules from the `stuff` folder (application modules). In reallity an application is probably more complex, but the same mechanisms apply. <add> <add>The following configuration is used: <add> <add>* `optimization.splitChunks.chunks: "all"` - This opt-in into automatic splitting of initial chunks which is off by default <add>* `optimization.splitChunks.maxInitial/AsyncRequests: 20` - This opt-in into a HTTP2 optimized splitting mode by increasing the allowed amount of requests. Browser only supports 6 requests in parallel for HTTP1.1. <add> <add># Interpreting the result <add> <add>* `pageA.js` the normal output files for the entrypoint `pageA` <add>* `vendors~pageD~pageE~pageF~pageG.js` vendor libs shared by these pages extracted into a separate output file when larger then the threshold in size <add>* `vendors~pageA.js` vendors only used by a single page but larger than the threshold in size <add>* `pageA~pageD~pageF.js` application modules shared by these pages and larger than the threshold in size <add> <add>The threshold is here 40 bytes, but by default (in a real application) 30kb. <add> <add>Some modules are intentially duplicated, i. e. `./stuff/s4.js` is shared by `pageA` and `pageC`, but it's the only shared module so no separate output file is created because it would be smaller than the threshold. 
A separate request (which comes with an overhead and worsens gzipping) is not worth the extra bytes. <add> <add>Note: decreasing `maxInitial/AsyncRequests` will increase duplication further to reduce the number of requests. Duplication doesn't affect initial page load; it only affects download size of navigations to other pages of the application. <add> <add>## webpack.config.js <add> <add>``` <add>{{webpack.config.js}} <add>``` <add> <add>## Production mode <add> <add>``` <add>{{production:stdout}} <add>``` <ide>\ No newline at end of file <ide><path>examples/many-pages/webpack.config.js <add>module.exports = { <add>	// mode: "development || "production", <add>	entry: { <add>		pageA: "./pages/a", <add>		pageB: "./pages/b", <add>		pageC: "./pages/c", <add>		pageD: "./pages/d", <add>		pageE: "./pages/e", <add>		pageF: "./pages/f", <add>		pageG: "./pages/g" <add>	}, <add>	optimization: { <add>		splitChunks: { <add>			chunks: "all", <add>			maxInitialRequests: 20, // for HTTP2 <add>			maxAsyncRequests: 20, // for HTTP2 <add>			minSize: 40 // for example only: chosen to match 2 modules <add>			// omit minSize in real use case to use the default of 30kb <add>		} <add>	} <add>};
27