Dataset schema:

  hash     string, 40 characters
  diff     string, 131 to 114k characters
  message  string, 7 to 980 characters
  project  string, 5 to 67 characters
  split    string, 1 distinct value ("train")
a1d6df7cce53ccd24ff1698133fa00c2b505160e
diff --git a/lib/rack/raw_upload.rb b/lib/rack/raw_upload.rb
index <HASH>..<HASH> 100644
--- a/lib/rack/raw_upload.rb
+++ b/lib/rack/raw_upload.rb
@@ -24,6 +24,7 @@ module Rack
     def convert_and_pass_on(env)
       tempfile = Tempfile.new('raw-upload.')
+      tempfile = open(tempfile.path, "r+:BINARY")
       tempfile << env['rack.input'].read
       tempfile.flush
       tempfile.rewind
@@ -62,4 +63,4 @@ module Rack
       !! (Regexp.new(regexp) =~ request_path)
     end
   end
-end
\ No newline at end of file
+end
Fix for encoding problem with Ruby <I>. Unfortunately, I haven't been able to build a test for this
New-Bamboo_rack-raw-upload
train
4a35382e8fc896245d64c1374dc127cdc4a51cb3
diff --git a/fuzz.go b/fuzz.go
index <HASH>..<HASH> 100644
--- a/fuzz.go
+++ b/fuzz.go
@@ -109,7 +109,7 @@ func NewFromGoFuzz(data []byte) *Fuzzer {
 // These functions are called sensibly, e.g., if you wanted custom string
 // fuzzing, the function `func(s *string, c fuzz.Continue)` would get
 // called and passed the address of strings. Maps and pointers will always
-// be made/new'd for you, ignoring the NilChange option. For slices, it
+// be made/new'd for you, ignoring the NilChance option. For slices, it
 // doesn't make much sense to pre-create them--Fuzzer doesn't know how
 // long you want your slice--so take a pointer to a slice, and make it
 // yourself. (If you don't want your map/pointer type pre-made, take a
chore: typo: NilChange -> NilChance (#<I>)
google_gofuzz
train
02c8ae7c109cfb9f346e184a72b562164dba7e03
diff --git a/remoting/src/main/java/hudson/remoting/Engine.java b/remoting/src/main/java/hudson/remoting/Engine.java
index <HASH>..<HASH> 100644
--- a/remoting/src/main/java/hudson/remoting/Engine.java
+++ b/remoting/src/main/java/hudson/remoting/Engine.java
@@ -101,11 +101,12 @@ public class Engine extends Thread {
         this.tunnel = tunnel;
     }
 
+    @Override
     public void run() {
         try {
             while(true) {
-                listener.status("Locating Server");
-                Exception firstError=null;
+                listener.status("Locating server among " + candidateUrls);
+                Throwable firstError=null;
                 String port=null;
 
                 for (URL url : candidateUrls) {
@@ -118,7 +119,10 @@ public class Engine extends Thread {
                     try {
                         con.connect();
                     } catch (IOException x) {
-                        throw new IOException("Failed to connect to " + salURL + ": " + x.getMessage()).initCause(x);
+                        if (firstError == null) {
+                            firstError = new IOException("Failed to connect to " + salURL + ": " + x.getMessage()).initCause(x);
+                        }
+                        continue;
                     }
                     port = con.getHeaderField("X-Hudson-JNLP-Port");
                     if(con.getResponseCode()!=200) {
@@ -134,6 +138,7 @@ public class Engine extends Thread {
 
                         // this URL works. From now on, only try this URL
                         hudsonUrl = url;
+                        firstError = null;
                        candidateUrls = Collections.singletonList(hudsonUrl);
                        break;
                    }
Strategy of trying multiple URLs was broken - gave up after the first.

git-svn-id: <URL>
jenkinsci_jenkins
train
a22092b13695c44790bb443ac8b536965b1cdba3
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index <HASH>..<HASH> 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -402,7 +402,9 @@ func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) erro
 
 		id, err := is.Create(config)
 		if err != nil {
-			return err
+			logrus.Warnf("Failed to restore custom image %s with error: %s.", name, err.Error)
+			logrus.Warnf("Skipping image %s...", name)
+			continue
 		}
 
 		if err := rs.AddTag(ref, id, true); err != nil {
diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go
index <HASH>..<HASH> 100644
--- a/daemon/graphdriver/windows/windows.go
+++ b/daemon/graphdriver/windows/windows.go
@@ -449,7 +449,7 @@ func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) {
 		imageData.ID = id
 
 		// For now, hard code that all base images except nanoserver depend on win32k support
-		if imageData.Name != "nanoserver" {
+		if imageData.Name != "NanoServer" {
 			imageData.OSFeatures = append(imageData.OSFeatures, "win32k")
 		}
Fixing nanoserver image load bug. Fixes an issue that prevents nano server images from loading properly. Also updates logic for custom image loading to avoid preventing daemon start because an image failed to load.
containers_storage
train
2dc810c89844e4c7ff9e685e812ef6875495a577
diff --git a/src/Charcoal/Cms/News.php b/src/Charcoal/Cms/News.php
index <HASH>..<HASH> 100644
--- a/src/Charcoal/Cms/News.php
+++ b/src/Charcoal/Cms/News.php
@@ -171,7 +171,7 @@ class News extends Content implements
      * @throws InvalidArgumentException
      * @return callable|null Route dispatcher
      */
-    public function handle_route($path, RequestInterface $request, ResponseInterface $response)
+    public function route_handler($path, RequestInterface $request, ResponseInterface $response)
     {
         if (!is_string($path)) {
             throw new InvalidArgumentExeption(
The Routable method has been renamed (handle_route => route_handler)
locomotivemtl_charcoal-cms
train
612a5afc244a2830698af64bcc8d0801b8883c74
diff --git a/tbapy/main.py b/tbapy/main.py
index <HASH>..<HASH> 100644
--- a/tbapy/main.py
+++ b/tbapy/main.py
@@ -72,27 +72,40 @@ class TBA:
         """
         return APIStatus(self._get('status'))
 
-    # TODO: Allow automatic getting of entire team list.
-    def teams(self, page, year=None, simple=False, keys=False):
+    def teams(self, page=None, year=None, simple=False, keys=False):
         """
         Get list of teams.
 
         :param page: Page of teams to view. Each page contains 500 teams.
-        :param year: Pass this parameter to view teams from a specific year.
+        :param year: View teams from a specific year.
         :param simple: Get only vital data.
         :param keys: Set to true if you only want the teams' keys rather than full data on them.
         :return: List of Team objects or string keys.
         """
-        if year:
-            if keys:
-                return self._get('teams/%s/%s/keys' % (year, page))
+        # If the user has requested a specific page, get that page.
+        if page is not None:
+            if year:
+                if keys:
+                    return self._get('teams/%s/%s/keys' % (year, page))
+                else:
+                    return [Team(raw) for raw in self._get('teams/%s/%s%s' % (year, page, '/simple' if simple else ''))]
             else:
-                return [Team(raw) for raw in self._get('teams/%s/%s%s' % (year, page, '/simple' if simple else ''))]
+                if keys:
+                    return self._get('teams/%s/keys' % page)
+                else:
+                    return [Team(raw) for raw in self._get('teams/%s%s' % (page, '/simple' if simple else ''))]
+        # If no page was specified, get all of them and combine.
         else:
-            if keys:
-                return self._get('teams/%s/keys' % page)
-            else:
-                return [Team(raw) for raw in self._get('teams/%s%s' % (page, '/simple' if simple else ''))]
+            teams = []
+            target = 0
+            while True:
+                page_teams = self.teams(page=target, year=year, simple=simple, keys=keys)
+                if page_teams:
+                    teams.extend(page_teams)
+                else:
+                    break
+                target += 1
+            return teams
 
     def team(self, team, simple=False):
         """
Support fetching entire list of teams automatically
frc1418_tbapy
train
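The tbapy change above is the standard page-exhaustion pattern: request numbered pages until an empty page comes back, then return everything collected. A minimal, self-contained Python sketch of the same idea, where fetch_page is a hypothetical stand-in for the per-page API call the library makes:

    def fetch_all(fetch_page):
        """Combine numbered pages until an empty page marks the end."""
        results = []
        page = 0
        while True:
            items = fetch_page(page)  # one API call per page (500 teams each)
            if not items:
                break  # an empty page signals the end of the data
            results.extend(items)
            page += 1
        return results

    # Usage with a fake two-page data source:
    pages = {0: ['frc1', 'frc2'], 1: ['frc3']}
    print(fetch_all(lambda p: pages.get(p, [])))  # ['frc1', 'frc2', 'frc3']

The trade-off the diff accepts is one extra request: the end of the data is only detected when a page comes back empty.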
be1267ced5ebfd84737fb1869cab18ff5096455a
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index <HASH>..<HASH> 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -85,7 +85,7 @@ var Defaults = Config{
 		Recommit: 3 * time.Second,
 	},
 	TxPool:      core.DefaultTxPoolConfig,
-	RPCGasCap:   25000000,
+	RPCGasCap:   50000000,
 	GPO:         FullNodeGPO,
 	RPCTxFeeCap: 1, // 1 ether
 }
eth/ethconfig: bump the RPC gas cap to <I>M, since <I> exceeds <I>
ethereum_go-ethereum
train
ea5abc9935d6d9f915f837cdea850268f1df7f29
diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go
index <HASH>..<HASH> 100644
--- a/docs/storage/driver/swift/swift.go
+++ b/docs/storage/driver/swift/swift.go
@@ -742,6 +742,9 @@ func (w *writer) Close() error {
 		if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil {
 			return err
 		}
+		if err := w.waitForSegmentsToShowUp(); err != nil {
+			return err
+		}
 	}
 	w.closed = true
 
@@ -776,10 +779,14 @@ func (w *writer) Commit() error {
 	}
 	w.committed = true
 
+	return w.waitForSegmentsToShowUp()
+}
+
+func (w *writer) waitForSegmentsToShowUp() error {
 	var err error
 	waitingTime := readAfterWriteWait
 	endTime := time.Now().Add(readAfterWriteTimeout)
+
 	for {
 		var info swift.Object
 		if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil {
wait for DLO segments to show up when Close()ing the writer

Not just when Commit()ing the result. This fixes some errors I observed when the layer (i.e. the DLO) is Stat()ed immediately after closing, and reports the wrong file size because the container listing is not yet up-to-date.
docker_distribution
train
7387017a422b7c3951a68f54c5e6129a4b05ad58
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -109,8 +109,8 @@ function byRevision(a, b) {
 
 function * up(t, options) {
 	const latestMigration = (yield current(t, Object.assign({}, options, {revision: undefined, verbose: false}))).pop() || {};
-	const latestRevision = latestMigration.revision || 0;
-	const latestBatch = latestMigration.batch || 0;
+	const latestRevision = latestMigration.revision || -1;
+	const latestBatch = latestMigration.batch || -1;
 	const currentBatch = latestBatch + 1;
 
 	const files = (yield fs.readdir(options.directory))
let filenames start from <I>
floatdrop_migratio
train
0955aff086011d1de46e2f4312c3211f8dde6410
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,21 @@ setup(
     license="MIT",
     description="A Django field that automatically generates random slugs.",
     long_description="Generates unique random slugs using these characters " \
-                     "`abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`.",
+                     "`abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`. " \
+                     "See the project page for more information: " \
+                     "http://github.com/melinko/django-randomslugfield",
     tests_require=['django'],
     test_suite='run_tests.main',
+    classifiers=[
+        "Development Status :: 5 - Production/Stable",
+        "Environment :: Web Environment",
+        "Framework :: Django",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2.6",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.3",
+    ],
 )
added classifiers to setup.py
mkrjhnsn_django-randomslugfield
train
032f640c819d0b68bbfe7d7e5cf4e471a209df4e
diff --git a/lib/fluent/parser.rb b/lib/fluent/parser.rb
index <HASH>..<HASH> 100644
--- a/lib/fluent/parser.rb
+++ b/lib/fluent/parser.rb
@@ -358,10 +358,22 @@ module Fluent
           value = (value == '') ? nil : value
         end
         if value and @null_value_pattern
-          value = (@null_value_pattern.match(value)) ? nil : value
+          value = match(value) ? nil : value
         end
         value
       end
+
+      def match(string)
+        begin
+          return @null_value_pattern.match(string)
+        rescue ArgumentError => e
+          raise e unless e.message.index("invalid byte sequence in".freeze).zero?
+          log.info "invalid byte sequence is replaced in `#{string}`"
+          string = string.scrub('?')
+          retry
+        end
+        return true
+      end
     end
 
     class TSVParser < ValuesParser
Fix: use String#scrub if the value includes an invalid byte sequence.
fluent_fluentd
train
4f3f1efaecc6531b2118f5dece7fe05bcebd1830
diff --git a/api/src/main/java/io/minio/MinioClient.java b/api/src/main/java/io/minio/MinioClient.java
index <HASH>..<HASH> 100755
--- a/api/src/main/java/io/minio/MinioClient.java
+++ b/api/src/main/java/io/minio/MinioClient.java
@@ -959,6 +959,8 @@ public class MinioClient {
       len = 0;
     }
 
+    // Disable default gzip compression by okhttp library.
+    requestBuilder.header("Accept-Encoding", "identity");
     if (method == Method.POST && queryParamMap != null && queryParamMap.containsKey("delete")) {
       // Fix issue #579: Treat 'Delete Multiple Objects' specially which requires MD5 hash.
       String[] hashes = Digest.sha256Md5Hashes(data, len);
Set Accept-Encoding to identity by default (#<I>) Fixes #<I>
minio_minio-java
train
2329bf9dc05e58aee6fc0549f2692f19a9b0fdba
diff --git a/lib/msbuild-finder.js b/lib/msbuild-finder.js
index <HASH>..<HASH> 100644
--- a/lib/msbuild-finder.js
+++ b/lib/msbuild-finder.js
@@ -44,9 +44,11 @@ var msBuildFromWhere = function(pathRoot) {
   return '';
 }
 
+var detectMsBuild15Dir = function (pathRoot) {
   var wherePath = msBuildFromWhere(pathRoot) || '';
   if (wherePath.length > 0) {
     return wherePath;
+  }
 
   var vs2017Path = path.join(pathRoot, 'Microsoft Visual Studio', '2017');
   var possibleFolders = ['BuildTools', 'Enterprise', 'Professional', 'Community'];
Function definition start was missing: detectMsBuild<I>Dir went missing
hoffi_gulp-msbuild
train
f7fe9a6c6338d9a4d23a61e8a140bd94dc048cb6
diff --git a/js/bittrex.js b/js/bittrex.js
index <HASH>..<HASH> 100644
--- a/js/bittrex.js
+++ b/js/bittrex.js
@@ -299,7 +299,6 @@ module.exports = class bittrex extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': 1,
                     },
                 },
diff --git a/js/coinbase.js b/js/coinbase.js
index <HASH>..<HASH> 100644
--- a/js/coinbase.js
+++ b/js/coinbase.js
@@ -573,7 +573,6 @@ module.exports = class coinbase extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': 1,
                     },
                 },
diff --git a/js/ftx.js b/js/ftx.js
index <HASH>..<HASH> 100644
--- a/js/ftx.js
+++ b/js/ftx.js
@@ -485,7 +485,6 @@ module.exports = class ftx extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': 20,
                     },
                 },
diff --git a/js/gateio.js b/js/gateio.js
index <HASH>..<HASH> 100755
--- a/js/gateio.js
+++ b/js/gateio.js
@@ -656,7 +656,6 @@ module.exports = class gateio extends Exchange {
                 'contractSize': this.safeString (market, 'contractSize', '1'),
                 'limits': {
                     'leverage': {
-                        'min': this.safeNumber (market, 'leverage_min'),
                         'max': this.safeNumber (market, 'leverage_max'),
                     },
                     'amount': {
diff --git a/js/huobi.js b/js/huobi.js
index <HASH>..<HASH> 100644
--- a/js/huobi.js
+++ b/js/huobi.js
@@ -454,7 +454,6 @@ module.exports = class huobi extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': this.safeNumber (market, 'leverage-ratio', 1),
                         'superMax': this.safeNumber (market, 'super-margin-leverage-ratio', 1),
                     },
diff --git a/js/kraken.js b/js/kraken.js
index <HASH>..<HASH> 100644
--- a/js/kraken.js
+++ b/js/kraken.js
@@ -372,7 +372,6 @@ module.exports = class kraken extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': maxLeverage,
                     },
                 },
diff --git a/js/okex.js b/js/okex.js
index <HASH>..<HASH> 100644
--- a/js/okex.js
+++ b/js/okex.js
@@ -722,7 +722,6 @@ module.exports = class okex extends Exchange {
                         'max': undefined,
                     },
                     'leverage': {
-                        'min': 1,
                         'max': leverage,
                     },
                 },
Removed min leverage field from describe.limits.leverage
ccxt_ccxt
train
1ffd8a86a4e14da059745d4b045541f43aaa43d4
diff --git a/src/Illuminate/Database/Schema/Builder.php b/src/Illuminate/Database/Schema/Builder.php
index <HASH>..<HASH> 100755
--- a/src/Illuminate/Database/Schema/Builder.php
+++ b/src/Illuminate/Database/Schema/Builder.php
@@ -69,7 +69,7 @@ class Builder
     {
         $table = $this->connection->getTablePrefix().$table;
 
-        return count($this->connection->select(
+        return count($this->connection->selectFromWriteConnection(
             $this->grammar->compileTableExists(), [$table]
         )) > 0;
     }
@@ -130,7 +130,7 @@ class Builder
      */
     public function getColumnListing($table)
     {
-        $results = $this->connection->select($this->grammar->compileColumnListing(
+        $results = $this->connection->selectFromWriteConnection($this->grammar->compileColumnListing(
             $this->connection->getTablePrefix().$table
         ));
diff --git a/tests/Database/DatabaseSchemaBuilderTest.php b/tests/Database/DatabaseSchemaBuilderTest.php
index <HASH>..<HASH> 100755
--- a/tests/Database/DatabaseSchemaBuilderTest.php
+++ b/tests/Database/DatabaseSchemaBuilderTest.php
@@ -21,7 +21,7 @@ class DatabaseSchemaBuilderTest extends TestCase
         $builder = new Builder($connection);
         $grammar->shouldReceive('compileTableExists')->once()->andReturn('sql');
         $connection->shouldReceive('getTablePrefix')->once()->andReturn('prefix_');
-        $connection->shouldReceive('select')->once()->with('sql', ['prefix_table'])->andReturn(['prefix_table']);
+        $connection->shouldReceive('selectFromWriteConnection')->once()->with('sql', ['prefix_table'])->andReturn(['prefix_table']);
 
         $this->assertTrue($builder->hasTable('table'));
     }
[<I>] Allow Schema to select from write connection. (#<I>)

This solves an issue when using Schema::hasColumn() or Schema::hasTable() on a read/write connection with huge latency.
laravel_framework
train
a72e0b16fd7bce80a1805c58a7f97c06a70d2eb8
diff --git a/lib/index.js b/lib/index.js
index <HASH>..<HASH> 100644
--- a/lib/index.js
+++ b/lib/index.js
@@ -242,7 +242,6 @@ class Parser extends Transform {
       if((chr === cr || chr === nl) && this.state.wasRowDelimiter === false ){
         this.state.wasRowDelimiter = true
       }
-      let recordDelimiterLength = this.__isRecordDelimiter(chr, buf, pos)
       // Previous char was a valid escape char
       // treat the current char as a regular char
       if(this.state.escaping === true){
@@ -303,6 +302,7 @@ class Parser extends Transform {
         }
       }
       if(this.state.quoting === false){
+        let recordDelimiterLength = this.__isRecordDelimiter(chr, buf, pos)
         if(recordDelimiterLength !== 0){
           // Do not emit comments which take a full line
           const skipCommentLine = this.state.commenting && (this.state.record.length === 0 && this.state.field.length === 0)
@@ -315,7 +315,7 @@ class Parser extends Transform {
             this.info.empty_lines++
             continue
           }
-          if(this.state.enabled === false && this.info.lines + (this.state.wasRowDelimiter ? 1: 0 ) >= from_line){
+          if(this.state.enabled === false && this.info.lines + (this.state.wasRowDelimiter === true ? 1: 0 ) >= from_line){
            this.state.enabled = true
            this.__resetField()
            this.__resetRow()
parser: move record delimiter declaration
adaltas_node-csv-parse
train
bb8c758974909372b5272f5ffec93ff0b9d4e4c5
diff --git a/tests/Rollerworks/Bundle/PasswordStrengthBundle/Tests/Validator/PasswordRequirementsValidatorTest.php b/tests/Rollerworks/Bundle/PasswordStrengthBundle/Tests/Validator/PasswordRequirementsValidatorTest.php
index <HASH>..<HASH> 100644
--- a/tests/Rollerworks/Bundle/PasswordStrengthBundle/Tests/Validator/PasswordRequirementsValidatorTest.php
+++ b/tests/Rollerworks/Bundle/PasswordStrengthBundle/Tests/Validator/PasswordRequirementsValidatorTest.php
@@ -35,6 +35,13 @@ class PasswordRequirementsValidatorTest extends AbstractConstraintValidatorTest
         $this->assertNoViolation();
     }
 
+    public function testEmptyIsValid()
+    {
+        $this->validator->validate('', new PasswordRequirements());
+
+        $this->assertNoViolation();
+    }
+
     /**
      * @dataProvider provideValidConstraints
      *
update PasswordRequirementsValidatorTest

Test that empty value is valid
rollerworks_PasswordStrengthBundle
train
bd562b4c94c042886cea11b45c0b8fee7fc47428
diff --git a/bin/convert-argv.js b/bin/convert-argv.js
index <HASH>..<HASH> 100644
--- a/bin/convert-argv.js
+++ b/bin/convert-argv.js
@@ -32,6 +32,14 @@ module.exports = function(optimist, argv, convertOptions) {
 	var extensions = Object.keys(interpret.extensions).sort(function(a, b) {
 		return a.length - b.length;
 	});
+	var configFiles = ["webpack.config", "webpackfile"].map(function(filename) {
+		return extensions.map(function(ext) {
+			return {
+				path: path.resolve(filename + ext),
+				ext: ext
+			};
+		});
+	}).reduce(function(a, i) { return a.concat(i); }, []);
 
 	if(argv.config) {
 		configPath = path.resolve(argv.config);
@@ -46,10 +54,10 @@ module.exports = function(optimist, argv, convertOptions) {
 			ext = path.extname(configPath);
 		}
 	} else {
-		for(var i = 0; i < extensions.length; i++) {
-			var webpackConfig = path.resolve("webpack.config" + extensions[i]);
+		for(var i = 0; i < configFiles.length; i++) {
+			var webpackConfig = configFiles[i].path;
 			if(fs.existsSync(webpackConfig)) {
-				ext = extensions[i];
+				ext = configFiles[i].ext;
 				configPath = webpackConfig;
 				break;
 			}
support `webpackfile.js`
webpack_webpack
train
32f13eb6b2ae265fb6a4e760fe331b69c8703a60
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,10 +4,13 @@ from setuptools import find_packages, setup
 
 from os import path
 
-from os import path
 this_directory = path.abspath(path.dirname(__file__))
-with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
-    long_description = f.read()
+try:
+    with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
+        long_description = f.read()
+except TypeError:
+    with open(path.join(this_directory, 'README.md'), 'rb') as f:
+        long_description = f.read().decode('utf-8')
 
 setup(
     name = "acos-client",
@@ -20,7 +23,6 @@ setup(
     license = "Apache",
     keywords = "a10 axapi acos adc slb load balancer",
     url = "https://github.com/a10networks/acos-client",
-    long_description = long_description,
     long_description_content_type = "text/markdown",
Fix python<I>/3 compatibility issue with file open in utf8
a10networks_acos-client
train
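The pattern in this commit is the usual way to bridge the fact that Python 2's built-in open() has no encoding keyword (passing one raises TypeError), while Python 3's does. A minimal sketch of the same fallback, with a hypothetical path argument:

    def read_utf8(path):
        try:
            # Python 3: open() accepts an encoding keyword.
            with open(path, encoding='utf-8') as f:
                return f.read()
        except TypeError:
            # Python 2: open() rejects the keyword, so read bytes
            # and decode by hand.
            with open(path, 'rb') as f:
                return f.read().decode('utf-8')

On Python 2, io.open() does accept an encoding argument and would avoid the try/except entirely; the diff keeps the built-in open(), so the fallback is needed.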
d8e1789b643348441633d5fa539248334365ba06
diff --git a/core/src/main/java/com/graphhopper/routing/util/HikeFlagEncoder.java b/core/src/main/java/com/graphhopper/routing/util/HikeFlagEncoder.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/com/graphhopper/routing/util/HikeFlagEncoder.java
+++ b/core/src/main/java/com/graphhopper/routing/util/HikeFlagEncoder.java
@@ -42,7 +42,7 @@ public class HikeFlagEncoder extends FootFlagEncoder {
         this((int) properties.getLong("speedBits", 4), properties.getDouble("speedFactor", 1));
         this.properties = properties;
 
-        this.setBlockFords(properties.getBool("block_fords", true));
+        this.setBlockFords(properties.getBool("block_fords", false));
     }
 
     public HikeFlagEncoder(String propertiesStr) {
allow fords for hike profile, #<I>
graphhopper_graphhopper
train
cba24a8f64bdab713c0982fd05d2e469ee23d2a3
diff --git a/lib/mail/message.rb b/lib/mail/message.rb
index <HASH>..<HASH> 100644
--- a/lib/mail/message.rb
+++ b/lib/mail/message.rb
@@ -268,7 +268,7 @@ module Mail
         reply.references ||= bracketed_message_id
       end
       if subject
-        reply.subject = "RE: #{subject}"
+        reply.subject = subject =~ /^Re:/i ? subject : "RE: #{subject}"
       end
       if reply_to || from
         reply.to = self[reply_to ? :reply_to : :from].to_s
diff --git a/spec/mail/message_spec.rb b/spec/mail/message_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/mail/message_spec.rb
+++ b/spec/mail/message_spec.rb
@@ -1562,6 +1562,10 @@ describe Mail::Message do
         @mail.reply[:references].message_ids.should == ['[email protected]', '[email protected]', '[email protected]']
       end
 
+      it "should not append another RE:" do
+        @mail.reply.subject.should == "Re: Test reply email"
+      end
+
     end
 
     describe "to a reply with an in-reply-to with a single message id but no references header" do
RE: should be appended to the subject of a reply email only if a RE: does not already exist in the subject
mikel_mail
train
9aa3865c0a5cc77a1450263550097f71642ba3ba
diff --git a/lib/fluent/plugin/in_debug_agent.rb b/lib/fluent/plugin/in_debug_agent.rb
index <HASH>..<HASH> 100644
--- a/lib/fluent/plugin/in_debug_agent.rb
+++ b/lib/fluent/plugin/in_debug_agent.rb
@@ -34,6 +34,9 @@ module Fluent::Plugin
     def configure(conf)
       super
 
+      if system_config.workers > 1
+        @port += fluentd_worker_id
+      end
       if @unix_path
         unless ::Fluent::FileUtil.writable?(@unix_path)
           raise Fluent::ConfigError, "in_debug_agent: `#{@unix_path}` is not writable"
@@ -41,6 +44,10 @@ module Fluent::Plugin
       end
     end
 
+    def multi_workers_ready?
+      @unix_path.nil?
+    end
+
     def start
       super
 
@@ -50,7 +57,7 @@ module Fluent::Plugin
       else
         uri = "druby://#{@bind}:#{@port}"
       end
-      log.info "listening dRuby", uri: uri, object: @object
+      log.info "listening dRuby", uri: uri, object: @object, worker: fluentd_worker_id
       obj = eval(@object)
       @server = DRb::DRbServer.new(uri, obj)
     end
diff --git a/test/plugin/test_in_debug_agent.rb b/test/plugin/test_in_debug_agent.rb
index <HASH>..<HASH> 100644
--- a/test/plugin/test_in_debug_agent.rb
+++ b/test/plugin/test_in_debug_agent.rb
@@ -25,4 +25,25 @@ class DebugAgentInputTest < Test::Unit::TestCase
       create_driver %[unix_path #{TMP_DIR}/does_not_exist/test_path]
     end
   end
+
+  def test_multi_worker_environment_with_port
+    Fluent::SystemConfig.overwrite_system_config('workers' => 4) do
+      d = Fluent::Test::Driver::Input.new(Fluent::Plugin::DebugAgentInput)
+      d.instance.instance_eval { @_fluentd_worker_id = 2 }
+      d.configure('port 24230')
+
+      assert_true d.instance.multi_workers_ready?
+      assert_equal(24232, d.instance.instance_variable_get(:@port))
+    end
+  end
+
+  def test_multi_worker_environment_with_unix_path
+    Fluent::SystemConfig.overwrite_system_config('workers' => 4) do
+      d = Fluent::Test::Driver::Input.new(Fluent::Plugin::DebugAgentInput)
+      d.instance.instance_eval { @_fluentd_worker_id = 2 }
+      d.configure("unix_path #{TMP_DIR}/test_path")
+
+      assert_false d.instance.multi_workers_ready?
+    end
+  end
 end
Support multi worker environment in in_debug_agent

in_debug_agent will listen on a sequential port number based on worker_id, just like in_monitor_agent.
fluent_fluentd
train
1163030118e394c016c3a7dcac234c925ae00301
diff --git a/FlowCal/io.py b/FlowCal/io.py
index <HASH>..<HASH> 100644
--- a/FlowCal/io.py
+++ b/FlowCal/io.py
@@ -1523,6 +1523,10 @@ class FCSData(np.ndarray):
                     and 'CellQuest Pro' in fcs_file.text.get('CREATOR'):
                 channel_detector_voltage = fcs_file.text.get('BD$WORD{}' \
                     .format(12+i))
+
+            # Attempt to cast extracted value to float
+            # The FCS3.1 standard restricts $PnV to be a floating-point value
+            # only. Any value that cannot be casted will be replaced with None.
             if channel_detector_voltage is not None:
                 try:
                     channel_detector_voltage = float(channel_detector_voltage)
@@ -1542,6 +1546,10 @@ class FCSData(np.ndarray):
             if channel_amp_gain is None and 'CREATOR' in fcs_file.text and \
                     'FlowJoCollectorsEdition' in fcs_file.text.get('CREATOR'):
                 channel_amp_gain = fcs_file.text.get('CytekP{:02d}G'.format(i))
+
+            # Attempt to cast extracted value to float
+            # The FCS3.1 standard restricts $PnG to be a floating-point value
+            # only. Any value that cannot be casted will be replaced with None.
             if channel_amp_gain is not None:
                 try:
                     channel_amp_gain = float(channel_amp_gain)
Added comments about casting PnV and PnG to float in FCSData.
taborlab_FlowCal
train
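The casting that the new FlowCal comments describe reduces to a small helper: try float() and treat anything non-numeric as absent. A hedged sketch (the helper name is made up; FlowCal inlines this logic instead of factoring it out):

    def keyword_to_float(value):
        # The FCS3.1 standard restricts $PnV and $PnG to floating-point
        # values; anything that cannot be cast is replaced with None.
        if value is None:
            return None
        try:
            return float(value)
        except (TypeError, ValueError):
            return None

    print(keyword_to_float('480'))  # 480.0
    print(keyword_to_float('N/A'))  # None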
a5bbc126250260158f1538771349bc0ba303089f
diff --git a/gui/default/syncthing/core/syncthingController.js b/gui/default/syncthing/core/syncthingController.js
index <HASH>..<HASH> 100755
--- a/gui/default/syncthing/core/syncthingController.js
+++ b/gui/default/syncthing/core/syncthingController.js
@@ -2056,6 +2056,9 @@ angular.module('syncthing.core')
                         value.modTime = new Date(value.modTime);
                         value.versionTime = new Date(value.versionTime);
                     });
+                    values.sort(function (a, b) {
+                        return b.versionTime - a.versionTime;
+                    });
                 });
                 if (closed) return;
                 $scope.restoreVersions.versions = data;
gui: Sort versions by date in restore dropdown (#<I>)
syncthing_syncthing
train
a1f8f1077dfd0356afbca3748f8a4fd0bbc2b5fb
diff --git a/src/sortable.js b/src/sortable.js
index <HASH>..<HASH> 100644
--- a/src/sortable.js
+++ b/src/sortable.js
@@ -76,7 +76,7 @@ angular.module('ui.sortable', [])
          // been created, either placeholder will be false if no
          // placeholder class was given or placeholder.element will be
          // undefined if a class was given (placeholder will be a string)
-          if (placeholder && placeholder.element) {
+          if (placeholder && placeholder.element && typeof placeholder.element === 'function') {
            // exact match with the placeholder's class attribute to handle
            // the case that multiple connected sortables exist and
            // the placehoilder option equals the class of sortable items
Added extra check to test if placeholder.element is a function.
angular-ui_ui-sortable
train
22ad8ffc1eef0bf5b9a51d1e2aa8537e26a3f57c
diff --git a/Src/java/cql-to-elm/src/main/java/org/cqframework/cql/cql2elm/LibraryBuilder.java b/Src/java/cql-to-elm/src/main/java/org/cqframework/cql/cql2elm/LibraryBuilder.java
index <HASH>..<HASH> 100644
--- a/Src/java/cql-to-elm/src/main/java/org/cqframework/cql/cql2elm/LibraryBuilder.java
+++ b/Src/java/cql-to-elm/src/main/java/org/cqframework/cql/cql2elm/LibraryBuilder.java
@@ -2232,7 +2232,11 @@ public class LibraryBuilder {
                 ensureLibraryIncluded(libraryName, source);
             }
-            FunctionRef fr = of.createFunctionRef().withLibraryName(libraryName).withName(functionName).withOperand(source);
+
+            String functionArgument = targetMap.substring(invocationStart + 1, targetMap.indexOf(")"));
+            FunctionRef fr = of.createFunctionRef()
+                    .withLibraryName(libraryName).withName(functionName)
+                    .withOperand(functionArgument.equals("%value") ? source : applyTargetMap(source, functionArgument));
             fr.setResultType(source.getResultType());
             return fr;
         }
Recursively applyTargetMap for functions
cqframework_clinical_quality_language
train
30b0e4156afb5d62bdbb789a50630ab952f29b74
diff --git a/lib/stripe/api_resource.rb b/lib/stripe/api_resource.rb
index <HASH>..<HASH> 100644
--- a/lib/stripe/api_resource.rb
+++ b/lib/stripe/api_resource.rb
@@ -4,7 +4,7 @@ module Stripe
       self.name.split('::')[-1]
     end
 
-    def self.url()
+    def self.url
       if self == APIResource
         raise NotImplementedError.new('APIResource is an abstract class. You should perform actions on its subclasses (Charge, Customer, etc.)')
       end
diff --git a/lib/stripe/singleton_api_resource.rb b/lib/stripe/singleton_api_resource.rb
index <HASH>..<HASH> 100644
--- a/lib/stripe/singleton_api_resource.rb
+++ b/lib/stripe/singleton_api_resource.rb
@@ -1,6 +1,6 @@
 module Stripe
   class SingletonAPIResource < APIResource
-    def self.url()
+    def self.url
       if self == SingletonAPIResource
         raise NotImplementedError.new('SingletonAPIResource is an abstract class. You should perform actions on its subclasses (Account, etc.)')
       end
diff --git a/lib/stripe/stripe_object.rb b/lib/stripe/stripe_object.rb
index <HASH>..<HASH> 100644
--- a/lib/stripe/stripe_object.rb
+++ b/lib/stripe/stripe_object.rb
@@ -37,7 +37,7 @@ module Stripe
       JSON.pretty_generate(@values)
     end
 
-    def inspect()
+    def inspect
       id_string = (self.respond_to?(:id) && !self.id.nil?) ? " id=#{self.id}" : ""
       "#<#{self.class}:0x#{self.object_id.to_s(16)}#{id_string}> JSON: " + JSON.pretty_generate(@values)
     end
formatting. remove () from methods that don't have arguments
stripe_stripe-ruby
train
98cb94ae73d8bc8b7a307a1dbec23df8f04a35b7
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index <HASH>..<HASH> 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -852,8 +852,7 @@ SwapTotal: 4789244 kB'''
         assert core.log.debug.call_args[0][2][0] == os.errno.EPERM
         assert core.log.debug.call_args[0][1] == '/etc/iscsi/initiatorname.iscsi'
 
-    @patch('salt.grains.core.os.path.isfile', MagicMock(return_value=False))
-    @patch('salt.grains.core.os.access', MagicMock(return_value=True))
+    @patch('salt.utils.files.fopen', MagicMock(side_effect=OSError(os.errno.ENOENT, '')))
     @patch('salt.grains.core.log', MagicMock())
     def test_linux_iqn_no_iscsii_initiator(self):
         '''
@@ -861,7 +860,5 @@ SwapTotal: 4789244 kB'''
         iscsii initiator is not there accessible or is not supported.
         :return:
         '''
-        with pytest.raises(IOError) as error:
-            assert core._linux_iqn() == []
+        assert core._linux_iqn() == []
         core.log.debug.assert_not_called()
-        assert 'No such file or directory' in six.text_type(error)
Update unit test on ignoring logging while the file does not exist
saltstack_salt
train
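The mocking pattern in this salt test is broadly reusable: make the patched file-opener raise OSError with ENOENT and assert that the caller swallows it instead of propagating. A self-contained sketch, where read_iqn is a hypothetical stand-in for salt's _linux_iqn:

    import errno
    from unittest import mock

    def read_iqn(opener):
        # Return [] when the initiator file is missing instead of raising.
        try:
            with opener('/etc/iscsi/initiatorname.iscsi') as f:
                return [line for line in f if line.startswith('InitiatorName=')]
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                return []
            raise

    opener = mock.MagicMock(side_effect=OSError(errno.ENOENT, ''))
    assert read_iqn(opener) == []

Giving OSError two arguments sets its errno attribute, which is what lets the caller distinguish a missing file from other I/O failures.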
d4edf00de4d7250e75a085e01ed700117b4df5ed
diff --git a/solve.py b/solve.py
index <HASH>..<HASH> 100644
--- a/solve.py
+++ b/solve.py
@@ -10,9 +10,13 @@ import inspect
 import equations
 
 default_methods = ('ideal gas', 'hydrostatic', 'constant g', 'constant Lv')
-all_methods = ('ideal gas', 'hydrostatic', 'constant g', 'constant Lv',
-               'bolton', 'goff-gratch', 'frozen bulb', 'unfrozen bulb',
-               'stipanuk', 'dry')
+all_methods = tuple(set([]).union(*[f[1].func_dict['assumptions']
+                                    for f in inspect.getmembers(equations)
+                                    if hasattr(f[1], 'func_dict') and 'assumptions' in
+                                    f[1].func_dict.keys()]))
+# all_methods = ('ideal gas', 'hydrostatic', 'constant g', 'constant Lv',
+#                'bolton', 'goff-gratch', 'frozen bulb', 'unfrozen bulb',
+#                'stipanuk', 'dry')
 
 def _get_methods(module):
added automatic generation of all_methods in solve.py
atmos-python_atmos
train
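The introspection trick in this atmos commit generalizes: walk a module with inspect.getmembers, keep the functions that carry an assumptions attribute, and union those sets into one tuple. A sketch with hypothetical stand-ins for the decorated functions in equations (the real code reads func_dict, the Python 2 spelling of a function's __dict__):

    import inspect
    import types

    equations = types.ModuleType('equations')  # stand-in for the real module

    def with_assumptions(assumptions):
        def wrap(f):
            f.assumptions = assumptions  # what atmos stores per equation
            return f
        return wrap

    equations.pressure = with_assumptions(('ideal gas', 'hydrostatic'))(lambda: None)
    equations.wet_bulb = with_assumptions(('bolton', 'frozen bulb'))(lambda: None)

    all_methods = tuple(set().union(
        *[f.assumptions for _, f in inspect.getmembers(equations)
          if hasattr(f, 'assumptions')]))
    print(sorted(all_methods))
    # ['bolton', 'frozen bulb', 'hydrostatic', 'ideal gas']

The payoff is the one named in the commit message: the list of supported methods no longer has to be maintained by hand alongside the equations themselves.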
c7cccf41b19a48ac9e16c4f53118a6b85c9cf1c7
diff --git a/tensorflow_datasets/core/units.py b/tensorflow_datasets/core/units.py
index <HASH>..<HASH> 100644
--- a/tensorflow_datasets/core/units.py
+++ b/tensorflow_datasets/core/units.py
@@ -32,7 +32,7 @@ def size_str(size_in_bytes):
 
   If size_in_bytes is None, then returns "Unknown size".
 
-  For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
+  For example `size_str(1.5 * tfds.core.units.GiB) == "1.50 GiB"`.
 
   Args:
     size_in_bytes: `int` or `None`, the size, in bytes, that we want to
diff --git a/tensorflow_datasets/public_api.py b/tensorflow_datasets/public_api.py
index <HASH>..<HASH> 100644
--- a/tensorflow_datasets/public_api.py
+++ b/tensorflow_datasets/public_api.py
@@ -25,7 +25,6 @@ from tensorflow_datasets.core import download
 from tensorflow_datasets.core import decode
 from tensorflow_datasets.core import deprecated
 from tensorflow_datasets.core import features
-from tensorflow_datasets.core import units
 from tensorflow_datasets.core import visualization
 from tensorflow_datasets.core.as_dataframe import as_dataframe
 from tensorflow_datasets.core.folder_dataset import ImageFolder
@@ -79,7 +78,6 @@ __all__ = [
     "testing",
     "TranslateFolder",
     "typing",
-    "units",
     "visualization",
     "__version__",
 ]
diff --git a/tensorflow_datasets/scripts/documentation/dataset_markdown_builder.py b/tensorflow_datasets/scripts/documentation/dataset_markdown_builder.py
index <HASH>..<HASH> 100644
--- a/tensorflow_datasets/scripts/documentation/dataset_markdown_builder.py
+++ b/tensorflow_datasets/scripts/documentation/dataset_markdown_builder.py
@@ -215,7 +215,7 @@ class DownloadSizeSection(Section):
     return builder.info.download_size
 
   def content(self, builder: tfds.core.DatasetBuilder):
-    return f'`{tfds.units.size_str(builder.info.download_size)}`'
+    return f'`{tfds.core.units.size_str(builder.info.download_size)}`'
 
 
 class DatasetSizeSection(Section):
@@ -226,7 +226,7 @@ class DatasetSizeSection(Section):
     return builder.info.dataset_size
 
   def content(self, builder: tfds.core.DatasetBuilder):
-    return f'`{tfds.units.size_str(builder.info.dataset_size)}`'
+    return f'`{tfds.core.units.size_str(builder.info.dataset_size)}`'
 
 
 class ManualDatasetSection(Section):
Remove tfds.units from the public API.

PiperOrigin-RevId: <I>
tensorflow_datasets
train
5e351d799ecdf8f3874e1c43cb06a4985b16e6a6
diff --git a/astrobase/hplc.py b/astrobase/hplc.py
index <HASH>..<HASH> 100644
--- a/astrobase/hplc.py
+++ b/astrobase/hplc.py
@@ -810,6 +810,23 @@ def generate_hatpi_binnedlc_pkl(binnedpklf, textlcf, timebinsec,
         return None
 
 
+
+def parallel_gen_binnedlc_pkls(binnedpkldir,
+                               textlcdir,
+                               timebinsec,
+                               binnedpklglob='*binned*sec*.pkl',
+                               textlcglob='*.tfalc.TF1*'):
+    '''
+    This generates the binnedlc pkls for a directory of such files.
+
+    FIXME: finish this
+
+    '''
+
+    binnedpkls = glob.glob(os.path.join(binnedpkldir, binnedpklglob))
+
+
+
 #####################
 ## POST-PROCESSING ##
 #####################
diff --git a/astrobase/lcproc.py b/astrobase/lcproc.py
index <HASH>..<HASH> 100644
--- a/astrobase/lcproc.py
+++ b/astrobase/lcproc.py
@@ -908,8 +908,8 @@ def runpf(lcfile,
 
             # save the results
             resultdict[mcolget[-1]] = {'gls':gls,
-                                        'bls':bls,
-                                        'pdm':pdm}
+                                       'bls':bls,
+                                       'pdm':pdm}
 
             # add the SNR results to the BLS result dict
             resultdict[mcolget[-1]]['bls'].update({
diff --git a/astrobase/periodbase/kbls.py b/astrobase/periodbase/kbls.py
index <HASH>..<HASH> 100644
--- a/astrobase/periodbase/kbls.py
+++ b/astrobase/periodbase/kbls.py
@@ -892,12 +892,26 @@ def bls_snr(blsdict,
         thisbestperiod = blsres['bestperiod']
 
         # get the minimum light epoch using a spline fit
-        spfit = spline_fit_magseries(times, mags, errs,
-                                     thisbestperiod,
-                                     magsarefluxes=magsarefluxes,
-                                     verbose=verbose)
+        try:
+
+            spfit = spline_fit_magseries(times, mags, errs,
+                                         thisbestperiod,
+                                         magsarefluxes=magsarefluxes,
+                                         verbose=verbose)
+            thisminepoch = spfit['fitinfo']['fitepoch']
+
+        except ValueError:
+
+            LOGEXCEPTION('could not fit a spline to find a minimum of '
+                         'the phased LC, trying SavGol fit instead...')
+            # fit a Savitsky-Golay instead and get its minimum
+            savfit = savgol_fit_magseries(times, mags, errs,
+                                          thisbestperiod,
+                                          magsarefluxes=magsarefluxes,
+                                          verbose=verbose)
+            thisminepoch = savfit['fitinfo']['fitepoch']
+
 
-        thisminepoch = spfit['fitinfo']['fitepoch']
         if isinstance(thisminepoch, np.ndarray):
             if verbose:
                 LOGWARNING('minimum epoch is actually an array:\n'
kbls: don't crash if spline fit to find a minimum fails
waqasbhatti_astrobase
train
abab9c70bd8811f5ee2a89d284bf44f674e2ab51
diff --git a/README.md b/README.md
index <HASH>..<HASH> 100644
--- a/README.md
+++ b/README.md
@@ -169,12 +169,16 @@ configuration file, you have some alternatives:
 By default the preferences will use the public email address of the
 authenticated GitHub user. If the public email address is not set, the field
 will be empty. If the email address is important for your Trac installation
-(for example for notifications), the request_email option can be set to always
-request access to all email addresses from GitHub. The primary address will be
-stored in the preferences on the first login.
+(for example for notifications), the `request_email` option can be set to
+always request access to all email addresses from GitHub. The primary address
+will be stored in the preferences on the first login.
 
     [github]
     request_email = true
+    preferred_email_domain = example.org
+
+if specified, the first address matching the optional `preferred_email_domain`
+will be used instead of the primary address.
 
 Note that the Trac mail address will only be initialized on the first login.
 Users can still change or remove the email address from their Trac account.
diff --git a/runtests.py b/runtests.py
index <HASH>..<HASH> 100755
--- a/runtests.py
+++ b/runtests.py
@@ -122,6 +122,8 @@ class TracGitHubTests(unittest.TestCase):
             conf.set('github', 'alt.branches', 'master stable/*')
         if 'request_email' in kwargs:
             conf.set('github', 'request_email', kwargs['request_email'])
+        if 'preferred_email_domain' in kwargs:
+            conf.set('github', 'preferred_email_domain', kwargs['preferred_email_domain'])
         if 'organization' in kwargs:
             conf.set('github', 'organization', kwargs['organization'])
         if 'username' in kwargs and 'access_token' in kwargs:
@@ -599,6 +601,67 @@ class GitHubLoginModuleConfigurationTests(TracGitHubTests):
         email = self.getEmail(answers, request_email=True)
         self.assertEqual(email, ['[email protected]'])
 
+    def testPreferredEmailDomain(self):
+        """
+        Test that an email address matching the preferred email domain is
+        preferred to one marked primary.
+        """
+        answers = {
+            '/user': {
+                'user': 'trololol',
+                'login': 'trololol'
+            },
+            '/user/emails': [
+                {
+                    'email': '[email protected]',
+                    'verified': False,
+                    'primary': True
+                },
+                {
+                    'email': '[email protected]',
+                    'verified': True,
+                    'primary': True
+                },
+                {
+                    'email': '[email protected]',
+                    'verified': True,
+                    'primary': False
+                },
+            ]
+        }
+
+        email = self.getEmail(answers, request_email=True,
+                              preferred_email_domain='example.net')
+        self.assertEqual(email, ['[email protected]'])
+
+    def testPreferredEmailFallbackToPrimary(self):
+        """
+        Test that the primary address is chosen if no address matches the
+        preferred email domain.
+        """
+        answers = {
+            '/user': {
+                'user': 'trololol',
+                'login': 'trololol'
+            },
+            '/user/emails': [
+                {
+                    'email': '[email protected]',
+                    'verified': True,
+                    'primary': True
+                },
+                {
+                    'email': '[email protected]',
+                    'verified': True,
+                    'primary': False
+                },
+            ]
+        }
+
+        email = self.getEmail(answers, request_email=True,
+                              preferred_email_domain='example.org')
+        self.assertEqual(email, ['[email protected]'])
+
 
 class GitHubPostCommitHookTests(TracGitHubTests):
 
@@ -827,6 +890,7 @@ class TracContext(object):
 
     _valid_attrs = ('cached_git',
                     'request_email',
+                    'preferred_email_domain',
                     'organization',
                     'username',
                     'access_token',
diff --git a/tracext/github.py b/tracext/github.py
index <HASH>..<HASH> 100644
--- a/tracext/github.py
+++ b/tracext/github.py
@@ -30,6 +30,10 @@ class GitHubLoginModule(LoginModule):
         'github', 'request_email', 'false',
         doc="Request access to the email address of the GitHub user.")
 
+    preferred_email_domain = Option(
+        'github', 'preferred_email_domain', '',
+        doc="Prefer email address under this domain over the primary address.")
+
     # INavigationContributor methods
 
     def get_active_navigation_item(self, req):
@@ -120,9 +124,14 @@ class GitHubLoginModule(LoginModule):
                 if not item['verified']:
                     # ignore unverified email addresses
                     continue
-                if item['primary']:
+                if (self.preferred_email_domain and
+                        item['email'].endswith('@' + self.preferred_email_domain)):
                     email = item['email']
                     break
+                if item['primary']:
+                    email = item['email']
+                    if not self.preferred_email_domain:
+                        break
         except Exception as exc:  # pylint: disable=broad-except
             self._reject_oauth(
                 req, exc,
Allow selection of a preferred mail address

The first address matching the optional preferred_email_domain will be used instead of the primary address.

Add two more test cases to test selecting an email address using a preferred email domain.
trac-hacks_trac-github
train
aa5d7ad7911fa5712ad05deb478507bd2558736c
diff --git a/lib/chef/knife/solo_cook.rb b/lib/chef/knife/solo_cook.rb
index <HASH>..<HASH> 100644
--- a/lib/chef/knife/solo_cook.rb
+++ b/lib/chef/knife/solo_cook.rb
@@ -185,9 +185,14 @@ class Chef
           ui.warn "Berkshelf could not be loaded"
           ui.warn "Please add the berkshelf gem to your Gemfile or install it manually with `gem install berkshelf`"
         else
-          ui.msg "Installing Berkshelf cookbooks..."
-          Berkshelf::Berksfile.from_file(expand_path('Berksfile')).install(:path => berkshelf_path)
-          add_cookbook_path berkshelf_path
+          path = berkshelf_path
+          if path == :tmpdir
+            path = Dir.mktmpdir('berks-')
+            # TODO: remove the tmpdir after uploading it to the node
+          end
+          ui.msg "Installing Berkshelf cookbooks to '#{path}'..."
+          Berkshelf::Berksfile.from_file(expand_path('Berksfile')).install(:path => path)
+          add_cookbook_path path
         end
       end
 
@@ -202,7 +207,12 @@ class Chef
       end
 
       def berkshelf_path
-        'cookbooks'
+        path = config_value(:berkshelf_path)
+        if path.nil?
+          ui.warn "`knife[:berkshelf_path]` is not set. Using temporary directory to install Berkshelf cookbooks."
+          path = :tmpdir
+        end
+        path
       end
 
       def librarian_install
Allow Berkshelf path to be configured

Default to temporary directory.

TODO:
- Tests
- Remove tmpdir after syncing
- Documentation
matschaffer_knife-solo
train
2d847e8580a61e14ea3ff09f79969395d634df74
diff --git a/modules/activiti-secure-javascript/src/main/java/org/activiti/scripting/secure/behavior/SecureJavascriptTaskActivityBehavior.java b/modules/activiti-secure-javascript/src/main/java/org/activiti/scripting/secure/behavior/SecureJavascriptTaskActivityBehavior.java
index <HASH>..<HASH> 100644
--- a/modules/activiti-secure-javascript/src/main/java/org/activiti/scripting/secure/behavior/SecureJavascriptTaskActivityBehavior.java
+++ b/modules/activiti-secure-javascript/src/main/java/org/activiti/scripting/secure/behavior/SecureJavascriptTaskActivityBehavior.java
@@ -12,10 +12,16 @@
  */
 package org.activiti.scripting.secure.behavior;
 
+import org.activiti.engine.ActivitiException;
+import org.activiti.engine.delegate.BpmnError;
 import org.activiti.engine.impl.bpmn.behavior.ScriptTaskActivityBehavior;
+import org.activiti.engine.impl.bpmn.helper.ErrorPropagation;
 import org.activiti.engine.impl.cfg.ProcessEngineConfigurationImpl;
 import org.activiti.engine.impl.pvm.delegate.ActivityExecution;
 import org.activiti.scripting.secure.impl.SecureJavascriptUtil;
+import org.apache.commons.lang3.exception.ExceptionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * @author Joram Barrez
@@ -24,6 +30,7 @@ import org.activiti.scripting.secure.impl.SecureJavascriptUtil;
 public class SecureJavascriptTaskActivityBehavior extends ScriptTaskActivityBehavior {
 
   private static final long serialVersionUID = 1L;
+  private static final Logger LOGGER = LoggerFactory.getLogger(SecureJavascriptTaskActivityBehavior.class);
 
   public SecureJavascriptTaskActivityBehavior(String scriptTaskId, String script,
       String language, String resultVariable, boolean storeScriptVariables) {
@@ -33,7 +40,30 @@ public class SecureJavascriptTaskActivityBehavior extends ScriptTaskActivityBeha
   @Override
   public void execute(ActivityExecution execution) throws Exception {
     ProcessEngineConfigurationImpl config = (ProcessEngineConfigurationImpl) execution.getEngineServices().getProcessEngineConfiguration();
-    SecureJavascriptUtil.evaluateScript(execution, script, config.getBeans());
+
+    boolean noErrors = true;
+    try {
+      Object result = SecureJavascriptUtil.evaluateScript(execution, script, config.getBeans());
+      if (resultVariable != null) {
+        execution.setVariable(resultVariable, result);
+      }
+
+    } catch (ActivitiException e) {
+
+      LOGGER.warn("Exception while executing " + execution.getActivity().getId() + " : " + e.getMessage());
+
+      noErrors = false;
+      Throwable rootCause = ExceptionUtils.getRootCause(e);
+      if (rootCause instanceof BpmnError) {
+        ErrorPropagation.propagateError((BpmnError) rootCause, execution);
+      } else {
+        throw e;
+      }
+    }
+
+    if (noErrors) {
+      leave(execution);
+    }
   }
 }
fix execution of script task with secure scripting
Activiti_Activiti
train
75f55a352ba7ea1354b5ba03fd1fa60033ac90ff
diff --git a/java/server/src/org/openqa/selenium/grid/distributor/local/GridModel.java b/java/server/src/org/openqa/selenium/grid/distributor/local/GridModel.java
index <HASH>..<HASH> 100644
--- a/java/server/src/org/openqa/selenium/grid/distributor/local/GridModel.java
+++ b/java/server/src/org/openqa/selenium/grid/distributor/local/GridModel.java
@@ -164,12 +164,11 @@ public class GridModel {
 
   public void purgeDeadNodes() {
     long now = System.currentTimeMillis();
-    long timeout = 30000;
     Lock writeLock = lock.writeLock();
     writeLock.lock();
    try {
      Set<NodeStatus> lost = nodes(UP).stream()
-        .filter(status -> now - status.touched() > status.heartbeatPeriod().toMillis())
+        .filter(status -> now - status.touched() > status.heartbeatPeriod().toMillis() * 2)
        .collect(toSet());
      Set<NodeStatus> resurrected = nodes(DOWN).stream()
        .filter(status -> now - status.touched() <= status.heartbeatPeriod().toMillis())
@@ -192,6 +191,7 @@ public class GridModel {
          resurrected.stream()
            .map(node -> String.format("%s (uri: %s)", node.getId(), node.getUri()))
            .collect(joining(", "))));
+        nodes(DOWN).removeAll(resurrected);
        nodes(UP).addAll(resurrected);
      }
      if (dead.size() > 0) {
[grid] Waiting for two missed heartbeats to mark a node DOWN and actually resurrect nodes after receiving a heartbeat event
SeleniumHQ_selenium
train
9efef940c151e5f85d72507f09a9cd81d3837ba5
diff --git a/pkg/smartos/esky/_syspaths.py b/pkg/smartos/esky/_syspaths.py
index <HASH>..<HASH> 100644
--- a/pkg/smartos/esky/_syspaths.py
+++ b/pkg/smartos/esky/_syspaths.py
@@ -20,3 +20,6 @@ BASE_PILLAR_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'pillar')
 BASE_MASTER_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'salt-master')
 LOGS_DIR = os.path.join(ROOT_DIR, 'var', 'log', 'salt')
 PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run')
+SPM_FORMULA_PATH = os.path.join(ROOT_DIR, 'spm', 'salt')
+SPM_PILLAR_PATH = os.path.join(ROOT_DIR, 'spm', 'pillar')
+SPM_REACTOR_PATH = os.path.join(ROOT_DIR, 'spm', 'reactor')
fix SPM paths on smartos/illumos esky
saltstack_salt
train
2025148a455158a05e170fcf78879c4ff28d13c7
diff --git a/provider/null/environ_test.go b/provider/null/environ_test.go
index <HASH>..<HASH> 100644
--- a/provider/null/environ_test.go
+++ b/provider/null/environ_test.go
@@ -102,13 +102,12 @@ func (s *environSuite) TestEnvironSupportsCustomSources(c *gc.C) {
 func (s *environSuite) TestEnvironBootstrapStorager(c *gc.C) {
 	var sshScript = `
 #!/bin/bash --norc
-if [ "$*" = "hostname -- bash" ]; then
+if echo "$*" | grep -q -v sudo; then
     # We're executing bash inside ssh. Wait
     # for input to be written before exiting.
     head -n 1 > /dev/null
+    echo JUJU-RC: $RC
 fi
-exec 0<&- # close stdin
-echo JUJU-RC: $RC
 `[1:]
 	bin := c.MkDir()
 	ssh := filepath.Join(bin, "ssh")
provider/null: fix intermittent test failure

The ssh command line options changed, so we were checking for the wrong command line. Made it a bit less fragile by doing a negated grep.
juju_juju
train
6f947e060f996cfb5d537ad93aa00d23acf0f403
diff --git a/src/Console/Commands/SeedCommand.php b/src/Console/Commands/SeedCommand.php
index <HASH>..<HASH> 100644
--- a/src/Console/Commands/SeedCommand.php
+++ b/src/Console/Commands/SeedCommand.php
@@ -5,14 +5,11 @@ declare(strict_types=1);
 namespace Cortex\Contacts\Console\Commands;
 
 use Illuminate\Console\Command;
-use Rinvex\Fort\Traits\AbilitySeeder;
-use Rinvex\Fort\Traits\ArtisanHelper;
-use Illuminate\Support\Facades\Schema;
+use Rinvex\Support\Traits\SeederHelper;
 
 class SeedCommand extends Command
 {
-    use AbilitySeeder;
-    use ArtisanHelper;
+    use SeederHelper;
 
     /**
      * The name and signature of the console command.
@@ -37,37 +34,8 @@ class SeedCommand extends Command
     {
         $this->warn('Seed cortex/contacts:');
 
-        if ($this->ensureExistingContactsTables()) {
-            // No seed data at the moment!
+        if ($this->ensureExistingDatabaseTables('rinvex/fort')) {
+            $this->seedResources(app('rinvex.fort.ability'), realpath(__DIR__.'/../../../resources/data/abilities.json'), ['name', 'description']);
         }
-
-        if ($this->ensureExistingFortTables()) {
-            $this->seedAbilities(realpath(__DIR__.'/../../../resources/data/abilities.json'));
-        }
-    }
-
-    /**
-     * Ensure existing contacts tables.
-     *
-     * @return bool
-     */
-    protected function ensureExistingContactsTables()
-    {
-        if (! $this->hasContactsTables()) {
-            $this->call('cortex:migrate:contacts');
-        }
-
-        return true;
-    }
-
-    /**
-     * Check if all required contacts tables exists.
-     *
-     * @return bool
-     */
-    protected function hasContactsTables()
-    {
-        return Schema::hasTable(config('rinvex.contacts.tables.contacts'))
-            && Schema::hasTable(config('rinvex.contacts.tables.contact_relations'));
     }
 }
Refactor SeedCommand to use the new SeederHelper
rinvex_cortex-contacts
train
373ba292df04e8bffd5f16f5ec84012e4766ac9d
diff --git a/packages/find-and-replace/FindAndReplaceManager.js b/packages/find-and-replace/FindAndReplaceManager.js
index <HASH>..<HASH> 100644
--- a/packages/find-and-replace/FindAndReplaceManager.js
+++ b/packages/find-and-replace/FindAndReplaceManager.js
@@ -27,6 +27,10 @@ class FindAndReplaceManager {
 
   }
 
+  dispose() {
+    this.editorSession.off(this)
+  }
+
   /*
     NOTE: We remember findString and replaceString for the next search action
   */
diff --git a/packages/find-and-replace/FindAndReplaceTool.js b/packages/find-and-replace/FindAndReplaceTool.js
index <HASH>..<HASH> 100644
--- a/packages/find-and-replace/FindAndReplaceTool.js
+++ b/packages/find-and-replace/FindAndReplaceTool.js
@@ -4,6 +4,15 @@ class FindAndReplaceTool extends ToggleTool {
 
   didMount() {
     this.refs.findString.el.focus()
+    this.context.editorSession.onPostRender(this._onPostRender, this)
+  }
+
+  dispose() {
+    this.context.editorSession.off(this)
+  }
+
+  _onPostRender() {
+    this._scrollToSelectedMatch()
   }
 
   render($$) {
@@ -154,13 +163,11 @@ class FindAndReplaceTool extends ToggleTool {
   _findNext() {
     let findAndReplaceManager = this.context.editorSession.getManager('find-and-replace')
     findAndReplaceManager.findNext()
-    this._scrollToSelectedMatch()
   }
 
   _replaceNext() {
     let findAndReplaceManager = this.context.editorSession.getManager('find-and-replace')
     findAndReplaceManager.replaceNext()
-    this._scrollToSelectedMatch()
   }
 
   _replaceAll() {
@@ -207,7 +214,6 @@ class FindAndReplaceTool extends ToggleTool {
 
     let findAndReplaceManager = this.context.editorSession.getManager('find-and-replace')
     findAndReplaceManager.startFind(findString)
-    this._scrollToSelectedMatch()
  }
 
  _scrollToSelectedMatch() {
scrollToSelectedMatch on post render.
substance_substance
train
975fc830f9fcf697338f622cc1228a91ea325678
diff --git a/aws/resource_aws_ecs_task_definition_test.go b/aws/resource_aws_ecs_task_definition_test.go
index <HASH>..<HASH> 100644
--- a/aws/resource_aws_ecs_task_definition_test.go
+++ b/aws/resource_aws_ecs_task_definition_test.go
@@ -118,6 +118,7 @@ func TestAccAWSEcsTaskDefinition_withDockerVolumeMinimalConfig(t *testing.T) {
 				Config: testAccAWSEcsTaskDefinitionWithDockerVolumesMinimalConfig(tdName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def),
+					testAccCheckAWSTaskDefinitionDockerVolumeConfigurationAutoprovisionNil(&def),
 					resource.TestCheckResourceAttr(
 						"aws_ecs_task_definition.sleep", "volume.#", "1"),
 					resource.TestCheckResourceAttr(
@@ -461,6 +462,22 @@ func testAccCheckAWSEcsTaskDefinitionExists(name string, def *ecs.TaskDefinition
 	}
 }
 
+func testAccCheckAWSTaskDefinitionDockerVolumeConfigurationAutoprovisionNil(def *ecs.TaskDefinition) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if len(def.Volumes) != 1 {
+			return fmt.Errorf("Expected (1) volumes, got (%d)", len(def.Volumes))
+		}
+		config := def.Volumes[0].DockerVolumeConfiguration
+		if config == nil {
+			return fmt.Errorf("Expected docker_volume_configuration, got nil")
+		}
+		if config.Autoprovision != nil {
+			return fmt.Errorf("Expected autoprovision to be nil, got %t", *config.Autoprovision)
+		}
+		return nil
+	}
+}
+
 func testAccAWSEcsTaskDefinition_constraint(tdName string) string {
 	return fmt.Sprintf(`
 resource "aws_ecs_task_definition" "jenkins" {
@@ -901,7 +918,7 @@ TASK_DEFINITION
   volume {
     name = "database_scratch"
 
    docker_volume_configuration {
-      autoprovision = true
+      scope = "task"
    }
  }
}
Modified test for task scoped docker volume configuration.
terraform-providers_terraform-provider-aws
train
d604cad10f5c7dbf7fa34bc655a27dac5f4a0c28
diff --git a/pagoda/cooper.py b/pagoda/cooper.py
index <HASH>..<HASH> 100644
--- a/pagoda/cooper.py
+++ b/pagoda/cooper.py
@@ -273,6 +273,7 @@ class Markers:
                 joint.setAnchor2Rel(self.attach_offsets[label])
                 joint.setParam(ode.ParamCFM, self.cfms[frame_no, j] / f)
                 joint.setParam(ode.ParamERP, self.erp)
+                joint.name = label
                 self.joints.append(joint)
 
     def reposition(self, frame_no):
Add text name to each marker joint.
EmbodiedCognition_pagoda
train
53847ca90c254540fb491e66dee61f5b900b9995
diff --git a/src/Symfony/Framework/DoctrineBundle/Logger/DbalLogger.php b/src/Symfony/Framework/DoctrineBundle/Logger/DbalLogger.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Framework/DoctrineBundle/Logger/DbalLogger.php
+++ b/src/Symfony/Framework/DoctrineBundle/Logger/DbalLogger.php
@@ -35,9 +35,12 @@ class DbalLogger extends DebugStack
         $this->logger = $logger;
     }
 
-    public function logSql($sql, array $params = null)
+    /**
+     * {@inheritdoc}
+     */
+    public function logSQL($sql, array $params = null, $executionMS = null)
     {
-        parent::logSql($sql, $params);
+        parent::logSql($sql, $params, $executionMS);
 
         if (null !== $this->logger) {
             $this->log($sql.' ('.str_replace("\n", '', var_export($params, true)).')');
[DoctrineBundle] fixed interface to reflect a change upstream
symfony_symfony
train
d1828f618c1548f269224eccf8b9b5ba3b205706
diff --git a/composer.json b/composer.json
index <HASH>..<HASH> 100644
--- a/composer.json
+++ b/composer.json
@@ -10,8 +10,9 @@
         }
     ],
     "require": {
-        "php": ">=7.2.0",
-        "psr/http-message": "~1.0"
+        "php": ">=7.4.0",
+        "psr/http-message": "~1.0",
+        "psr/http-server-middleware": "^1.0"
     },
     "autoload": {
         "psr-4": {
diff --git a/src/RouterDispatcherInterface.php b/src/RouterDispatcherInterface.php
index <HASH>..<HASH> 100644
--- a/src/RouterDispatcherInterface.php
+++ b/src/RouterDispatcherInterface.php
@@ -7,10 +7,10 @@ use Psr\Http\Message\ServerRequestInterface;
 
 interface RouterDispatcherInterface
 {
-    const NOT_FOUND = 0;
-    const FOUND = 1;
-    const METHOD_NOT_ALLOWED = 2;
-    const METHOD_DONT_ACCEPTS = 3;
+    public const NOT_FOUND = 0;
+    public const FOUND = 1;
+    public const METHOD_NOT_ALLOWED = 2;
+    public const METHOD_DONT_ACCEPTS = 3;
 
     public function dispatch(ServerRequestInterface $request): RouteInterface;
 }
diff --git a/src/RouterMiddleware.php b/src/RouterMiddleware.php
index <HASH>..<HASH> 100644
--- a/src/RouterMiddleware.php
+++ b/src/RouterMiddleware.php
@@ -10,8 +10,9 @@ use Psr\Http\Server\RequestHandlerInterface;
 
 class RouterMiddleware implements MiddlewareInterface
 {
-    /** @var RouterDispatcherInterface */
-    protected $routerDispatcher;
+    public const ATTRIBUTE_KEY = 'polus:route';
+
+    protected RouterDispatcherInterface $routerDispatcher;
 
     public function __construct(RouterDispatcherInterface $routerDispatcher)
     {
@@ -19,12 +20,7 @@ class RouterMiddleware implements MiddlewareInterface
     }
 
     /**
-     * Process an incoming server request and return a response, optionally delegating
-     * response creation to a handler.
-     *
-     * @param ServerRequestInterface $request
-     * @param RequestHandlerInterface $handler
-     * @return ResponseInterface
+     * @inheritDoc
     */
    public function process(ServerRequestInterface $request, RequestHandlerInterface $handler): ResponseInterface
    {
@@ -34,7 +30,7 @@ class RouterMiddleware implements MiddlewareInterface
                $request = $request->withAttribute($key, $value);
            }
        }
-        $request = $request->withAttribute('route', $route);
+        $request = $request->withAttribute(self::ATTRIBUTE_KEY, $route);
 
        return $handler->handle($request);
    }
Bump php version to <I>
polusphp_router
train
2c3c8d49541ae53a09b7096345a2d26bf69b4646
diff --git a/gosrc/data.go b/gosrc/data.go index <HASH>..<HASH> 100644 --- a/gosrc/data.go +++ b/gosrc/data.go @@ -11,13 +11,93 @@ const ( ) var pathFlags = map[string]int{ - "C": 2, - "archive": 1, - "archive/tar": 3, - "archive/zip": 3, - "bufio": 3, - "builtin": 3, - "bytes": 3, + "C": 2, + "archive": 1, + "archive/tar": 3, + "archive/zip": 3, + "bufio": 3, + "builtin": 3, + "bytes": 3, + "cmd": 1, + "cmd/addr2line": 3, + "cmd/asm": 3, + "cmd/asm/internal": 1, + "cmd/asm/internal/arch": 3, + "cmd/asm/internal/asm": 3, + "cmd/asm/internal/flags": 3, + "cmd/asm/internal/lex": 3, + "cmd/cgo": 3, + "cmd/compile": 3, + "cmd/compile/internal": 1, + "cmd/compile/internal/amd64": 3, + "cmd/compile/internal/arm": 3, + "cmd/compile/internal/arm64": 3, + "cmd/compile/internal/big": 3, + "cmd/compile/internal/gc": 3, + "cmd/compile/internal/mips64": 3, + "cmd/compile/internal/ppc64": 3, + "cmd/compile/internal/s390x": 3, + "cmd/compile/internal/ssa": 3, + "cmd/compile/internal/test": 3, + "cmd/compile/internal/x86": 3, + "cmd/cover": 3, + "cmd/dist": 3, + "cmd/doc": 3, + "cmd/fix": 3, + "cmd/go": 3, + "cmd/gofmt": 3, + "cmd/internal": 1, + "cmd/internal/bio": 3, + "cmd/internal/gcprog": 3, + "cmd/internal/goobj": 3, + "cmd/internal/obj": 3, + "cmd/internal/obj/arm": 3, + "cmd/internal/obj/arm64": 3, + "cmd/internal/obj/mips": 3, + "cmd/internal/obj/ppc64": 3, + "cmd/internal/obj/s390x": 3, + "cmd/internal/obj/x86": 3, + "cmd/internal/objfile": 3, + "cmd/internal/pprof": 1, + "cmd/internal/pprof/commands": 3, + "cmd/internal/pprof/driver": 3, + "cmd/internal/pprof/fetch": 3, + "cmd/internal/pprof/plugin": 3, + "cmd/internal/pprof/profile": 3, + "cmd/internal/pprof/report": 3, + "cmd/internal/pprof/svg": 3, + "cmd/internal/pprof/symbolizer": 3, + "cmd/internal/pprof/symbolz": 3, + "cmd/internal/pprof/tempfile": 3, + "cmd/internal/sys": 3, + "cmd/link": 3, + "cmd/link/internal": 1, + "cmd/link/internal/amd64": 3, + "cmd/link/internal/arm": 3, + "cmd/link/internal/arm64": 3, + "cmd/link/internal/ld": 3, + "cmd/link/internal/mips64": 3, + "cmd/link/internal/ppc64": 3, + "cmd/link/internal/s390x": 3, + "cmd/link/internal/x86": 3, + "cmd/nm": 3, + "cmd/objdump": 3, + "cmd/pack": 3, + "cmd/pprof": 3, + "cmd/trace": 3, + "cmd/vendor": 1, + "cmd/vendor/golang.org": 1, + "cmd/vendor/golang.org/x": 1, + "cmd/vendor/golang.org/x/arch": 1, + "cmd/vendor/golang.org/x/arch/arm": 1, + "cmd/vendor/golang.org/x/arch/arm/armasm": 3, + "cmd/vendor/golang.org/x/arch/x86": 1, + "cmd/vendor/golang.org/x/arch/x86/x86asm": 3, + "cmd/vet": 3, + "cmd/vet/internal": 1, + "cmd/vet/internal/cfg": 3, + "cmd/vet/internal/whitelist": 3, + "cmd/yacc": 3, "compress": 1, "compress/bzip2": 3, "compress/flate": 3, diff --git a/gosrc/gen.go b/gosrc/gen.go index <HASH>..<HASH> 100644 --- a/gosrc/gen.go +++ b/gosrc/gen.go @@ -57,7 +57,7 @@ func main() { // Build map of standard repository path flags. - cmd := exec.Command("go", "list", "std") + cmd := exec.Command("go", "list", "std", "cmd") p, err := cmd.Output() if err != nil { log.Fatal(err) @@ -67,9 +67,6 @@ func main() { "C": packagePath, } for _, path := range strings.Fields(string(p)) { - if strings.HasPrefix(path, "cmd/") { - continue - } pathFlags[path] |= packagePath | goRepoPath for { i := strings.LastIndex(path, "/")
gosrc: Do not exclude cmd/... from standard packages. This change makes it possible to view documentation for all standard Go packages, including commands and other packages that match the import path pattern "cmd/...". That may slightly "pollute" the list of standard Go packages at <URL>.
golang_gddo
train
0edcd4192591e523bd8e1a6b8646e0fece47d6d2
diff --git a/grade/exceptions.php b/grade/exceptions.php index <HASH>..<HASH> 100644 --- a/grade/exceptions.php +++ b/grade/exceptions.php @@ -115,6 +115,7 @@ // these were items that have since been deleted unset($listgrade_items[$grade_item->id]); delete_records('grade_item', 'id', $grade_item->id); + delete_records('grade_exceptions', 'grade_itemid', $grade_item->id, 'courseid', $course->id); } natcasesort($listmembers[$grade_item->id]); natcasesort($nonmembers[$grade_item->id]); diff --git a/grade/lib.php b/grade/lib.php index <HASH>..<HASH> 100644 --- a/grade/lib.php +++ b/grade/lib.php @@ -626,6 +626,7 @@ function grade_get_grades() { else { // delete this item from the grade_item table since it was deleted through the mod interface delete_records('grade_item', 'modid', $mods->modid, 'courseid', $course->id); + delete_records('grade_exceptions', 'grade_itemid', $mod->id, 'courseid', $course->id); } } else { @@ -1699,6 +1700,7 @@ function grade_view_category_grades($view_by_student) { $link_id = grade_get_module_link($course->id, $all_categories[$category][$assignment]['cminstance'], $all_categories[$category][$assignment]['modid']); $link = $CFG->wwwroot.'/mod/'.$all_categories[$category][$assignment]['modname'].'/view.php?id='.$link_id->id; + $all_categories[$category][$assignment]['link'] = $link; if ($all_categories[$category][$assignment]['hidden'] == 0) { $header .= '<th colspan="'.$grade_columns.'"><a href="'.$link.'">'.format_string($assignment,true).'</a>'; } @@ -1738,7 +1740,7 @@ function grade_view_category_grades($view_by_student) { // display points if ($preferences->show_points) { - $row .= '<td align="right">' . $items[$assignment]['grade'] . '</td>'; + $row .= '<td align="right"><a href="'.$all_categories[$category][$assignment]['link'].'">' . $items[$assignment]['grade'] . '</a></td>'; } if ($preferences->show_percent) {
Updated code to delete database exception entries for grade items that no longer exist; also added a link to grade points that takes a user to the graded item
moodle_moodle
train
488c73cfa20f70b7bb661315b35f45823e2a022b
diff --git a/src/resources/Tiled.js b/src/resources/Tiled.js index <HASH>..<HASH> 100644 --- a/src/resources/Tiled.js +++ b/src/resources/Tiled.js @@ -115,6 +115,7 @@ class Tiled extends Resource height: tilesetInfo.imageheight, tileWidth: tilesetInfo.tilewidth, tileHeight: tilesetInfo.tileheight, + columns: tilesetInfo.columns, spacing: tilesetInfo.spacing | 0, margin: tilesetInfo.margin | 0 } @@ -234,6 +235,7 @@ class Tiled extends Resource parseTsxTileset(node, gid, rootPath) { const tileWidth = parseInt(node.getAttribute("tilewidth")) const tileHeight = parseInt(node.getAttribute("tileheight")) + const columns = parseInt(node.getAttribute("columns")) const data = { type: "Tileset", gid, @@ -242,6 +244,7 @@ class Tiled extends Resource height: 0, tileWidth, tileHeight, + columns, offsetX: 0, offsetY: 0, spacing: 0, diff --git a/src/resources/Tileset.js b/src/resources/Tileset.js index <HASH>..<HASH> 100644 --- a/src/resources/Tileset.js +++ b/src/resources/Tileset.js @@ -15,6 +15,7 @@ class Tileset extends Texture this.gid = 1 this.tileWidth = 0 this.tileHeight = 0 + this.columns = 0 this.offsetX = 0 this.offsetY = 0 this.spacing = 0 @@ -30,6 +31,7 @@ class Tileset extends Texture this.height = cfg.height || 0 this.tileWidth = cfg.tileWidth || 0 this.tileHeight = cfg.tileHeight || 0 + this.columns = cfg.columns || 0 this.offsetX = cfg.offsetX || 0 this.offsetY = cfg.offsetY || 0 this.spacing = cfg.spacing || 0 @@ -37,7 +39,7 @@ class Tileset extends Texture } updateFrames() { - const tilesX = Math.floor(this.width / this.tileWidth) + const tilesX = this.columns || Math.floor(this.width / this.tileWidth) const tilesY = Math.floor(this.height / this.tileHeight) const uvOffsetX = 1.0 / this.width const uvOffsetY = 1.0 / this.height diff --git a/src/tilemap/Tilemap.js b/src/tilemap/Tilemap.js index <HASH>..<HASH> 100644 --- a/src/tilemap/Tilemap.js +++ b/src/tilemap/Tilemap.js @@ -14,6 +14,7 @@ class Tilemap extends Entity this.numTilesY = 0 this.tileWidth = 0 this.tileHeight = 0 + this.columns this.type = Tilemap.Type.Orthographic this.tilesets = [] if(resource) { diff --git a/src/tilemap/TilemapIsometricLayer.js b/src/tilemap/TilemapIsometricLayer.js index <HASH>..<HASH> 100644 --- a/src/tilemap/TilemapIsometricLayer.js +++ b/src/tilemap/TilemapIsometricLayer.js @@ -10,9 +10,12 @@ class TilemapIsometricLayer extends TilemapLayer updateSize() { const halfTileWidth = this.tileWidth * 0.5 const halfTileHeight = this.tileHeight * 0.5 + // this.size.set( + // this.tileset.offsetX + halfTileWidth + (this.numTilesX * halfTileWidth) + ((this.numTilesY - 1) * halfTileWidth), + // this.tileset.tileHeight + this.tileset.offsetY + (this.numTilesX * halfTileHeight) + ((this.numTilesY - 1) * halfTileHeight)) this.size.set( - this.tileset.offsetX + halfTileWidth + (this.numTilesX * halfTileWidth) + ((this.numTilesY - 1) * halfTileWidth), - this.tileset.tileHeight + this.tileset.offsetY + (this.numTilesX * halfTileHeight) + ((this.numTilesY - 1) * halfTileHeight)) + this.tileset.offsetX + this.tileset.tileWidth + ((this.numTilesX - 1) * halfTileWidth) + (this.numTilesY - 1) * halfTileWidth, + this.tileset.offsetY + this.tileset.tileHeight + ((this.numTilesX - 1) * halfTileHeight) + (this.numTilesY - 1) * halfTileHeight) } updateMesh() { @@ -38,7 +41,7 @@ class TilemapIsometricLayer extends TilemapLayer for(let x = 0; x < this.numTilesX; x++) { for(let y = 0; y < this.numTilesY; y++) { const id = x + (y * this.numTilesX) - let gid = this.data[id] - this.tileset.gid + const gid = this.data[id] - this.tileset.gid if(gid > -1) { const frame = this.tileset.getFrame(gid) const flip = frame[4] diff --git a/src/tilemap/TilemapLayer.js b/src/tilemap/TilemapLayer.js index <HASH>..<HASH> 100644 --- a/src/tilemap/TilemapLayer.js +++ b/src/tilemap/TilemapLayer.js @@ -42,14 +42,19 @@ class TilemapLayer extends Renderable this.dataInfo = new Array(numTilesX * numTilesY) this.updateData(data) this.extractTileset() - this.updateSize() + if(this.tileset) { + this.updateSize() + } + else { + this.hidden = true + } } updateSize() {} updateData(data) { this.data = data - this.needUpdateInfo = true + this.needUpdateMesh = true } extractTileset() {
Add support for "columns" property in Tileset resource
tenjou_meta2d
train
ba9d95c34d1c847e8b7a44a150c336fbbf9a767d
diff --git a/ui/app/adapters/application.js b/ui/app/adapters/application.js index <HASH>..<HASH> 100644 --- a/ui/app/adapters/application.js +++ b/ui/app/adapters/application.js @@ -96,6 +96,7 @@ export default class ApplicationAdapter extends RESTAdapter { let prefix = this.urlPrefix(); if (modelName) { + /* eslint-disable-next-line ember/no-string-prototype-extensions */ path = modelName.camelize(); if (path) { url.push(path); } diff --git a/ui/app/components/job-page/parts/job-client-status-summary.js b/ui/app/components/job-page/parts/job-client-status-summary.js index <HASH>..<HASH> 100644 --- a/ui/app/components/job-page/parts/job-client-status-summary.js +++ b/ui/app/components/job-page/parts/job-client-status-summary.js @@ -24,6 +24,7 @@ export default class JobClientStatusSummary extends Component { @action onSliceClick(ev, slice) { + /* eslint-disable-next-line ember/no-string-prototype-extensions */ this.gotoClients([slice.className.camelize()]); } diff --git a/ui/app/components/job-page/parts/summary.js b/ui/app/components/job-page/parts/summary.js index <HASH>..<HASH> 100644 --- a/ui/app/components/job-page/parts/summary.js +++ b/ui/app/components/job-page/parts/summary.js @@ -24,6 +24,7 @@ export default class Summary extends Component { @action onSliceClick(ev, slice) { + /* eslint-disable-next-line ember/no-string-prototype-extensions */ this.gotoAllocations([slice.label.camelize()]); } diff --git a/ui/app/helpers/css-class.js b/ui/app/helpers/css-class.js index <HASH>..<HASH> 100644 --- a/ui/app/helpers/css-class.js +++ b/ui/app/helpers/css-class.js @@ -9,6 +9,7 @@ import { helper } from '@ember/component/helper'; * Differs from dasherize by handling slashes. */ export function cssClass([updateType]) { + /* eslint-disable-next-line ember/no-string-prototype-extensions */ return updateType.replace(/\//g, '-').dasherize(); } diff --git a/ui/app/serializers/application.js b/ui/app/serializers/application.js index <HASH>..<HASH> 100644 --- a/ui/app/serializers/application.js +++ b/ui/app/serializers/application.js @@ -1,3 +1,4 @@ +/* eslint-disable ember/no-string-prototype-extensions */ import { copy } from 'ember-copy'; import { get } from '@ember/object'; import { makeArray } from '@ember/array'; diff --git a/ui/app/serializers/volume.js b/ui/app/serializers/volume.js index <HASH>..<HASH> 100644 --- a/ui/app/serializers/volume.js +++ b/ui/app/serializers/volume.js @@ -49,6 +49,7 @@ export default class VolumeSerializer extends ApplicationSerializer { keyForRelationship(attr, relationshipType) { //Embedded relationship attributes don't end in IDs + /* eslint-disable-next-line ember/no-string-prototype-extensions */ if (this.embeddedRelationships.includes(attr)) return attr.capitalize(); return super.keyForRelationship(attr, relationshipType); } diff --git a/ui/blueprints/story/index.js b/ui/blueprints/story/index.js index <HASH>..<HASH> 100644 --- a/ui/blueprints/story/index.js +++ b/ui/blueprints/story/index.js @@ -1,3 +1,4 @@ +/* eslint-disable ember/no-string-prototype-extensions */ const getPathOption = require('ember-cli-get-component-path-option'); const stringUtil = require('ember-cli-string-utils'); const path = require('path'); diff --git a/ui/tests/integration/components/agent-monitor-test.js b/ui/tests/integration/components/agent-monitor-test.js index <HASH>..<HASH> 100644 --- a/ui/tests/integration/components/agent-monitor-test.js +++ b/ui/tests/integration/components/agent-monitor-test.js @@ -1,3 +1,4 @@ +/* eslint-disable ember/no-string-prototype-extensions */ import { run } from '@ember/runloop'; import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit';
ui: disable no-string-prototype-extensions where it's currently in use
hashicorp_nomad
train
b25ffb672dd405d543b3d7761cdfd11f71e17fe0
diff --git a/tests/arg.test.js b/tests/arg.test.js index <HASH>..<HASH> 100644 --- a/tests/arg.test.js +++ b/tests/arg.test.js @@ -11,7 +11,7 @@ describe('arg.js', function () { it('should construct an Arg', function () { - var node = Arg('foo', null, 'child1', 'child2'); + var node = Arg('foo', null); expect(node).to.be.ok; expect(node.name).to.equal('foo'); @@ -20,6 +20,12 @@ expect(node._type).to.equal('arg'); }); + it('should throw an error if has children', function () { + var anError = /children/i; + + expect(Arg.bind(null, 'foo', null, 'child')).to.throw(anError); + }); + }); })(); diff --git a/tests/flag.test.js b/tests/flag.test.js index <HASH>..<HASH> 100644 --- a/tests/flag.test.js +++ b/tests/flag.test.js @@ -11,7 +11,7 @@ describe('flag.js', function () { it('should construct a Flag', function () { - var node = Flag('foo', {alias: 'bar'}, 'child1', 'child2'); + var node = Flag('foo', {alias: 'bar'}); expect(node).to.be.ok; expect(node.name).to.equal('foo'); @@ -20,6 +20,12 @@ expect(node._type).to.equal('flag'); }); + it('should throw an error if has children', function () { + var anError = /children/i; + + expect(Flag.bind(null, 'foo', null, 'child')).to.throw(anError); + }); + }); })(); diff --git a/tests/kwarg.test.js b/tests/kwarg.test.js index <HASH>..<HASH> 100644 --- a/tests/kwarg.test.js +++ b/tests/kwarg.test.js @@ -11,7 +11,7 @@ describe('kwarg.js', function () { it('should construct a KWArg', function () { - var node = KWArg('foo', {alias: 'bar'}, 'child1', 'child2'); + var node = KWArg('foo', {alias: 'bar'}); expect(node).to.be.ok; expect(node.name).to.equal('foo'); @@ -20,6 +20,12 @@ expect(node._type).to.equal('kwarg'); }); + it('should throw an error if has children', function () { + var anError = /children/i; + + expect(KWArg.bind(null, 'foo', null, 'child')).to.throw(anError); + }); + }); })();
Update KWArg, Flag, and Arg tests
JakeSidSmith_jargs
train
d1af395718c92ae1390f893af44ea8a4b9d9384d
diff --git a/testrail/helper.py b/testrail/helper.py index <HASH>..<HASH> 100644 --- a/testrail/helper.py +++ b/testrail/helper.py @@ -30,6 +30,8 @@ def class_name(meth): def singleresult(func): def func_wrapper(*args, **kw): items = func(*args) + if hasattr(items, '__iter__'): + items = list(items) if len(items) > 1: raise TestRailError( 'identifier "%s" returned multiple results' % args[1])
Update singleresult to correctly handle iterators - closes #<I> - Updates the singleresult function to detect when an iterator has been passed in and transform it into a list
travispavek_testrail-python
train
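Aside on the pattern in the testrail-python fix above: in Python, generators support iteration but not len(), so a result set must be materialized before it can be counted. A minimal standalone sketch, with a hypothetical find_items callable standing in for the wrapped query (not part of testrail-python):

    def single_result(find_items):
        # Invoke the query and insist on at most one match.
        items = find_items()
        # Generators and other non-sequence iterators have no len();
        # materialize them into a list before counting, as the fix does.
        if hasattr(items, '__iter__') and not isinstance(items, (list, tuple)):
            items = list(items)
        if len(items) > 1:
            raise ValueError('identifier returned multiple results')
        return items[0] if items else None

    # Works whether the backend returns a list or a lazy generator:
    assert single_result(lambda: [42]) == 42
    assert single_result(lambda: (x for x in [7])) == 7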
7c0da6574e7426307112bcd3d249afe3e7407128
diff --git a/lib/acts_as_paranoid/core.rb b/lib/acts_as_paranoid/core.rb index <HASH>..<HASH> 100644 --- a/lib/acts_as_paranoid/core.rb +++ b/lib/acts_as_paranoid/core.rb @@ -219,23 +219,6 @@ module ActsAsParanoid recover(options) end - def recover_dependent_associations(deleted_value, options) - self.class.dependent_associations.each do |reflection| - recover_dependent_association(reflection, deleted_value, options) - end - end - - def destroy_dependent_associations! - self.class.dependent_associations.each do |reflection| - assoc = association(reflection.name) - next unless (klass = assoc.klass).paranoid? - - klass - .only_deleted.merge(get_association_scope(assoc)) - .each(&:destroy!) - end - end - def deleted? return true if @destroyed @@ -258,6 +241,23 @@ module ActsAsParanoid private + def recover_dependent_associations(deleted_value, options) + self.class.dependent_associations.each do |reflection| + recover_dependent_association(reflection, deleted_value, options) + end + end + + def destroy_dependent_associations! + self.class.dependent_associations.each do |reflection| + assoc = association(reflection.name) + next unless (klass = assoc.klass).paranoid? + + klass + .only_deleted.merge(get_association_scope(assoc)) + .each(&:destroy!) + end + end + def recover_dependent_association(reflection, deleted_value, options) assoc = association(reflection.name) return unless (klass = assoc.klass).paranoid?
Make helper methods for dependent associations private. These methods should only be used by ActsAsParanoid internally.
ActsAsParanoid_acts_as_paranoid
train
8f27c236aa58501b426ec1fd0306b1b38e92bde1
diff --git a/src/services/requested-fields-extractor.js b/src/services/requested-fields-extractor.js index <HASH>..<HASH> 100644 --- a/src/services/requested-fields-extractor.js +++ b/src/services/requested-fields-extractor.js @@ -27,6 +27,12 @@ function extractRequestedFieldsForAssociation(associationName, requestedFields, return requestedFields.map((fieldName) => `${associationName}.${fieldName}`); } +function extractRequestedSmartField(requestedFields, schema) { + const schemaSmartFields = schema.fields.filter(({ isVirtual }) => isVirtual); + return requestedFields[schema.name].split(',') + .filter((fieldName) => schemaSmartFields.some(({ field }) => field === fieldName)); +} + /** * @param {Record<string, string>} requestedFields * @param {*} modelOrAssociation @@ -58,9 +64,12 @@ function extractRequestedFields(requestedFields, modelOrAssociation, schemas) { const modelFields = requestedFields[modelOrAssociation.name].split(',') .filter((fieldName) => !requestedFields[fieldName]); + const smartFields = extractRequestedSmartField(requestedFields, schemas[modelOrAssociation.name]); + return Array.from(new Set([ ...primaryKeyArray, ...modelFields, + ...smartFields, ...allAssociationFields, ])); } diff --git a/test/services/requested-fields-extractor.test.js b/test/services/requested-fields-extractor.test.js index <HASH>..<HASH> 100644 --- a/test/services/requested-fields-extractor.test.js +++ b/test/services/requested-fields-extractor.test.js @@ -25,7 +25,17 @@ describe('services > requested-fields-extractor', () => { associations: {}, }; - const result = extractRequestedFields(fields, model); + const schemas = { + user: { + name: 'user', + fields: [{ + field: 'name', + isVirtual: false, + }], + }, + }; + + const result = extractRequestedFields(fields, model, schemas); expect(result).toStrictEqual(['id', 'name']); }); @@ -43,7 +53,17 @@ describe('services > requested-fields-extractor', () => { associations: {}, }; - const result = extractRequestedFields(fields, model); + const schemas = { + user: { + name: 'user', + fields: [{ + field: 'name', + isVirtual: false, + }], + }, + }; + + const result = extractRequestedFields(fields, model, schemas); expect(result).toStrictEqual(['id', 'name']); }); @@ -70,7 +90,15 @@ describe('services > requested-fields-extractor', () => { }; const schemas = { + user: { + name: 'user', + fields: [{ + field: 'name', + isVirtual: false, + }], + }, addresses: { + name: 'addresses', fields: [{ field: 'street', isVirtual: false, @@ -105,7 +133,15 @@ describe('services > requested-fields-extractor', () => { }; const schemas = { + user: { + name: 'user', + fields: [{ + field: 'name', + isVirtual: false, + }], + }, addresses: { + name: 'addresses', fields: [ { field: 'street', @@ -147,7 +183,15 @@ describe('services > requested-fields-extractor', () => { }; const schemas = { + user: { + name: 'user', + fields: [{ + field: 'name', + isVirtual: false, + }], + }, addresses: { + name: 'addresses', fields: [ { field: 'street', @@ -166,4 +210,38 @@ describe('services > requested-fields-extractor', () => { 'homeAddress', ]); }); + + it('should include requested smart field', () => { + expect.assertions(1); + + const fields = { + user: 'smartField', + }; + + const model = { + name: 'user', + primaryKeys: { id: null, uid: null }, + associations: {}, + }; + + const schemas = { + user: { + name: 'user', + fields: [{ + field: 'smartField', + isVirtual: true, + }, { + field: 'anotherSmartField', + isVirtual: true, + }], + }, + }; + + const result = extractRequestedFields(fields, model, schemas); + + expect(result).toStrictEqual([ + 'id', + 'smartField', + ]); + }); });
fix(smart-field): smart field should allow requesting all fields (#<I>)
ForestAdmin_forest-express-sequelize
train
378a243ae4b9d5d7cb8f04522915458d14691e7f
diff --git a/pkg/project/registry/projectrequest/delegated/delegated.go b/pkg/project/registry/projectrequest/delegated/delegated.go index <HASH>..<HASH> 100644 --- a/pkg/project/registry/projectrequest/delegated/delegated.go +++ b/pkg/project/registry/projectrequest/delegated/delegated.go @@ -79,8 +79,8 @@ func (r *REST) NewList() runtime.Object { var _ = rest.Creater(&REST{}) var ( - forbiddenNames = []string{"openshift", "kubernetes", "kube"} - forbiddenPrefixes = []string{"openshift-", "kubernetes-", "kube-"} + ForbiddenNames = []string{"openshift", "kubernetes", "kube"} + ForbiddenPrefixes = []string{"openshift-", "kubernetes-", "kube-"} ) func (r *REST) Create(ctx apirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { @@ -90,12 +90,12 @@ func (r *REST) Create(ctx apirequest.Context, obj runtime.Object, includeUniniti } projectRequest := obj.(*projectapi.ProjectRequest) - for _, s := range forbiddenNames { + for _, s := range ForbiddenNames { if projectRequest.Name == s { return nil, kapierror.NewForbidden(projectapi.Resource("project"), projectRequest.Name, fmt.Errorf("cannot request a project with the name %q", s)) } } - for _, s := range forbiddenPrefixes { + for _, s := range ForbiddenPrefixes { if strings.HasPrefix(projectRequest.Name, s) { return nil, kapierror.NewForbidden(projectapi.Resource("project"), projectRequest.Name, fmt.Errorf("cannot request a project starting with %q", s)) } diff --git a/pkg/quota/admission/clusterresourceoverride/admission.go b/pkg/quota/admission/clusterresourceoverride/admission.go index <HASH>..<HASH> 100644 --- a/pkg/quota/admission/clusterresourceoverride/admission.go +++ b/pkg/quota/admission/clusterresourceoverride/admission.go @@ -3,6 +3,7 @@ package clusterresourceoverride import ( "fmt" "io" + "strings" "github.com/golang/glog" @@ -18,6 +19,7 @@ import ( oadmission "github.com/openshift/origin/pkg/cmd/server/admission" configlatest "github.com/openshift/origin/pkg/cmd/server/api/latest" "github.com/openshift/origin/pkg/project/cache" + "github.com/openshift/origin/pkg/project/registry/projectrequest/delegated" "github.com/openshift/origin/pkg/quota/admission/clusterresourceoverride/api" "github.com/openshift/origin/pkg/quota/admission/clusterresourceoverride/api/validation" ) @@ -145,6 +147,20 @@ func (a *clusterResourceOverridePlugin) Validate() error { return v.Validate() } +func isExemptedNamespace(name string) bool { + for _, s := range delegated.ForbiddenNames { + if name == s { + return true + } + } + for _, s := range delegated.ForbiddenPrefixes { + if strings.HasPrefix(name, s) { + return true + } + } + return false +} + // TODO this will need to update when we have pod requests/limits func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error { glog.V(6).Infof("%s admission controller is invoked", api.PluginName) @@ -158,15 +174,21 @@ func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error { glog.V(5).Infof("%s is looking at creating pod %s in project %s", api.PluginName, pod.Name, attr.GetNamespace()) // allow annotations on project to override - if ns, err := a.ProjectCache.GetNamespace(attr.GetNamespace()); err != nil { + ns, err := a.ProjectCache.GetNamespace(attr.GetNamespace()) + if err != nil { glog.Warningf("%s got an error retrieving namespace: %v", api.PluginName, err) return admission.NewForbidden(attr, err) // this should not happen though - } else { - projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation] - if exists && projectEnabledPlugin != "true" { - glog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace()) - return nil // disabled for this project, do nothing - } + } + + projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation] + if exists && projectEnabledPlugin != "true" { + glog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace()) + return nil // disabled for this project, do nothing + } + + if isExemptedNamespace(ns.Name) { + glog.V(5).Infof("%s is skipping exempted project %s", api.PluginName, attr.GetNamespace()) + return nil // project is exempted, do nothing } // Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation
clusterresourceoverride: exempt known infra projects from enforcement
openshift_origin
train
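The admission-plugin change above reuses the project-request blacklist to skip enforcement for infrastructure namespaces; the check itself is a plain exact-name and prefix match. A minimal Python rendering of that logic, with the name lists copied from the diff (the function name mirrors the Go helper, nothing else is implied):

    FORBIDDEN_NAMES = ["openshift", "kubernetes", "kube"]
    FORBIDDEN_PREFIXES = ["openshift-", "kubernetes-", "kube-"]

    def is_exempted_namespace(name):
        # Exact reserved names first, then reserved prefixes.
        if name in FORBIDDEN_NAMES:
            return True
        return any(name.startswith(p) for p in FORBIDDEN_PREFIXES)

    assert is_exempted_namespace("openshift")
    assert is_exempted_namespace("kube-system")
    assert not is_exempted_namespace("myproject")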
fad84870ebfd32979f136c07db1c39fd46100306
diff --git a/lib/hosts.py b/lib/hosts.py index <HASH>..<HASH> 100755 --- a/lib/hosts.py +++ b/lib/hosts.py @@ -209,7 +209,8 @@ class Host(object): db = self.connection.admin logger.debug("add admin user {login}/{password}".format(login=self.login, password=self.password)) db.add_user(self.login, self.password, - roles=['clusterAdmin', + roles=['__system', + 'clusterAdmin', 'dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase']) diff --git a/lib/rs.py b/lib/rs.py index <HASH>..<HASH> 100755 --- a/lib/rs.py +++ b/lib/rs.py @@ -54,7 +54,8 @@ class ReplicaSet(object): try: c = self.connection() c.admin.add_user(self.login, self.password, - roles=['clusterAdmin', + roles=['__system', + 'clusterAdmin', 'dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase']) diff --git a/lib/shards.py b/lib/shards.py index <HASH>..<HASH> 100755 --- a/lib/shards.py +++ b/lib/shards.py @@ -57,7 +57,8 @@ class Shard(object): except OperationFailure: client = MongoClient(self.router['hostname'], **self.kwargs) client.admin.add_user(self.login, self.password, - roles=['clusterAdmin', + roles=['__system', + 'clusterAdmin', 'dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase'])
Added the __system role for when auth is being set up.
10gen_mongo-orchestration
train
f8d30c1b1e196addf1ccbee522fc3afaffd04df6
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index <HASH>..<HASH> 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -238,6 +238,51 @@ var _ = Describe("Kubectl client", func() { }) }) + Describe("Kubectl logs", func() { + It("should find a string in pod logs", func() { + mkpath := func(file string) string { + return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + } + controllerJson := mkpath("redis-master-controller.json") + nsFlag := fmt.Sprintf("--namespace=%v", ns) + By("creating Redis RC") + runKubectl("create", "-f", controllerJson, nsFlag) + By("checking logs") + forEachPod(c, ns, "app", "redis", func(pod api.Pod) { + _, err := lookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", podStartTimeout) + Expect(err).NotTo(HaveOccurred()) + }) + }) + }) + + Describe("Kubectl patch", func() { + It("should add annotations for pods in rc", func() { + mkpath := func(file string) string { + return filepath.Join(testContext.RepoRoot, "examples/guestbook-go", file) + } + controllerJson := mkpath("redis-master-controller.json") + nsFlag := fmt.Sprintf("--namespace=%v", ns) + By("creating Redis RC") + runKubectl("create", "-f", controllerJson, nsFlag) + By("patching all pods") + forEachPod(c, ns, "app", "redis", func(pod api.Pod) { + runKubectl("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") + }) + + By("checking annotations") + forEachPod(c, ns, "app", "redis", func(pod api.Pod) { + found := false + for key, val := range pod.Annotations { + if key == "x" && val == "y" { + found = true + } + } + if !found { + Failf("Added annation not found") + } + }) + }) + }) }) func curl(addr string) (string, error) {
E2E tests for kubectl logs and patch
kubernetes_kubernetes
train
ee45b35d835eb0e92ea663cb77b5f41b2b5582f2
diff --git a/integration/sawtooth_integration/tests/test_intkey_smoke.py b/integration/sawtooth_integration/tests/test_intkey_smoke.py index <HASH>..<HASH> 100644 --- a/integration/sawtooth_integration/tests/test_intkey_smoke.py +++ b/integration/sawtooth_integration/tests/test_intkey_smoke.py @@ -89,7 +89,10 @@ class TestIntkeySmoke(unittest.TestCase): batch = IntkeyMessageFactory().create_batch(batch) LOGGER.info('Posting batch') _post_batch(batch) - time.sleep(1) + + while (how_many_updates + 1) > _get_block_num(): + time.sleep(1) + self.verify_state_after_n_updates(how_many_updates) def verify_state_after_n_updates(self, num): @@ -134,6 +137,12 @@ def _get_state(): response = _query_rest_api('/state') return response['data'] + +def _get_block_num(): + response = _query_rest_api('/blocks?count=1') + return response['data'][0]['header']['block_num'] + + def _query_rest_api(suffix='', data=None, headers={}): url = 'http://rest_api:8080' + suffix request = urllib.request.Request(url, data, headers)
Wait for expected block num. Intkey smoke occasionally fails because block commits can take longer than 1 second. Wait for the expected block number before verifying the state to mitigate this issue.
hyperledger_sawtooth-core
train
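The sawtooth test change above swaps a fixed one-second sleep for polling: post the batch, then query the REST API until the reported block number reaches the expected count. A minimal sketch of that wait loop; the get_block_num callable mirrors the helper in the diff, while the explicit timeout is an added safeguard rather than part of the original test:

    import time

    def wait_for_block(get_block_num, target, timeout=60, interval=1):
        # Poll until the chain reaches the target block number instead of
        # hoping a fixed sleep was long enough.
        deadline = time.time() + timeout
        while get_block_num() < target:
            if time.time() > deadline:
                raise TimeoutError('block %d not reached within %ds' % (target, timeout))
            time.sleep(interval)

    # Usage with a stubbed block counter that advances on each poll:
    counter = iter([0, 1, 2, 3])
    wait_for_block(lambda: next(counter), target=3, timeout=10)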
833b1ac56065dd077c5b7f5462d641afa4aa3e37
diff --git a/src/Laravel/Cashier/StripeGateway.php b/src/Laravel/Cashier/StripeGateway.php index <HASH>..<HASH> 100644 --- a/src/Laravel/Cashier/StripeGateway.php +++ b/src/Laravel/Cashier/StripeGateway.php @@ -118,8 +118,6 @@ class StripeGateway { 'quantity' => $this->quantity, 'trial_end' => $this->getTrialEndForUpdate(), ]; - if ($this->coupon) $payload['coupon'] = $this->coupon; - return $payload; }
Removing Coupon from Subscription. Previously, the coupon was added to both the Customer AND Subscription objects. This caused the coupon to be redeemed twice, which presented problems for coupons with a limited redemption count. Now, the coupon is only added to the Customer object.
laravel_cashier
train
9ac8b98167fb40982a350be1c52deda0c166365b
diff --git a/atomic_reactor/plugins/post_pulp_pull.py b/atomic_reactor/plugins/post_pulp_pull.py index <HASH>..<HASH> 100644 --- a/atomic_reactor/plugins/post_pulp_pull.py +++ b/atomic_reactor/plugins/post_pulp_pull.py @@ -16,7 +16,7 @@ discover the image ID Docker will give it. from __future__ import unicode_literals from atomic_reactor.constants import PLUGIN_PULP_PUSH_KEY, PLUGIN_PULP_SYNC_KEY -from atomic_reactor.plugin import PostBuildPlugin +from atomic_reactor.plugin import PostBuildPlugin, ExitPlugin from atomic_reactor.plugins.exit_remove_built_image import defer_removal from atomic_reactor.util import get_manifest_digests from docker.errors import NotFound @@ -28,7 +28,11 @@ class CraneTimeoutError(Exception): pass -class PulpPullPlugin(PostBuildPlugin): +# Note: We use multiple inheritance here only to make it explicit that +# this plugin needs to act as both an exit plugin (since arrangement +# version 4) and as a post-build plugin (arrangement version < 4). In +# fact, ExitPlugin is a subclass of PostBuildPlugin. +class PulpPullPlugin(ExitPlugin, PostBuildPlugin): key = 'pulp_pull' is_allowed_to_fail = False diff --git a/tests/plugins/test_pulp_pull.py b/tests/plugins/test_pulp_pull.py index <HASH>..<HASH> 100644 --- a/tests/plugins/test_pulp_pull.py +++ b/tests/plugins/test_pulp_pull.py @@ -6,6 +6,7 @@ This software may be modified and distributed under the terms of the BSD license. See the LICENSE file for details. """ +from atomic_reactor.plugin import PostBuildPlugin, ExitPlugin from atomic_reactor.plugins.post_pulp_pull import (PulpPullPlugin, CraneTimeoutError) from atomic_reactor.inner import TagConf, PushConf @@ -336,3 +337,10 @@ class TestPostPulpPull(object): # Image ID is updated in workflow assert workflow.builder.image_id == test_id + + def test_plugin_type(self): + # arrangement versions < 4 + assert issubclass(PulpPullPlugin, PostBuildPlugin) + + # arrangement version >= 4 + assert issubclass(PulpPullPlugin, ExitPlugin)
Allow pulp_pull to act as an exit plugin. This is required for arrangement version 4, in which pulp_pull runs as an exit plugin. It must run in that phase because it has to run after pulp_publish, also an exit plugin -- pulp_publish has work to do when a build is failing, i.e. remove v1 content. For arrangement versions earlier than 4, pulp_pull must still be able to run as a post-build plugin.
projectatomic_atomic-reactor
train
b9d71ed7b9ea1bcf3f9427d740f26f8b84207e34
diff --git a/core/common/src/main/java/alluxio/underfs/UnderFileSystemConfiguration.java b/core/common/src/main/java/alluxio/underfs/UnderFileSystemConfiguration.java index <HASH>..<HASH> 100644 --- a/core/common/src/main/java/alluxio/underfs/UnderFileSystemConfiguration.java +++ b/core/common/src/main/java/alluxio/underfs/UnderFileSystemConfiguration.java @@ -44,13 +44,6 @@ public final class UnderFileSystemConfiguration { } /** - * @return the map of user-customized configuration - */ - public Map<String, String> userSpecifiedConf() { - return Collections.unmodifiableMap(mUfsConf); - } - - /** * Gets the value of the given key in the given UFS configuration or the global configuration * (in case the key is not found in the UFS configuration), throw {@link RuntimeException} if the * key is not found in both configurations. @@ -69,31 +62,9 @@ public final class UnderFileSystemConfiguration { } /** - * Gets the value of the given key in the given UFS configuration or the global configuration - * (in case the key is not found in the UFS configuration), throw {@link RuntimeException} if the - * key is not found in both configurations. - * - * @param key property key - * @param ufsConf configuration for the UFS - * @return the value associated with the given key - */ - public static String getValue1(PropertyKey key, Map<String, String> ufsConf) { - if (ufsConf != null && ufsConf.containsKey(key.toString())) { - return ufsConf.get(key.toString()); - } - if (Configuration.containsKey(key)) { - return Configuration.get(key); - } - throw new RuntimeException("key " + key + " not found"); - } - - /** - * @param key property key - * @param ufsConf configuration for the UFS - * @return true if the key is contained in the given UFS configuration or global configuration + * @return the map of user-customized configuration */ - public static boolean containsKey1(PropertyKey key, Map<String, String> ufsConf) { - return (ufsConf != null && ufsConf.containsKey(key.toString())) || Configuration - .containsKey(key); + public Map<String, String> userSpecifiedConf() { + return Collections.unmodifiableMap(mUfsConf); } }
[ALLUXIO-<I>] Remove unused methods
Alluxio_alluxio
train
8db8d01712707bf88fa81572fca44dbb2e0ae3d5
diff --git a/rafthttp/util.go b/rafthttp/util.go index <HASH>..<HASH> 100644 --- a/rafthttp/util.go +++ b/rafthttp/util.go @@ -16,7 +16,6 @@ package rafthttp import ( "crypto/tls" - "encoding/binary" "fmt" "io" "net" @@ -27,7 +26,6 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" ) @@ -61,31 +59,6 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) } -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} - // createPostRequest creates a HTTP POST request that sends raft message. func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u diff --git a/rafthttp/util_test.go b/rafthttp/util_test.go index <HASH>..<HASH> 100644 --- a/rafthttp/util_test.go +++ b/rafthttp/util_test.go @@ -16,6 +16,8 @@ package rafthttp import ( "bytes" + "encoding/binary" + "io" "net/http" "reflect" "testing" @@ -191,3 +193,28 @@ func TestCheckVersionCompatibility(t *testing.T) { } } } + +func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { + size := ent.Size() + if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { + return err + } + b, err := ent.Marshal() + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { + var l uint64 + if err := binary.Read(r, binary.BigEndian, &l); err != nil { + return err + } + buf := make([]byte, int(l)) + if _, err := io.ReadFull(r, buf); err != nil { + return err + } + return ent.Unmarshal(buf) +}
rafthttp: move test-only functions to '_test.go'. Not used in the actual code base, only used in tests.
etcd-io_etcd
train
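The writeEntryTo/readEntryFrom helpers moved in the etcd change above implement a simple length-prefixed frame: an 8-byte big-endian size header followed by the marshalled entry. The same wire format sketched in Python with struct, treating the payload as opaque bytes rather than a protobuf raft entry:

    import io
    import struct

    def write_frame(w, payload):
        # 8-byte big-endian length header, then the payload itself.
        w.write(struct.pack('>Q', len(payload)))
        w.write(payload)

    def read_frame(r):
        (size,) = struct.unpack('>Q', r.read(8))
        buf = r.read(size)
        if len(buf) != size:
            raise EOFError('short read: wanted %d bytes, got %d' % (size, len(buf)))
        return buf

    stream = io.BytesIO()
    write_frame(stream, b'raft entry bytes')
    stream.seek(0)
    assert read_frame(stream) == b'raft entry bytes'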
2e9a5977688b7aac64666dac78c6658cc7863174
diff --git a/pixiedust/display/templates/pd_executeDisplay.js b/pixiedust/display/templates/pd_executeDisplay.js index <HASH>..<HASH> 100644 --- a/pixiedust/display/templates/pd_executeDisplay.js +++ b/pixiedust/display/templates/pd_executeDisplay.js @@ -265,20 +265,16 @@ } } if (reply_callbacks && reply_callbacks.answer_input_reply){ - setTimeout(function(){ - var answer = reply_callbacks.answer_input_reply; - reply_callbacks.answer_input_reply = null; - send_input_reply(reply_callbacks, answer, pd_controls); - }, 0); + var answer = reply_callbacks.answer_input_reply; + reply_callbacks.answer_input_reply = null; + send_input_reply(reply_callbacks, answer, pd_controls); }else{ var next_input_reply = pixiedust.input_reply_queue.queue.shift(); if (next_input_reply && next_input_reply.command == "no_op_delay"){ next_input_reply = pixiedust.input_reply_queue.queue.shift(); } if (next_input_reply){ - setTimeout(function(){ - send_input_reply(next_input_reply.callbacks, next_input_reply.command, pd_controls); - }, 0); + send_input_reply(next_input_reply.callbacks, next_input_reply.command, pd_controls); }else{ pixiedust.input_reply_queue.inflight = null; } @@ -434,6 +430,12 @@ if (user_controls.send_input_reply){ command = user_controls.send_input_reply; callbacks.answer_input_reply = user_controls.answer_input_reply; + {#remove any no_op command to avoid hang#} + pixiedust.input_reply_queue.queue = pixiedust.input_reply_queue.queue.filter( + function(item){ + return item.command!='no_op'; + } + ); if (pixiedust.input_reply_queue.inflight || pixiedust.input_reply_queue.queue.length > 0 || command == "no_op_delay"){ pixiedust.input_reply_queue.queue.push({"command":command, "callbacks":callbacks}); }else{
Fixed race condition when user clicks too fast or network is too slow
pixiedust_pixiedust
train
850e54ca7b72c7d53e6c989e7ae41f8f8c706aab
diff --git a/go/pfconfigdriver/structs.go b/go/pfconfigdriver/structs.go index <HASH>..<HASH> 100644 --- a/go/pfconfigdriver/structs.go +++ b/go/pfconfigdriver/structs.go @@ -440,27 +440,28 @@ type ClusterSummary struct { type PfConfAdvanced struct { StructConfig - PfconfigMethod string `val:"hash_element"` - PfconfigNS string `val:"config::Pf"` - PfconfigHashNS string `val:"advanced"` - HashingCost string `val:"hashing_cost"` - ScanOnAccounting string `val:"scan_on_accounting"` - PffilterProcesses string `val:"pffilter_processes"` - UpdateIplogWithAccounting string `val:"update_iplog_with_accounting"` - AdminCspSecurityHeaders string `val:"admin_csp_security_headers"` - Multihost string `val:"multihost"` - SsoOnAccessReevaluation string `val:"sso_on_access_reevaluation"` - VlanPoolTechnique string `val:"vlan_pool_technique"` - DisablePfDomainAuth string `val:"disable_pf_domain_auth"` - TimingStatsLevel string `val:"timing_stats_level"` - SsoOnDhcp string `val:"sso_on_dhcp"` - Language string `val:"language"` - StatsdListenPort string `val:"statsd_listen_port"` - SsoOnAccounting string `val:"sso_on_accounting"` - LocationlogCloseOnAccountingStop string `val:"locationlog_close_on_accounting_stop"` - PortalCspSecurityHeaders string `val:"portal_csp_security_headers"` - HashPasswords string `val:"hash_passwords"` - SourceToSendSmsWhenCreatingUsers string `val:"source_to_send_sms_when_creating_users"` - ActiveDirectoryOsJoinCheckBypass string `val:"active_directory_os_join_check_bypass"` - LdapAttributes string `val:"ldap_attributes"` + PfconfigMethod string `val:"hash_element"` + PfconfigNS string `val:"config::Pf"` + PfconfigHashNS string `val:"advanced"` + HashingCost string `json:"hashing_cost"` + ScanOnAccounting string `json:"scan_on_accounting"` + PffilterProcesses string `json:"pffilter_processes"` + UpdateIplogWithAccounting string `json:"update_iplog_with_accounting"` + AdminCspSecurityHeaders string `json:"admin_csp_security_headers"` + Multihost string `json:"multihost"` + SsoOnAccessReevaluation string `json:"sso_on_access_reevaluation"` + VlanPoolTechnique string `json:"vlan_pool_technique"` + DisablePfDomainAuth string `json:"disable_pf_domain_auth"` + TimingStatsLevel string `json:"timing_stats_level"` + SsoOnDhcp string `json:"sso_on_dhcp"` + Language string `json:"language"` + StatsdListenPort string `json:"statsd_listen_port"` + SsoOnAccounting string `json:"sso_on_accounting"` + LocationlogCloseOnAccountingStop string `json:"locationlog_close_on_accounting_stop"` + PortalCspSecurityHeaders string `json:"portal_csp_security_headers"` + HashPasswords string `json:"hash_passwords"` + SourceToSendSmsWhenCreatingUsers string `json:"source_to_send_sms_when_creating_users"` + ActiveDirectoryOsJoinCheckBypass string `json:"active_directory_os_join_check_bypass"` + PfperlApiTimeout string `json:"pfperl_api_timeout"` + LdapAttributes []string `json:"ldap_attributes"` }
Fixed PfConfAdvanced struct
inverse-inc_packetfence
train
a0720061c8d049276d19164e0cc087b358691733
diff --git a/ui/src/admin/containers/AdminChronografPage.js b/ui/src/admin/containers/AdminChronografPage.js index <HASH>..<HASH> 100644 --- a/ui/src/admin/containers/AdminChronografPage.js +++ b/ui/src/admin/containers/AdminChronografPage.js @@ -79,20 +79,10 @@ class AdminChronografPage extends Component { } } - handleBatchDeleteUsers = () => { - console.log('Delete Users') - } - - handleBatchRemoveOrgFromUsers = arg => { - console.log(arg) - } - - handleBatchAddOrgToUsers = arg => { - console.log(arg) - } - handleBatchChangeUsersRole = arg => { - console.log(arg) - } + handleBatchDeleteUsers = () => {} + handleBatchRemoveOrgFromUsers = () => {} + handleBatchAddOrgToUsers = () => {} + handleBatchChangeUsersRole = () => {} handleUpdateUserRole = () => (user, currentRole, newRole) => { console.log(user, currentRole, newRole)
Clear console logs on batch actions for now
influxdata_influxdb
train
93dc4bc8951f69a490f85658da77a7c43f0993ce
diff --git a/lib/commonAPI/coreapi/public/api/Rho.ORM.js b/lib/commonAPI/coreapi/public/api/Rho.ORM.js index <HASH>..<HASH> 100644 --- a/lib/commonAPI/coreapi/public/api/Rho.ORM.js +++ b/lib/commonAPI/coreapi/public/api/Rho.ORM.js @@ -453,8 +453,8 @@ if(what === undefined) what = "all"; var found = opalModel.$find(what,options); - if(JSON.stringify(found).length == 2) - return []; + //if(JSON.stringify(found).length == 2) + //return []; switch (what) { case 'all' : return wrapOpalObjects(found); case 'count': return found ;
(Ashikh Pai, <I>/<I>/<I>) The piece of code I have commented out was restricting the count value to 9, i.e. a single-digit number only. By commenting it out, I'm now able to read a count value greater than 9, i.e. a two-digit number can also be read.
rhomobile_rhodes
train
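The guard commented out in the rhodes change above failed for two-digit counts because JSON.stringify(found).length == 2 matches any two-character serialization, not just the empty "{}" or "[]": a count of 10 through 99 stringifies to two characters and was misread as an empty result. The same false positive reproduced in Python:

    import json

    def looks_empty_bad(value):
        # Broken heuristic mirroring the removed JavaScript check.
        return len(json.dumps(value)) == 2

    print(looks_empty_bad({}))   # True  -- the intended case
    print(looks_empty_bad([]))   # True  -- the intended case
    print(looks_empty_bad(10))   # True  -- false positive for counts 10..99
    print(looks_empty_bad(9))    # False -- single-digit counts worked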
1f29626bba14f6b4e21971377c0f28e95e8687a3
diff --git a/website/source/javascripts/app/Engine.Point.js b/website/source/javascripts/app/Engine.Point.js index <HASH>..<HASH> 100644 --- a/website/source/javascripts/app/Engine.Point.js +++ b/website/source/javascripts/app/Engine.Point.js @@ -53,6 +53,11 @@ Engine.Point.prototype = { y: 0 }, + resize: function(){ + this.target.x = this.pos.x = this.ref.x * this.shapeSize.x; + this.target.y = this.pos.y = this.ref.y * this.shapeSize.y; + }, + updateBreathingPhysics: function(){ this.stiffness = 0.1; this.friction = 0.05; diff --git a/website/source/javascripts/app/Engine.Shape.js b/website/source/javascripts/app/Engine.Shape.js index <HASH>..<HASH> 100644 --- a/website/source/javascripts/app/Engine.Shape.js +++ b/website/source/javascripts/app/Engine.Shape.js @@ -47,6 +47,20 @@ Engine.Shape.prototype = { breathLength: 1, breatheIn: false, + resize: function(newSize, offset){ + var len, p; + + this.size.x = newSize; + this.size.y = newSize; + + this.pos.x = -(newSize / 2); + this.pos.y = -(newSize / 2 + offset); + + for (p = 0, len = this.points.length; p < len; p++) { + this.points[p].resize(); + } + }, + startBreathing: function(){ var p; diff --git a/website/source/javascripts/app/Engine.js b/website/source/javascripts/app/Engine.js index <HASH>..<HASH> 100644 --- a/website/source/javascripts/app/Engine.js +++ b/website/source/javascripts/app/Engine.js @@ -115,12 +115,6 @@ Engine = Base.extend({ }, setupEvents: function(){ - if (window.innerWidth < 570) { - this.height = 560; - } else { - this.height = 700; - } - this.resize = this.resize.bind(this); this.resize(); window.addEventListener('resize', this.resize, false); @@ -290,7 +284,14 @@ Engine = Base.extend({ }, resize: function(){ - var scale = this.scale; + var scale = this.scale, + size, offset; + + if (window.innerWidth < 570) { + this.height = 560; + } else { + this.height = 700; + } this.width = window.innerWidth; @@ -306,6 +307,17 @@ Engine = Base.extend({ if (this.grid) { this.grid.resize(this.width, this.height); } + + if (this.logo) { + if (this.height === 560) { + size = 300; + offset = 0; + } else { + size = 360; + offset = 40; + } + this.logo.resize(size, offset); + } }, _handleMouseCoords: function(event){
Sometimes you like, fuggit, make it responsive. So now the logo dynamically changes based on screen size...
hashicorp_terraform
train
3721a0905c97e2ecba9566826876026e245e8f3c
diff --git a/maven/io.sarl.maven.docs.generator/src/main/java/io/sarl/maven/docs/generator/SARLDocGenerate.java b/maven/io.sarl.maven.docs.generator/src/main/java/io/sarl/maven/docs/generator/SARLDocGenerate.java index <HASH>..<HASH> 100644 --- a/maven/io.sarl.maven.docs.generator/src/main/java/io/sarl/maven/docs/generator/SARLDocGenerate.java +++ b/maven/io.sarl.maven.docs.generator/src/main/java/io/sarl/maven/docs/generator/SARLDocGenerate.java @@ -78,7 +78,7 @@ public class SARLDocGenerate extends XtendTestCompile { * @parameter default-value="${basedir}/target/jnario-doc" * @required */ - private String docOutputDirectory; + protected String docOutputDirectory; /** * Location of the generated JUnit XML reports. @@ -86,50 +86,54 @@ public class SARLDocGenerate extends XtendTestCompile { * @parameter default-value="${basedir}/target/surefire-reports" * @required */ - private String reportsDirectory; + protected String reportsDirectory; /** * Location of the generated JUnit XML reports. * * @parameter */ - private String sourceDirectory; + protected String sourceDirectory; /** * Location of the generated JUnit XML reports. * * @parameter default-value="true" */ - private boolean sectionNumbering; + protected boolean sectionNumbering; /** * The project itself. This parameter is set by maven. * - * @parameter project="${project}" + * @parameter property="project" * @required */ - private MavenProject hiddenProjectReference; + @SuppressWarnings("hiding") + protected MavenProject project; /** * Set this to true to skip compiling Xtend sources. * - * @parameter default-value="false" expression="${skipXtend}" + * @parameter default-value="false" property="skipXtend" */ - private boolean hiddenSkipXtendFlag; + @SuppressWarnings("hiding") + protected boolean skipXtend; /** * Xtend-File encoding argument for the compiler. * - * @parameter expression="${encoding}" default-value="${project.build.sourceEncoding}" + * @parameter property="encoding" default-value="${project.build.sourceEncoding}" */ - private String hiddenEncodingSpecification; + @SuppressWarnings("hiding") + protected String encoding; /** * Set this to false to suppress the creation of *._trace files. * - * @parameter default-value="true" expression="${writeTraceFiles}" + * @parameter default-value="true" property="writeTraceFiles" */ - private boolean hiddenWriteTraceFilesFlag; + @SuppressWarnings("hiding") + protected boolean writeTraceFiles; @Inject private RuntimeWorkspaceConfigProvider workspaceConfigProvider; @@ -139,10 +143,10 @@ public class SARLDocGenerate extends XtendTestCompile { @Override protected void internalExecute() throws MojoExecutionException { // Only for injecting the attributes from Maven. - this.project = this.hiddenProjectReference; - this.skipXtend = this.hiddenSkipXtendFlag; - this.encoding = this.hiddenEncodingSpecification; - this.writeTraceFiles = this.hiddenWriteTraceFilesFlag; + super.project = this.project; + super.skipXtend = this.skipXtend; + super.encoding = this.encoding; + super.writeTraceFiles = this.writeTraceFiles; getLog().debug("Set section numbering: " + Boolean.toString(this.sectionNumbering)); //$NON-NLS-1$ MavenConfig.setSectionNumbering(this.sectionNumbering);
[maven] Fixing deprecated property declarations.
sarl_sarl
train
44a698be0b32b94b67d5c18e114b4c1d04ffdbc3
diff --git a/lib/neovim/client.rb b/lib/neovim/client.rb index <HASH>..<HASH> 100644 --- a/lib/neovim/client.rb +++ b/lib/neovim/client.rb @@ -55,6 +55,11 @@ module Neovim rpc_response(:vim_change_directory, dir) end + def current_buffer + index = rpc_response(:vim_get_current_buffer) + Buffer.new(index, self) + end + def current_line rpc_response(:vim_get_current_line) end diff --git a/spec/neovim/client_spec.rb b/spec/neovim/client_spec.rb index <HASH>..<HASH> 100644 --- a/spec/neovim/client_spec.rb +++ b/spec/neovim/client_spec.rb @@ -78,6 +78,13 @@ module Neovim end end + describe "#current_buffer" do + it "returns a buffer" do + buffer = client.current_buffer + expect(buffer).to be_a(Buffer) + end + end + describe "#current_line=" do it "sets the content of the current line" do client.current_line = "New content"
add #current_buffer to client
neovim_neovim-ruby
train
9802ed0cbb547ad8379d443585c37b5681e50a34
diff --git a/src/main/java/net/greghaines/jesque/Config.java b/src/main/java/net/greghaines/jesque/Config.java index <HASH>..<HASH> 100644 --- a/src/main/java/net/greghaines/jesque/Config.java +++ b/src/main/java/net/greghaines/jesque/Config.java @@ -139,6 +139,21 @@ public class Config implements Serializable return this.jobPackage; } + /** + * @return the Redis protocol URI this Config will connect to + */ + public String getURI() + { + return "redis://" + this.host + ":" + this.port + "/" + this.database; + } + + @Override + public String toString() + { + return "<" + getURI() + " namespace=" + this.namespace + + " jobPackage=" + this.jobPackage + " timeout=" + this.timeout + ">"; + } + @Override public int hashCode() {
Added a meaningful toString() to Config and also added Config.getURI().
gresrun_jesque
train
c454edd54615a0b6d17a29e7e27719aafcff2eae
diff --git a/wakatime/heartbeat.py b/wakatime/heartbeat.py index <HASH>..<HASH> 100644 --- a/wakatime/heartbeat.py +++ b/wakatime/heartbeat.py @@ -255,6 +255,9 @@ class Heartbeat(object): if self._file_exists(): return + if not self.entity: + return + self.args.local_file = self._to_unc_path(self.entity) def _to_unc_path(self, filepath):
fix: Don't try to map entity to UNC path if entity is not set.
wakatime_wakatime
train
3e505ecf29a73e4c720b5e8e078d2f0938f0c47d
diff --git a/strftime.go b/strftime.go index <HASH>..<HASH> 100644 --- a/strftime.go +++ b/strftime.go @@ -49,31 +49,37 @@ var directives = map[byte]appender{ '%': verbatim("%"), } -func compile(wl *appenderList, p string) error { - var prev appender - var prevCanCombine bool - var appendw = func(w appender) { - if prevCanCombine { - if wc, ok := w.(combiner); ok && wc.canCombine() { - prev = prev.(combiner).combine(wc) - (*wl)[len(*wl)-1] = prev - return - } +type combiningAppend struct { + list appenderList + prev appender + prevCanCombine bool +} + +func (ca *combiningAppend) Append(w appender) { + if ca.prevCanCombine { + if wc, ok := w.(combiner); ok && wc.canCombine() { + ca.prev = ca.prev.(combiner).combine(wc) + ca.list[len(ca.list)-1] = ca.prev + return } + } - *wl = append(*wl, w) - prev = w - prevCanCombine = false - if comb, ok := w.(combiner); ok { - if comb.canCombine() { - prevCanCombine = true - } + ca.list = append(ca.list, w) + ca.prev = w + ca.prevCanCombine = false + if comb, ok := w.(combiner); ok { + if comb.canCombine() { + ca.prevCanCombine = true } } +} + +func compile(wl *appenderList, p string) error { + var ca combiningAppend for l := len(p); l > 0; l = len(p) { i := strings.IndexByte(p, '%') if i < 0 || i == l-1 { - appendw(verbatim(p)) + ca.Append(verbatim(p)) // this is silly, but I don't trust break keywords when there's a // possibility of this piece of code being rearranged p = p[l:] @@ -84,7 +90,7 @@ func compile(wl *appenderList, p string) error { // we already know that i < l - 1 // everything up to the i is verbatim if i > 0 { - appendw(verbatim(p[:i])) + ca.Append(verbatim(p[:i])) p = p[i:] } @@ -92,10 +98,12 @@ func compile(wl *appenderList, p string) error { if !ok { return errors.Errorf(`unknown time format specification '%c'`, p[1]) } - appendw(directive) + ca.Append(directive) p = p[2:] } + *wl = ca.list + return nil }
this doesn't really change anything, but we get to save on a closure
lestrrat-go_strftime
train
6336eb24745589c45487dbce59371e810cb80963
diff --git a/domain-management/src/test/java/org/jboss/as/domain/management/security/adduser/PropertyFileFinderPermissionsTestCase.java b/domain-management/src/test/java/org/jboss/as/domain/management/security/adduser/PropertyFileFinderPermissionsTestCase.java index <HASH>..<HASH> 100644 --- a/domain-management/src/test/java/org/jboss/as/domain/management/security/adduser/PropertyFileFinderPermissionsTestCase.java +++ b/domain-management/src/test/java/org/jboss/as/domain/management/security/adduser/PropertyFileFinderPermissionsTestCase.java @@ -69,6 +69,12 @@ public class PropertyFileFinderPermissionsTestCase extends PropertyTestHelper { return propertyUserFile; } + // As we're reusing StateValues across invocation of PropertyFileFinder execute() + // we need to reset permissions state prior to executing newxt test. + private void resetStateValuePermissionsFlags() { + values.setValidFilePermissions(true); + values.setFilePermissionsProblemPath(null); + } @Test public void testPropertyFileFinderFilePermissions() throws IOException { @@ -91,6 +97,8 @@ public class PropertyFileFinderPermissionsTestCase extends PropertyTestHelper { State nextState = propertyFileFinder.execute(); assertTrue(nextState instanceof ErrorState); standaloneDir.setReadable(true); + // Don't forget to reset permissions state + resetStateValuePermissionsFlags(); } // Test parent dir without execute @@ -98,6 +106,8 @@ public class PropertyFileFinderPermissionsTestCase extends PropertyTestHelper { State nextState = propertyFileFinder.execute(); assertTrue(nextState instanceof ErrorState); standaloneDir.setExecutable(true); + // Don't forget to reset permissions state + resetStateValuePermissionsFlags(); } // Test file without read @@ -105,6 +115,8 @@ public class PropertyFileFinderPermissionsTestCase extends PropertyTestHelper { State nextState = propertyFileFinder.execute(); assertTrue(nextState instanceof ErrorState); standaloneMgmtUserFile.setReadable(true); + // Don't forget to reset permissions state + resetStateValuePermissionsFlags(); } // Test file without write @@ -112,6 +124,8 @@ public class PropertyFileFinderPermissionsTestCase extends PropertyTestHelper { State nextState = propertyFileFinder.execute(); assertTrue(nextState instanceof ErrorState); standaloneMgmtUserFile.setWritable(true); + // Don't forget to reset permissions state + resetStateValuePermissionsFlags(); } }
WFCORE-<I> - Updated unit test case. As PropertyFileFinder.execute() is called multiple times in succession, need to reset the StateValues permissions flag between each invocation.
wildfly_wildfly-core
train
567df74e0b2f3c09a0e9a00afa36d41a40604558
diff --git a/lib/index.js b/lib/index.js index <HASH>..<HASH> 100644 --- a/lib/index.js +++ b/lib/index.js @@ -43,7 +43,7 @@ function spawn(command, args, options) { p.on('close', function (code) { if (code !== 0) { var commandStr = command + (args.length ? (' ' + args.join(' ')) : ''); -- deferred.reject({ + deferred.reject({ code: code, message: '"' + commandStr + '" failed with code ' + code });
Remove extraneous dash char
patrick-steele-idem_child-process-promise
train
6e87beff39bdba284893d99f3faedec054469bfa
diff --git a/lib/butterfli/caching.rb b/lib/butterfli/caching.rb index <HASH>..<HASH> 100644 --- a/lib/butterfli/caching.rb +++ b/lib/butterfli/caching.rb @@ -8,7 +8,12 @@ module Butterfli::Caching @cache = p end def cache - return @cache || (self.cache = ( Butterfli.configuration.cache && Butterfli.configuration.cache.instantiate)) + return @cache || (self.cache = default_cache) + end + + private + def default_cache + (Butterfli.configuration.cache && Butterfli.configuration.cache.instantiate) || Butterfli::Configuration::MemoryCache.new.instantiate end end end
Changed: Default to memory cache if no cache is configured
delner_butterfli
train
a57cd3878d80857ec73573941d90a70ad46de801
diff --git a/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb b/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb index <HASH>..<HASH> 100644 --- a/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb +++ b/activerecord/lib/active_record/connection_adapters/abstract/schema_statements.rb @@ -1161,31 +1161,34 @@ module ActiveRecord end protected - def add_index_sort_order(option_strings, column_names, options = {}) - if options.is_a?(Hash) && order = options[:order] + + def add_index_sort_order(quoted_columns, **options) + if order = options[:order] case order when Hash - column_names.each {|name| option_strings[name] += " #{order[name].upcase}" if order.has_key?(name)} + quoted_columns.each { |name, column| column << " #{order[name].upcase}" if order[name].present? } when String - column_names.each {|name| option_strings[name] += " #{order.upcase}"} + quoted_columns.each { |name, column| column << " #{order.upcase}" if order.present? } end end - return option_strings + quoted_columns end # Overridden by the MySQL adapter for supporting index lengths - def quoted_columns_for_index(column_names, options = {}) - return [column_names] if column_names.is_a?(String) - - option_strings = Hash[column_names.map {|name| [name, ""]}] - - # add index sort order if supported + def add_options_for_index_columns(quoted_columns, **options) if supports_index_sort_order? - option_strings = add_index_sort_order(option_strings, column_names, options) + quoted_columns = add_index_sort_order(quoted_columns, options) end - column_names.map {|name| quote_column_name(name) + option_strings[name]} + quoted_columns + end + + def quoted_columns_for_index(column_names, **options) + return [column_names] if column_names.is_a?(String) + + quoted_columns = Hash[column_names.map { |name| [name, quote_column_name(name).dup] }] + add_options_for_index_columns(quoted_columns, options).values end def index_name_for_remove(table_name, options = {}) diff --git a/activerecord/lib/active_record/connection_adapters/abstract_mysql_adapter.rb b/activerecord/lib/active_record/connection_adapters/abstract_mysql_adapter.rb index <HASH>..<HASH> 100644 --- a/activerecord/lib/active_record/connection_adapters/abstract_mysql_adapter.rb +++ b/activerecord/lib/active_record/connection_adapters/abstract_mysql_adapter.rb @@ -713,32 +713,25 @@ module ActiveRecord MySQL::TypeMetadata.new(super(sql_type), extra: extra, strict: strict_mode?) end - def add_index_length(option_strings, column_names, options = {}) - if options.is_a?(Hash) && length = options[:length] + def add_index_length(quoted_columns, **options) + if length = options[:length] case length when Hash - column_names.each {|name| option_strings[name] += "(#{length[name]})" if length.has_key?(name) && length[name].present?} + quoted_columns.each { |name, column| column << "(#{length[name]})" if length[name].present? } when Integer - column_names.each {|name| option_strings[name] += "(#{length})"} + quoted_columns.each { |name, column| column << "(#{length})" } end end - return option_strings + quoted_columns end - def quoted_columns_for_index(column_names, options = {}) - option_strings = Hash[column_names.map {|name| [name, ""]}] - - # add index length - option_strings = add_index_length(option_strings, column_names, options) - - # add index sort order - option_strings = add_index_sort_order(option_strings, column_names, options) - - column_names.map {|name| quote_column_name(name) + option_strings[name]} + def add_options_for_index_columns(quoted_columns, **options) + quoted_columns = add_index_length(quoted_columns, options) + super end - # See https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html + # See https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html ER_DUP_ENTRY = 1062 ER_NO_REFERENCED_ROW_2 = 1452 ER_DATA_TOO_LONG = 1406
Refactor `quoted_columns_for_index` by extracting `add_options_for_index_columns`
rails_rails
train
838291cd14f0920cc42dc3a6f29112409314f0c4
diff --git a/session.go b/session.go index <HASH>..<HASH> 100644 --- a/session.go +++ b/session.go @@ -419,6 +419,10 @@ func (s *Session) maybeSendPacket() error { return s.sendPacket() } + if maxPacketSize == 0 { + return nil + } + if s.smallPacketDelayedOccurranceTime.IsZero() { s.smallPacketDelayedOccurranceTime = time.Now() }
don't set the small packets timer when no packet is ready for sending
lucas-clemente_quic-go
train
0e72ef1a105cfb295112df414f926957092869b4
diff --git a/spring-boot-integration-tests/spring-boot-gradle-tests/src/test/java/org/springframework/boot/starter/StarterDependenciesIntegrationTests.java b/spring-boot-integration-tests/spring-boot-gradle-tests/src/test/java/org/springframework/boot/starter/StarterDependenciesIntegrationTests.java index <HASH>..<HASH> 100644 --- a/spring-boot-integration-tests/spring-boot-gradle-tests/src/test/java/org/springframework/boot/starter/StarterDependenciesIntegrationTests.java +++ b/spring-boot-integration-tests/spring-boot-gradle-tests/src/test/java/org/springframework/boot/starter/StarterDependenciesIntegrationTests.java @@ -34,8 +34,6 @@ import org.junit.runners.Parameterized.Parameters; import org.springframework.boot.gradle.ProjectCreator; import org.springframework.boot.gradle.Versions; -import static org.junit.Assert.fail; - /** * Tests for the various starter projects to check that they don't pull in unwanted * transitive dependencies when used with Gradle @@ -109,11 +107,7 @@ public class StarterDependenciesIntegrationTests { project.newBuild().forTasks(task).withArguments(this.buildArguments).run(); } catch (BuildException ex) { - Throwable root = ex; - while (root.getCause() != null) { - root = root.getCause(); - } - fail(root.getMessage()); + throw new RuntimeException(ex); } }
Improve diagnostics for failures in StarterDependenciesIntegrationTests The root cause of the build failure sometimes has a null message which means there's no information available about why the build failed. Instead of calling fail when a BuildException occurs, this commit wraps it in a RuntimeException and rethrows it. This should make the entire chain of exceptions available when a failure occurs.
spring-projects_spring-boot
train
37e1e2cbed184344ff42d9c0c6df470051fc7383
diff --git a/src/com/connectsdk/service/RokuService.java b/src/com/connectsdk/service/RokuService.java index <HASH>..<HASH> 100644 --- a/src/com/connectsdk/service/RokuService.java +++ b/src/com/connectsdk/service/RokuService.java @@ -928,6 +928,15 @@ public class RokuService extends DeviceService implements Launcher, MediaPlayer, // mServiceReachability.start(); connected = true; + + Util.runOnUI(new Runnable() { + + @Override + public void run() { + if (listener != null) + listener.onConnectionSuccess(RokuService.this); + } + }); } @Override
Fixed a problem with Roku service not firing a proper event.
ConnectSDK_Connect-SDK-Android
train
48bb7ce902f16670c350b217da1779d88325f796
diff --git a/roast/resource.go b/roast/resource.go index <HASH>..<HASH> 100644 --- a/roast/resource.go +++ b/roast/resource.go @@ -132,11 +132,15 @@ func AssignResource(model coal.Model, res *jsonapi.Resource) error { // handle to many if rel.ToMany { - ids := make([]coal.ID, 0, len(doc.Data.Many)) - for _, rel := range doc.Data.Many { - ids = append(ids, coal.MustFromHex(rel.ID)) + if len(doc.Data.Many) > 0 { + ids := make([]coal.ID, 0, len(doc.Data.Many)) + for _, rel := range doc.Data.Many { + ids = append(ids, coal.MustFromHex(rel.ID)) + } + stick.MustSet(model, rel.Name, ids) + } else { + stick.MustSet(model, rel.Name, []coal.ID(nil)) } - stick.MustSet(model, rel.Name, ids) } } diff --git a/roast/resource_test.go b/roast/resource_test.go index <HASH>..<HASH> 100644 --- a/roast/resource_test.go +++ b/roast/resource_test.go @@ -22,7 +22,7 @@ func TestConvertModel(t *testing.T) { ID: id.Hex(), Attributes: jsonapi.Map{ "string": "String", - "bool": false, + "bool": false, }, Relationships: map[string]*jsonapi.Document{ "one": { @@ -54,7 +54,7 @@ func TestConvertModel(t *testing.T) { ID: id.Hex(), Attributes: jsonapi.Map{ "string": "String", - "bool": false, + "bool": false, }, Relationships: map[string]*jsonapi.Document{ "one": { @@ -89,7 +89,7 @@ func TestConvertModel(t *testing.T) { ID: id.Hex(), Attributes: jsonapi.Map{ "string": "String", - "bool": false, + "bool": false, }, Relationships: map[string]*jsonapi.Document{ "one": { @@ -198,7 +198,7 @@ func TestAssignResource(t *testing.T) { }) assert.NoError(t, err) assert.Equal(t, fooModel{ - Many: []coal.ID{}, + Many: nil, }, foo) foo = fooModel{} diff --git a/roast/tester.go b/roast/tester.go index <HASH>..<HASH> 100644 --- a/roast/tester.go +++ b/roast/tester.go @@ -214,7 +214,7 @@ func (t *Tester) Create(tt *testing.T, model, response, result coal.Model) Resul result.GetBase().DocID = res.ID() rec := coal.GetMeta(model).Make() t.Tester.Fetch(rec, res.ID()) - assert.Equal(tt, result, res) + assert.Equal(tt, result, rec) } return Result{ diff --git a/roast/tester_test.go b/roast/tester_test.go index <HASH>..<HASH> 100644 --- a/roast/tester_test.go +++ b/roast/tester_test.go @@ -18,7 +18,7 @@ func TestTester(t *testing.T) { tt.List(t, &fooModel{}, nil) - post := &fooModel{String: "String", Many: []coal.ID{}} + post := &fooModel{String: "String"} post = tt.Create(t, post, post, post).Model.(*fooModel) tt.List(t, &fooModel{}, []coal.Model{
always assign an empty relationship as nil
256dpi_fire
train
b964cdfe0f56d6c7d05af7acea05480148b67923
diff --git a/hotdoc/core/doc_repo.py b/hotdoc/core/doc_repo.py index <HASH>..<HASH> 100644 --- a/hotdoc/core/doc_repo.py +++ b/hotdoc/core/doc_repo.py @@ -53,6 +53,13 @@ Run hotdoc {subcommand} -h for more info """ +LANG_MAPPING = { + 'xml': 'markup', + 'html': 'markup', + 'py': 'python' +} + + class CoreExtension(BaseExtension): """ Banana banana @@ -71,7 +78,8 @@ class CoreExtension(BaseExtension): else: split = os.path.splitext(include_path) if len(split) == 2: - lang = split[1].strip('.') + ext = split[1].strip('.') + lang = LANG_MAPPING.get(ext) or ext with io.open(include_path, 'r', encoding='utf-8') as _: return _.read(), lang
doc_repo: add basic extension-based language mapping
hotdoc_hotdoc
train
5f466e98d86c970c2f6315039b2ae0893eb46a71
diff --git a/styx-scheduler-service/src/main/java/com/spotify/styx/docker/KubernetesDockerRunner.java b/styx-scheduler-service/src/main/java/com/spotify/styx/docker/KubernetesDockerRunner.java index <HASH>..<HASH> 100644 --- a/styx-scheduler-service/src/main/java/com/spotify/styx/docker/KubernetesDockerRunner.java +++ b/styx-scheduler-service/src/main/java/com/spotify/styx/docker/KubernetesDockerRunner.java @@ -205,7 +205,7 @@ class KubernetesDockerRunner implements DockerRunner { // Delete keys and secrets for all inactive service accounts and let them be recreated by future executions for (Secret secret : inactiveServiceAccountSecrets) { final String name = secret.getMetadata().getName(); - final String serviceAcountEmail = serviceAccountEmail(secret); + final String serviceAcount = secret.getMetadata().getAnnotations().get(STYX_WORKFLOW_SA_ID_ANNOTATION); try { final Map<String, String> annotations = secret.getMetadata().getAnnotations(); @@ -218,11 +218,11 @@ class KubernetesDockerRunner implements DockerRunner { LOG.info("[AUDIT] Deleting service account key: {}", p12KeyName); tryDeleteServiceAccountKey(p12KeyName); - LOG.info("[AUDIT] Deleting service account {} secret {}", serviceAcountEmail, name); + LOG.info("[AUDIT] Deleting service account {} secret {}", serviceAcount, name); client.secrets().delete(secret); } catch (IOException | KubernetesClientException e) { LOG.warn("[AUDIT] Failed to delete service account {} keys and/or secret {}", - serviceAcountEmail, name); + serviceAcount, name); } } } @@ -243,10 +243,6 @@ class KubernetesDockerRunner implements DockerRunner { } } - private String serviceAccountEmail(Secret secret) { - return secret.getMetadata().getAnnotations().get(STYX_WORKFLOW_SA_ID_ANNOTATION); - } - private void ensureSecrets(WorkflowInstance workflowInstance, RunSpec runSpec, String secretEpoch) throws IOException { ensureServiceAccountKeySecret(workflowInstance, runSpec, secretEpoch);
s/serviceAccountEmail/serviceAccount
spotify_styx
train
3f894c16d9c9e2807e7c5c5bfac6bd0a9d9a81cc
diff --git a/spambl.py b/spambl.py index <HASH>..<HASH> 100644 --- a/spambl.py +++ b/spambl.py @@ -44,6 +44,25 @@ class DNSBL(object): msg_template = 'Using a code value "{}" unsupported by DNSBL instance representing {}' raise UnknownCodeError(msg_template.format(code, self.identifier)), None, exc_info()[2] + def _get_host_and_query_prefix(self, host_collection): + ''' Get valid hosts and query prefixes from hosts given in host_collection + + :param host_collection: a container with valid host values + :returns: a tuple with original host and a value prepared for lookup + ''' + if self.lists_ips: + for ip in host_collection.ips: + ip = str(ip) + suffix = '.in-addr.arpa' if ip.version == 4 else '.ip6.arpa' + reverse = ip.replace(suffix, '') + + yield ip, reverse + + if self.lists_uris: + for hostname in host_collection.hostnames: + hostname = str(hostname) + yield hostname, hostname.rstrip('.') + if __name__ == '__main__': pass \ No newline at end of file
Add _get_host_and_query_prefix method to DNSBL class
piotr-rusin_spam-lists
train
bd1b289adb0a4571c87de40431eba6f872e026d4
diff --git a/cacheback/base.py b/cacheback/base.py index <HASH>..<HASH> 100644 --- a/cacheback/base.py +++ b/cacheback/base.py @@ -47,6 +47,7 @@ class JobBase(RenameMethodsBase): renamed_methods = ( ('get_constructor_args', 'get_init_args', RemovedInCacheback13Warning), ('get_constructor_kwargs', 'get_init_kwargs', RemovedInCacheback13Warning), + ('cache_set', 'store', RemovedInCacheback13Warning), ) @@ -158,7 +159,7 @@ class Job(six.with_metaclass(JobBase)): # empty result which will be returned until the cache is # refreshed. result = self.empty() - self.cache_set(key, self.timeout(*args, **kwargs), result) + self.store(key, self.timeout(*args, **kwargs), result) self.async_refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.MISS, @@ -192,7 +193,7 @@ class Job(six.with_metaclass(JobBase)): # prevents cache hammering but guards against a 'limbo' situation # where the refresh task fails for some reason. timeout = self.timeout(*args, **kwargs) - self.cache_set(key, timeout, data) + self.store(key, timeout, data) self.async_refresh(*args, **kwargs) return self.process_result( data, call=call, cache_status=self.STALE, sync_fetch=False) @@ -211,7 +212,7 @@ class Job(six.with_metaclass(JobBase)): item = self.cache.get(key) if item is not None: expiry, data = item - self.cache_set(key, self.timeout(*args, **kwargs), data) + self.store(key, self.timeout(*args, **kwargs), data) self.async_refresh(*args, **kwargs) def delete(self, *raw_args, **raw_kwargs): @@ -235,7 +236,7 @@ class Job(six.with_metaclass(JobBase)): def prepare_kwargs(self, **kwargs): return kwargs - def cache_set(self, key, expiry, data): + def store(self, key, expiry, data): """ Add a result to the cache @@ -260,9 +261,7 @@ class Job(six.with_metaclass(JobBase)): Fetch the result SYNCHRONOUSLY and populate the cache """ result = self.fetch(*args, **kwargs) - self.cache_set(self.key(*args, **kwargs), - self.expiry(*args, **kwargs), - result) + self.store(self.key(*args, **kwargs), self.expiry(*args, **kwargs), result) return result def async_refresh(self, *args, **kwargs): diff --git a/tests/test_base_job.py b/tests/test_base_job.py index <HASH>..<HASH> 100644 --- a/tests/test_base_job.py +++ b/tests/test_base_job.py @@ -116,7 +116,7 @@ class TestJob: def test_store(self): job = DummyJob() - job.cache_set(job.key('foo'), job.expiry(), True) + job.store(job.key('foo'), job.expiry(), True) assert job.key('foo') in job.cache def test_store_verify_fail(self, settings): @@ -126,7 +126,7 @@ class TestJob: job = DummyJob() with pytest.raises(RuntimeError) as exc: - job.cache_set(job.key('foo'), job.expiry(), True) + job.store(job.key('foo'), job.expiry(), True) assert 'Unable to save' in str(exc.value) @@ -135,7 +135,7 @@ class TestJob: 'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}} settings.CACHEBACK_VERIFY_CACHE_WRITE = False job = DummyJob() - job.cache_set(job.key('foo'), job.expiry(), True) + job.store(job.key('foo'), job.expiry(), True) assert job.key('foo') not in job.cache def test_refresh(self):
Rename cache_set to store to be more consistent in method naming.
codeinthehole_django-cacheback
train
c7e5c8de10a0bd396031df8c9328f3b44c793066
diff --git a/test/get-collection-spec.js b/test/get-collection-spec.js index <HASH>..<HASH> 100644 --- a/test/get-collection-spec.js +++ b/test/get-collection-spec.js @@ -11,7 +11,7 @@ describe('GET request for collection', function() { beforeEach(function() { fetchAllCalled = false; - var mockModel = getMockModel('foo'); + var mockModel = getMockModel('items'); this.k.expose(mockModel); mockResponse = new MockResponse(); spyOn(mockResponse, 'send'); @@ -23,7 +23,7 @@ describe('GET request for collection', function() { }); it('should respond with the result of the fetchAll promise', function() { - expect(mockResponse.send.calls.argsFor(0)[0]).toEqual('foo'); + expect(mockResponse.send.calls.argsFor(0)[0]).toEqual('items'); }); }); @@ -95,7 +95,7 @@ describe('GET request for collection', function() { beforeEach(function() { fetchAllCalled = false; - var mockModel = getMockModel('foo'); + var mockModel = getMockModel('items'); hooks = { before: function() { }, after: function() { }, @@ -125,7 +125,7 @@ describe('GET request for collection', function() { it('should call the after hook with the correct arguments', function() { expect(hooks.after.calls.argsFor(0)) - .toEqual(['foo', mockRequest, mockResponse]); + .toEqual(['items', mockRequest, mockResponse]); }); it('should call the beforeGetCollection hook with the correct arguments', @@ -137,7 +137,7 @@ describe('GET request for collection', function() { it('should call the afterGetCollection hook with the correct arguments', function() { expect(hooks.afterGetCollection.calls.argsFor(0)) - .toEqual(['foo', mockRequest, mockResponse]); + .toEqual(['items', mockRequest, mockResponse]); }); it('should call fetchAll', function() {
Changed 'foo' to 'items'
mikec_kalamata
train
67bc17b8237a34eaf5e8ee1d28ea6d78497ddcd9
diff --git a/html5lib/tests/test_tokenizer.py b/html5lib/tests/test_tokenizer.py index <HASH>..<HASH> 100644 --- a/html5lib/tests/test_tokenizer.py +++ b/html5lib/tests/test_tokenizer.py @@ -148,9 +148,6 @@ def runTokenizerTest(test): expected = concatenateCharacterTokens(test['output']) if 'lastStartTag' not in test: test['lastStartTag'] = None - outBuffer = cStringIO.StringIO() - stdout = sys.stdout - sys.stdout = outBuffer parser = TokenizerTestParser(test['initialState'], test['lastStartTag']) tokens = parser.parse(test['input'])
Remove debug omission of stdout from tokenizer tests.
html5lib_html5lib-python
train
fed34e0943a7c6bd0935209d772984e9195cdbe4
diff --git a/kbinxml/sixbit.py b/kbinxml/sixbit.py index <HASH>..<HASH> 100644 --- a/kbinxml/sixbit.py +++ b/kbinxml/sixbit.py @@ -1,5 +1,21 @@ # python 3 style, ints instead of b'' -from builtins import bytes +from builtins import bytes as newbytes + +def py2_int_to_bytes(n, length): + h = '%x' % n + s = ('0'*(len(h) % 2) + h).zfill(length*2).decode('hex') + return newbytes(s) + +try: + # python 3 + int.from_bytes + int_from_bytes = lambda b : int.from_bytes(b, byteorder='big') + int_to_bytes = lambda i, length : i.to_bytes(length, byteorder='big') +except AttributeError: + # python 2 + int_from_bytes = lambda b : int(bytes(b).encode('hex'), 16) + int_to_bytes = py2_int_to_bytes + charmap = '0123456789:ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz' bytemap = {charmap[i] : i for i in range(len(charmap))} @@ -14,7 +30,7 @@ def pack_sixbit(string, byteBuf): bits <<= 6 bits |= c bits <<= padding - data = bytes(bits.to_bytes((len(string)*6 + padding) // 8, byteorder='big')) + data = int_to_bytes(bits, (len(string)*6 + padding) // 8) byteBuf.append_bytes((len(string),)) byteBuf.append_bytes(data) @@ -25,7 +41,7 @@ def unpack_sixbit(byteBuf): padding = 8 - (length_bits % 8) if padding == 8: padding = 0 - bits = int.from_bytes(bytes(byteBuf.get_bytes(length_bytes)), byteorder='big') + bits = int_from_bytes(byteBuf.get_bytes(length_bytes)) bits >>= padding result = [] for _ in range(length):
Fix python 2 (again)
mon_kbinxml
train
602cf46c10947a649dfb2e09eb4289f5593a16e4
diff --git a/lib/OpenLayers/Control/SelectFeature.js b/lib/OpenLayers/Control/SelectFeature.js index <HASH>..<HASH> 100644 --- a/lib/OpenLayers/Control/SelectFeature.js +++ b/lib/OpenLayers/Control/SelectFeature.js @@ -586,7 +586,7 @@ OpenLayers.Control.SelectFeature = OpenLayers.Class(OpenLayers.Control, { }, /** - * Method: setLayer + * APIMethod: setLayer * Attach a new layer to the control, overriding any existing layers. * * Parameters:
Marking setLayer on SelectFeature Control as API, non-functional change git-svn-id: <URL>
openlayers_openlayers
train
871ff0d14ccc03edbc6a5697a9e3a744e37b3ec0
diff --git a/src/includes/lib/tabs.php b/src/includes/lib/tabs.php index <HASH>..<HASH> 100644 --- a/src/includes/lib/tabs.php +++ b/src/includes/lib/tabs.php @@ -22,7 +22,7 @@ function papi_get_tab_options( $options ) { if ( ! is_array( $options ) ) { if ( is_object( $options ) ) { - $options = (array)$options; + $options = (array) $options; } else { return null; } @@ -53,7 +53,7 @@ function papi_setup_tabs( $tabs ) { $_tabs = array(); foreach ( $tabs as $tab ) { - $tab = (object)$tab; + $tab = (object) $tab; if ( ! isset( $tab->options ) ) { continue; @@ -69,7 +69,8 @@ function papi_setup_tabs( $tabs ) { $tabs = papi_sort_order( $_tabs ); // Generate unique names for all tabs. - for ( $i = 0; $i < count( $tabs ); $i ++ ) { + $len = count( $tabs ); + for ( $i = 0; $i < $len; $i ++ ) { if ( empty( $tabs[$i] ) ) { continue; @@ -97,7 +98,7 @@ function papi_tab( $file_or_options, $properties = array() ) { // The tab key is important, it's says that we should render a tab meta box. // This may change in later version of Papi. - return (object)array( + return (object) array( 'options' => $options, 'properties' => $properties, 'tab' => true
Added len var in tabs for-loop. Fixed spacing after type casts
wp-papi_papi
train
192d6a46f921c4757476ab5989a116c6b7868da5
diff --git a/nodeup/pkg/model/kube_apiserver.go b/nodeup/pkg/model/kube_apiserver.go index <HASH>..<HASH> 100644 --- a/nodeup/pkg/model/kube_apiserver.go +++ b/nodeup/pkg/model/kube_apiserver.go @@ -71,8 +71,8 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { } key := "encryptionconfig" - encryptioncfg, _ := b.SecretStore.Secret(key) - if encryptioncfg != nil { + encryptioncfg, err := b.SecretStore.Secret(key) + if err == nil { contents := string(encryptioncfg.Data) t := &nodetasks.File{ Path: *encryptionConfigPath, @@ -81,6 +81,8 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { Type: nodetasks.FileType_File, } c.AddTask(t) + } else { + return fmt.Errorf("encryptionConfig enabled, but could not load encryptionconfig secret: %v", err) } } } diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index <HASH>..<HASH> 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -331,6 +331,19 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error { } } + if fi.BoolValue(c.Cluster.Spec.EncryptionConfig) { + secret, err := secretStore.FindSecret("encryptionconfig") + if err != nil { + return fmt.Errorf("could not load encryptionconfig secret: %v", err) + } + if secret == nil { + fmt.Println("") + fmt.Println("You have encryptionConfig enabled, but no encryptionconfig secret has been set.") + fmt.Println("See `kops create secret encryptionconfig -h` and https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/") + return fmt.Errorf("could not find encryptionconfig secret") + } + } + if err := c.addFileAssets(assetBuilder); err != nil { return err }
Errors when encryptionConfig is enabled, but no encryptionconfig secret When encryptionConfig is enabled, but the secret is missing, there are no visible errors anywhere. kube-apiserver just goes into a crashloop without any complaints. This PR adds warnings both on the client side and through nodeup.
kubernetes_kops
train
f46d3badf6b74187dcb9200ea9ffa333183b848d
diff --git a/Neos.Eel/Classes/FlowQuery/Operations/AddOperation.php b/Neos.Eel/Classes/FlowQuery/Operations/AddOperation.php index <HASH>..<HASH> 100644 --- a/Neos.Eel/Classes/FlowQuery/Operations/AddOperation.php +++ b/Neos.Eel/Classes/FlowQuery/Operations/AddOperation.php @@ -15,7 +15,9 @@ use Neos\Eel\FlowQuery\FlowQuery; use Neos\Flow\Annotations as Flow; /** - * Add another $flowQuery object to the current one. + * Adds the given items to the current context. + * The operation accepts one argument that may be an Array, a FlowQuery + * or an Object. */ class AddOperation extends AbstractOperation {
DOCS: Mention the different supported argument types of the `add` operation in documentation. The operation accepts Arrays, Objects and FlowQueries. Previously only FlowQuery was mentioned.
neos_flow-development-collection
train
20bddde1995319d540122e3dd6d39ff5faf30883
diff --git a/lib/rom/repository.rb b/lib/rom/repository.rb index <HASH>..<HASH> 100644 --- a/lib/rom/repository.rb +++ b/lib/rom/repository.rb @@ -12,6 +12,18 @@ module ROM # Setup a repository # + # @overload setup(type, *args) + # Sets up a single-repository given a repository type. + # For custom repositories, create an instance and pass it directly. + # + # @param [Symbol] type + # @param [Array] *args + # + # @overload setup(repository) + # @param [Repository] repository + # + # @return [Repository] a specific repository subclass + # # @example # module SuperDB # class Repository < ROM::Repository @@ -25,6 +37,10 @@ module ROM # Repository.setup(:super_db, some: 'options') # # SuperDB::Repository.new(some: 'options') is called # + # # or alternatively + # super_db = Repository.setup(SuperDB::Repository.new(some: 'options')) + # Repository.setup(super_db) + # # @api public def self.setup(repository_or_scheme, *args) case repository_or_scheme
Improve doc for ROM::Repository.setup [ci skip]
rom-rb_rom
train
08e622e0c1b0683e2336ec32a018f8899870a9c5
diff --git a/app/src/analytics/__tests__/make-event.test.js b/app/src/analytics/__tests__/make-event.test.js index <HASH>..<HASH> 100644 --- a/app/src/analytics/__tests__/make-event.test.js +++ b/app/src/analytics/__tests__/make-event.test.js @@ -53,6 +53,22 @@ describe('analytics events map', () => { }) }) + test('robot:SESSION_RESPONSE error -> protocolUpload event', () => { + const state = {} + const success = {type: 'robot:SESSION_RESPONSE'} + const failure = {type: 'robot:SESSION_RESPONSE', error: new Error('AH')} + + expect(makeEvent(state, success)).toEqual({ + name: 'protocolUpload', + properties: {success: true, error: ''} + }) + + expect(makeEvent(state, failure)).toEqual({ + name: 'protocolUpload', + properties: {success: false, error: 'AH'} + }) + }) + test('robot:RUN -> runStart event', () => { const state = {} const action = {type: 'robot:RUN'} @@ -63,6 +79,59 @@ describe('analytics events map', () => { }) }) + test('robot:PAUSE_RESPONSE -> runPause event', () => { + const state = {} + const success = {type: 'robot:PAUSE_RESPONSE'} + const failure = {type: 'robot:PAUSE_RESPONSE', error: new Error('AH')} + + expect(makeEvent(state, success)).toEqual({ + name: 'runPause', + properties: { + success: true, + error: '' + } + }) + + expect(makeEvent(state, failure)).toEqual({ + name: 'runPause', + properties: { + success: false, + error: 'AH' + } + }) + }) + + test('robot:CANCEL_REPSONSE -> runCancel event', () => { + const state = { + robot: { + session: { + startTime: 1000, + runTime: 5000 + } + } + } + const success = {type: 'robot:CANCEL_RESPONSE'} + const failure = {type: 'robot:CANCEL_RESPONSE', error: new Error('AH')} + + expect(makeEvent(state, success)).toEqual({ + name: 'runCancel', + properties: { + runTime: 4, + success: true, + error: '' + } + }) + + expect(makeEvent(state, failure)).toEqual({ + name: 'runCancel', + properties: { + runTime: 4, + success: false, + error: 'AH' + } + }) + }) + test('robot:RUN_RESPONSE success -> runFinish event', () => { const state = { robot: { @@ -79,4 +148,14 @@ describe('analytics events map', () => { properties: {runTime: 4} }) }) + + test('robot:RUN_RESPONSE error -> runError event', () => { + const state = {} + const action = {type: 'robot:RUN_RESPONSE', error: new Error('AH')} + + expect(makeEvent(state, action)).toEqual({ + name: 'runError', + properties: {error: 'AH'} + }) + }) }) diff --git a/app/src/analytics/make-event.js b/app/src/analytics/make-event.js index <HASH>..<HASH> 100644 --- a/app/src/analytics/make-event.js +++ b/app/src/analytics/make-event.js @@ -35,6 +35,17 @@ export default function makeEvent (state: State, action: Action): ?Event { } } + // $FlowFixMe(ka, 2018-06-5): flow type robot:SESSION_RESPONSE + case 'robot:SESSION_RESPONSE': + // TODO (ka, 2018-6-6): add file open type 'button' | 'drag-n-drop' (work required in action meta) + return { + name: 'protocolUpload', + properties: { + success: !action.error, + error: (action.error && action.error.message) || '' + } + } + // $FlowFixMe(mc, 2018-05-28): flow type robot:RUN case 'robot:RUN': return {name: 'runStart', properties: {}} @@ -45,8 +56,33 @@ export default function makeEvent (state: State, action: Action): ?Event { const runTime = robotSelectors.getRunSeconds(state) return {name: 'runFinish', properties: {runTime}} } else { - // TODO(mc, 2018-05-28): runError event - return null + return { + name: 'runError', + properties: { + error: action.error.message + } + } + } + // $FlowFixMe(ka, 2018-06-5): flow type robot:PAUSE_RESPONSE + case 'robot:PAUSE_RESPONSE': + return { + name: 'runPause', + properties: { + success: !action.error, + error: (action.error && action.error.message) || '' + } + } + + // $FlowFixMe(ka, 2018-06-5): flow type robot:CANCEL + case 'robot:CANCEL_RESPONSE': + const runTime = robotSelectors.getRunSeconds(state) + return { + name: 'runCancel', + properties: { + runTime, + success: !action.error, + error: (action.error && action.error.message) || '' + } } }
fix(app): Add priority 2 analytics events (#<I>) closes #<I>
Opentrons_opentrons
train
3f8e8ec91db4694c56343c3fda93a9d70a384d14
diff --git a/test/tools/javac/lambda/TargetType53.java b/test/tools/javac/lambda/TargetType53.java index <HASH>..<HASH> 100644 --- a/test/tools/javac/lambda/TargetType53.java +++ b/test/tools/javac/lambda/TargetType53.java @@ -26,7 +26,6 @@ * @bug 8007464 * @summary Add graph inference support * smoke test for graph inference - * @ignore 8008682: Core stream API classes * @compile TargetType53.java */ import java.util.*; diff --git a/test/tools/javac/lambda/TargetType54.java b/test/tools/javac/lambda/TargetType54.java index <HASH>..<HASH> 100644 --- a/test/tools/javac/lambda/TargetType54.java +++ b/test/tools/javac/lambda/TargetType54.java @@ -26,7 +26,6 @@ * @bug 8007464 * @summary Add graph inference support * smoke test for graph inference - * @ignore 8008682: Core stream API classes * @compile TargetType54.java */ import java.util.stream.*; diff --git a/test/tools/javac/lambda/TargetType58.java b/test/tools/javac/lambda/TargetType58.java index <HASH>..<HASH> 100644 --- a/test/tools/javac/lambda/TargetType58.java +++ b/test/tools/javac/lambda/TargetType58.java @@ -26,7 +26,6 @@ * @bug 8007464 * @summary Add graph inference support * more smoke tests for graph inference - * @ignore 8008682: Core stream API classes * @compile TargetType58.java */ import java.util.*; diff --git a/test/tools/javac/lambda/TargetType59.java b/test/tools/javac/lambda/TargetType59.java index <HASH>..<HASH> 100644 --- a/test/tools/javac/lambda/TargetType59.java +++ b/test/tools/javac/lambda/TargetType59.java @@ -26,7 +26,6 @@ * @bug 8007464 * @summary Add graph inference support * more smoke tests for graph inference - * @ignore 8008682: Core stream API classes * @compile TargetType59.java */ import java.util.*; diff --git a/test/tools/javac/lambda/TargetType62.java b/test/tools/javac/lambda/TargetType62.java index <HASH>..<HASH> 100644 --- a/test/tools/javac/lambda/TargetType62.java +++ b/test/tools/javac/lambda/TargetType62.java @@ -26,7 +26,6 @@ * @bug 8007464 * @summary Add graph inference support * check that new wildcards inference strategy doesn't run into 7190296 - * @ignore 8008682: Core stream API classes * @compile TargetType62.java */ import java.util.*;
<I>: Five lambda TargetType tests have @ignore Summary: Remove @ignore flags from tests that now pass Reviewed-by: jjg
wmdietl_jsr308-langtools
train
993e60bfc8afcab95c4f085b6f004b0425f7ab51
diff --git a/test/mobilize-base_test.rb b/test/mobilize-base_test.rb index <HASH>..<HASH> 100644 --- a/test/mobilize-base_test.rb +++ b/test/mobilize-base_test.rb @@ -59,8 +59,6 @@ describe "Mobilize" do assert test_destination_sheet.to_tsv == test_source_sheet.to_tsv - puts "stop test redis" - Mobilize::Jobtracker.stop_test_redis end end
removed redis drop at the end for chained testing
DeNA_mobilize-base
train
e980866d1abb811a1e4d75e53cb26f97a23ad0d5
diff --git a/dense_inference.py b/dense_inference.py index <HASH>..<HASH> 100644 --- a/dense_inference.py +++ b/dense_inference.py @@ -1,14 +1,18 @@ #!/usr/bin/python +""" +Usage: python dense_inference.py image annotations output + +Adapted from the original C++ example: densecrf/examples/dense_inference.cpp +http://www.philkr.net/home/densecrf Version 2.2 +""" + import numpy as np import cv2 import densecrf as dcrf from skimage.segmentation import relabel_sequential import sys -# Usage: -# python dense_inference.py image annotations output - img = cv2.imread(sys.argv[1], 1) labels = relabel_sequential(cv2.imread(sys.argv[2], 0))[0].flatten() output = sys.argv[3]
cleaner docstring and added reference to original C++ file
lucasb-eyer_pydensecrf
train
b372ac421e83c85b69869a15b101a3c2aa987813
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index <HASH>..<HASH> 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3142,16 +3142,7 @@ class Index(IndexOpsMixin, PandasObject): elif is_positional: indexer = key else: - try: - indexer = self.slice_indexer(start, stop, step, kind=kind) - except Exception: - if is_index_slice: - if self.is_integer(): - raise - else: - indexer = key - else: - raise + indexer = self.slice_indexer(start, stop, step, kind=kind) return indexer @@ -4676,11 +4667,11 @@ class Index(IndexOpsMixin, PandasObject): raise InvalidIndexError(key) else: raise e1 - except Exception: # pragma: no cover + except Exception: raise e1 except TypeError: - # python 3 - if is_scalar(key): # pragma: no cover + # e.g. "[False] is an invalid key" + if is_scalar(key): raise IndexError(key) raise InvalidIndexError(key) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index <HASH>..<HASH> 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -457,7 +457,11 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): try: self.get_loc(key) return True - except Exception: + except (ValueError, TypeError, KeyError): + # TypeError can be reached if we pass a tuple that is not hashable + # ValueError can be reached if pass a 2-tuple and parse_time_string + # raises with the wrong number of return values + # TODO: the latter is a bug in parse_time_string return False @cache_readonly @@ -765,7 +769,9 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): _, parsed, reso = parse_time_string(label, self.freq) bounds = self._parsed_string_to_bounds(reso, parsed) return bounds[0 if side == "left" else 1] - except Exception: + except ValueError: + # string cannot be parsed as datetime-like + # TODO: we need tests for this case raise KeyError(label) elif is_integer(label) or is_float(label): self._invalid_indexer("slice", label) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index <HASH>..<HASH> 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -630,7 +630,8 @@ class TimedeltaIndex( if _is_convertible_to_td(item): try: item = Timedelta(item) - except Exception: + except ValueError: + # e.g. str that can't be parsed to timedelta pass elif is_scalar(item) and isna(item): # GH 18295
CLN: tighten exception catching in indexes (#<I>)
pandas-dev_pandas
train
9f0b827ab653383ea0aeb50e90534a1d551fcfb0
diff --git a/src/ButtonsServiceProvider.php b/src/ButtonsServiceProvider.php index <HASH>..<HASH> 100644 --- a/src/ButtonsServiceProvider.php +++ b/src/ButtonsServiceProvider.php @@ -18,9 +18,11 @@ class ButtonsServiceProvider extends ServiceProvider { $this->loadViewsFrom(__DIR__ . '/resources/views', 'datatables'); - $this->publishAssets(); + if ($this->app->runningInConsole()) { + $this->publishAssets(); - $this->registerCommands(); + $this->registerCommands(); + } } /**
Publish assets and register commands only in console application
yajra_laravel-datatables-buttons
train
6ce62d79dbb201e3d7c593efdc2977f25193c7d9
diff --git a/base/core/src/main/java/org/openscience/cdk/atomtype/CDKAtomTypeMatcher.java b/base/core/src/main/java/org/openscience/cdk/atomtype/CDKAtomTypeMatcher.java index <HASH>..<HASH> 100755 --- a/base/core/src/main/java/org/openscience/cdk/atomtype/CDKAtomTypeMatcher.java +++ b/base/core/src/main/java/org/openscience/cdk/atomtype/CDKAtomTypeMatcher.java @@ -410,7 +410,7 @@ public class CDKAtomTypeMatcher implements IAtomTypeMatcher { } else if (atom.getFormalCharge() == -1) { IBond.Order maxBondOrder = getMaximumBondOrder(connectedBonds); if (maxBondOrder == CDKConstants.BONDORDER_SINGLE && connectedBonds.size() <= 3) { - if (isRingAtom(atom, atomContainer) && bothNeighborsAreSp2(atom, atomContainer, connectedBonds)) { + if (bothNeighborsAreSp2(atom, atomContainer, connectedBonds) && isRingAtom(atom, atomContainer)) { IAtomType type = getAtomType("C.minus.planar"); if (isAcceptable(atom, atomContainer, type, connectedBonds)) return type; } @@ -614,7 +614,7 @@ public class CDKAtomTypeMatcher implements IAtomTypeMatcher { int connectedHeavyAtoms = connectedBonds.size() - explicitHydrogens; if (connectedHeavyAtoms == 2) { // a O.sp3 which is expected to take part in an aromatic system - if (isRingAtom(atom, atomContainer) && bothNeighborsAreSp2(atom, atomContainer, connectedBonds)) { + if (bothNeighborsAreSp2(atom, atomContainer, connectedBonds) && isRingAtom(atom, atomContainer)) { IAtomType type = getAtomType("O.planar3"); if (isAcceptable(atom, atomContainer, type, connectedBonds)) return type; } @@ -990,7 +990,11 @@ public class CDKAtomTypeMatcher implements IAtomTypeMatcher { } private boolean isRingAtom(IAtom atom, IAtomContainer atomContainer) { - RingSearch searcher = new RingSearch(atomContainer); + return isRingAtom(atom, atomContainer, null); + } + + private boolean isRingAtom(IAtom atom, IAtomContainer atomContainer, RingSearch searcher) { + if (searcher == null) searcher = new RingSearch(atomContainer); return searcher.cyclic(atom); } @@ -1215,7 +1219,7 @@ public class CDKAtomTypeMatcher implements IAtomTypeMatcher { if (isAcceptable(atom, atomContainer, type, connectedBonds)) return type; } } else if (neighborcount == 2) { - if (isRingAtom(atom, atomContainer) && bothNeighborsAreSp2(atom, atomContainer, connectedBonds)) { + if (bothNeighborsAreSp2(atom, atomContainer, connectedBonds) && isRingAtom(atom, atomContainer)) { if (countAttachedDoubleBonds(connectedBonds, atom) == 2) { IAtomType type = getAtomType("S.inyl.2"); if (isAcceptable(atom, atomContainer, type, connectedBonds)) return type;
Ring searching is relatively expensive: postpone it as much as possible, and be ready to reuse the data
cdk_cdk
train
1ce3e6c1736a2416db6a3e2a8bb881e02794a826
diff --git a/GSSHPY/gsshapy/orm/prj.py b/GSSHPY/gsshapy/orm/prj.py index <HASH>..<HASH> 100644 --- a/GSSHPY/gsshapy/orm/prj.py +++ b/GSSHPY/gsshapy/orm/prj.py @@ -44,6 +44,7 @@ class ProjectFile(DeclarativeBase, GsshaPyFileObjectBase): # Value Columns name = Column(String, nullable=False) + mapType = Column(Integer, nullable=False) # Relationship Properties projectCards = relationship('ProjectCard', back_populates='projectFile') @@ -67,6 +68,8 @@ class ProjectFile(DeclarativeBase, GsshaPyFileObjectBase): linkNodeDatasets = relationship('LinkNodeDatasetFile', back_populates='projectFile') # File Properties + MAP_TYPES_SUPPORTED = (1,) + INPUT_FILES = {'#PROJECTION_FILE': {'filename': None, 'fileio': ProjectionFile}, # WMS 'MAPPING_TABLE': {'filename': None, 'fileio': MapTableFile}, # Mapping Table 'ST_MAPPING_TABLE': {'filename': None, 'fileio': None}, @@ -215,6 +218,11 @@ class ProjectFile(DeclarativeBase, GsshaPyFileObjectBase): # Associate ProjectCard with ProjectFile prjCard.projectFile = self + + # Extract MAP_TYPE card value for convenience working + # with output maps + if card['name'] == 'MAP_TYPE': + self.mapType = int(card['value']) # Assemble list of files for reading @@ -417,30 +425,36 @@ class ProjectFile(DeclarativeBase, GsshaPyFileObjectBase): ''' GSSHA Project Read Map Files from File Method ''' - for card, afile in fileDict.iteritems(): - filename = afile['filename'] - - if filename != None: - # Create GSSHAPY RasterMapFile object - self._readFile(fileIO=RasterMapFile, - filename=filename) + if self.mapType in self.MAP_TYPES_SUPPORTED: + for card, afile in fileDict.iteritems(): + filename = afile['filename'] + + if filename != None: + # Create GSSHAPY RasterMapFile object + self._readFile(fileIO=RasterMapFile, + filename=filename) + else: + print 'Error: Could not read map files. MAP_TYPE', self.mapType, 'not supported.' def _writeXputMaps(self, session, directory, fileDict, newName=None): ''' GSSHAPY Project Write Map Files to File Method ''' - for card, afile in fileDict.iteritems(): - if afile['filename'] != None: - - # Determine new filename - filename = self._replaceNewFilename(afile['filename'], newName) - - # Write map file - self._writeFile(fileIO=RasterMapFile, - session=session, - directory=directory, - filename=filename) - + if self.mapType in self.MAP_TYPES_SUPPORTED: + for card, afile in fileDict.iteritems(): + if afile['filename'] != None: + + # Determine new filename + filename = self._replaceNewFilename(afile['filename'], newName) + + # Write map file + self._writeFile(fileIO=RasterMapFile, + session=session, + directory=directory, + filename=filename) + else: + print 'Error: Could not write map files. MAP_TYPE', self.mapType, 'not supported.' + def _readFile(self, fileIO, filename): ''' Initiate File Read Method on Other Files diff --git a/GSSHPY/gsshapy/test/init_db.py b/GSSHPY/gsshapy/test/init_db.py index <HASH>..<HASH> 100644 --- a/GSSHPY/gsshapy/test/init_db.py +++ b/GSSHPY/gsshapy/test/init_db.py @@ -7,12 +7,13 @@ * License: BSD 2-Clause ******************************************************************************** ''' -import os - +import os, time from sqlalchemy import create_engine from gsshapy.orm import metadata from gsshapy import DBSession + + def delSQLiteDB(path): os.remove(path) print 'DB Deleted:', path @@ -22,8 +23,11 @@ def delSQLiteDB(path): # sqlalchemy_url = 'sqlite://' # SQLITE_MEMEORY # sqlalchemy_url = 'sqlite:///gsshapy_lite.db' # SQLITE_RELATIVE sqlalchemy_url = 'sqlite:////Users/swainn/testing/db/gsshapy_lite.db' # SQLITE_ABSOLUTE -delSQLiteDB('/Users/swainn/testing/db/gsshapy_lite.db') # DELETE_DB +start = time.time() +delSQLiteDB('/Users/swainn/testing/db/gsshapy_lite.db') # DELETE_DB +print time.time() - start +start2 = time.time() engine = create_engine(sqlalchemy_url) metadata.create_all(engine) @@ -31,4 +35,4 @@ metadata.create_all(engine) DBSession.configure(bind=engine) DBSession.commit() -print 'SUCCESS: Database Initialized' \ No newline at end of file +print 'SUCCESS: Database Initialized', time.time()-start2 \ No newline at end of file
Only MAP_TYPE 1 is allowed now.
CI-WATER_gsshapy
train
d2f32d897ea5ec50413a902bb99fe56390cacaa7
diff --git a/saltcloud/utils/nb_popen.py b/saltcloud/utils/nb_popen.py index <HASH>..<HASH> 100644 --- a/saltcloud/utils/nb_popen.py +++ b/saltcloud/utils/nb_popen.py @@ -51,7 +51,7 @@ class NonBlockingPopen(subprocess.Popen): if obuff: logging.getLogger( 'saltcloud.Popen.STDOUT.PID-{0}'.format(self.pid) - ).warn(obuff.rstrip()) + ).debug(obuff.rstrip()) if self.stream_stds: sys.stdout.write(obuff) except IOError, err: @@ -67,7 +67,7 @@ class NonBlockingPopen(subprocess.Popen): if ebuff: logging.getLogger( 'saltcloud.Popen.STDERR.PID-{0}'.format(self.pid) - ).warn(ebuff.rstrip()) + ).debug(ebuff.rstrip()) if self.stream_stds: sys.stderr.write(ebuff) except IOError, err:
Popen output should log as debug not warning
saltstack_salt
train
68f3a7380a25da734fa520fdd59972c981f54ff1
diff --git a/test/test_ocf_physical_container.rb b/test/test_ocf_physical_container.rb index <HASH>..<HASH> 100644 --- a/test/test_ocf_physical_container.rb +++ b/test/test_ocf_physical_container.rb @@ -13,13 +13,7 @@ class TestOCFPhysicalContainer < Test::Unit::TestCase assert_equal @content, EPUB::OCF::PhysicalContainer.read(@container_path, @path).force_encoding('UTF-8') end - class TestZipruby < self - def setup - super - @class = EPUB::OCF::PhysicalContainer::Zipruby - @container = @class.new(@container_path) - end - + module ConcreteContainer def test_class_method_open @class.open @container_path do |container| assert_instance_of @class, container @@ -48,4 +42,14 @@ class TestOCFPhysicalContainer < Test::Unit::TestCase assert_equal @content, @container.read(@path).force_encoding('UTF-8') end end + + class TestZipruby < self + include ConcreteContainer + + def setup + super + @class = EPUB::OCF::PhysicalContainer::Zipruby + @container = @class.new(@container_path) + end + end end
Extract test cases for physical container as a module
KitaitiMakoto_epub-parser
train
e041b618fd9527b964c8546e80ffe1f39d57050f
diff --git a/test/plugin/test_in_tail.rb b/test/plugin/test_in_tail.rb index <HASH>..<HASH> 100644 --- a/test/plugin/test_in_tail.rb +++ b/test/plugin/test_in_tail.rb @@ -1011,8 +1011,6 @@ class TailInputTest < Test::Unit::TestCase end def test_truncate_file - omit "Permission denied error happen on Windows. Need fix" if Fluent.windows? - config = SINGLE_LINE_CONFIG File.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "test1" @@ -1028,7 +1026,13 @@ class TailInputTest < Test::Unit::TestCase f.flush } waiting(2) { sleep 0.1 until d.events.length == 2 } - File.truncate("#{@tmp_dir}/tail.txt", 6) + if Fluent.windows? + Fluent::FileWrapper.open("#{@tmp_dir}/tail.txt", "wb") { |f| + f.puts("test1"); + } + else + File.truncate("#{@tmp_dir}/tail.txt", 6) + end end expected = { @@ -1047,8 +1051,6 @@ class TailInputTest < Test::Unit::TestCase end def test_move_truncate_move_back - omit "Permission denied error happen on Windows. Need fix" if Fluent.windows? - config = SINGLE_LINE_CONFIG File.open("#{@tmp_dir}/tail.txt", "wb") {|f| f.puts "test1" @@ -1064,7 +1066,13 @@ class TailInputTest < Test::Unit::TestCase FileUtils.mv("#{@tmp_dir}/tail.txt", "#{@tmp_dir}/tail2.txt") end sleep(1) - File.truncate("#{@tmp_dir}/tail2.txt", 6) + if Fluent.windows? + Fluent::FileWrapper.open("#{@tmp_dir}/tail2.txt", "wb") { |f| + f.puts("test1"); + } + else + File.truncate("#{@tmp_dir}/tail2.txt", 6) + end sleep(1) if Fluent.windows? FileUtils.mv("#{@tmp_dir}/tail2.txt", "#{@tmp_dir}/tail.txt", force: true)
test_in_tail: Fix some omitted tests on Windows
fluent_fluentd
train