hash (stringlengths 40) | diff (stringlengths 131-114k) | message (stringlengths 7-980) | project (stringlengths 5-67) | split (stringclasses 1: train)
---|---|---|---|---|
d87bb430e386fa9ecb671d5283f18dafe8c65b20 | diff --git a/src/Entities/Menu.php b/src/Entities/Menu.php
index <HASH>..<HASH> 100644
--- a/src/Entities/Menu.php
+++ b/src/Entities/Menu.php
@@ -1,6 +1,7 @@
<?php namespace Arcanedev\Menus\Entities;
use Closure;
+use IteratorAggregate;
/**
* Class Menu
@@ -8,7 +9,7 @@ use Closure;
* @package Arcanedev\Menus\Entities
* @author ARCANEDEV <[email protected]>
*/
-class Menu
+class Menu implements IteratorAggregate
{
/* ------------------------------------------------------------------------------------------------
| Properties
@@ -44,6 +45,20 @@ class Menu
}
/* ------------------------------------------------------------------------------------------------
+ | Getters & Setters
+ | ------------------------------------------------------------------------------------------------
+ */
+ /**
+ * Get the menu items iterator.
+ *
+ * @return \Arcanedev\Menus\Entities\MenuItemCollection
+ */
+ public function getIterator()
+ {
+ return $this->items;
+ }
+
+ /* ------------------------------------------------------------------------------------------------
| Main Functions
| ------------------------------------------------------------------------------------------------
*/ | Adding IteratorAggregate Interface to Menu Entity | ARCANEDEV_Menus | train |
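
A minimal Python sketch of the pattern this commit applies, making a container iterable by delegating to its inner collection; the class and attribute names below are illustrative, not taken from the project:

```python
class Menu:
    """A menu that is iterable by delegating to its internal item list."""

    def __init__(self, items=None):
        self._items = list(items or [])

    def add(self, item):
        self._items.append(item)

    def __iter__(self):
        # Delegate iteration to the underlying collection, the same move
        # as IteratorAggregate#getIterator returning the items collection.
        return iter(self._items)


menu = Menu(["home", "about"])
menu.add("contact")
print(list(menu))  # ['home', 'about', 'contact']
```
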
1baf805206e277859ebe7c097739fd50ef068053 | diff --git a/pom.xml b/pom.xml
index <HASH>..<HASH> 100644
--- a/pom.xml
+++ b/pom.xml
@@ -48,8 +48,8 @@
<gwt.version>2.7.0</gwt.version>
<jbox2d.version>2.2.1.1</jbox2d.version>
<lwjgl.version>2.9.3</lwjgl.version>
- <robovm.version>1.7.0</robovm.version>
- <robovm.maven.version>1.7.0</robovm.maven.version>
+ <robovm.version>1.8.0</robovm.version>
+ <robovm.maven.version>1.8.0</robovm.maven.version>
<!--
GWT log levels:
ERROR, WARN, INFO, TRACE, DEBUG, SPAM, or ALL
diff --git a/robovm/src/playn/robovm/RoboAssets.java b/robovm/src/playn/robovm/RoboAssets.java
index <HASH>..<HASH> 100644
--- a/robovm/src/playn/robovm/RoboAssets.java
+++ b/robovm/src/playn/robovm/RoboAssets.java
@@ -49,7 +49,7 @@ public class RoboAssets extends Assets {
plat.net().req(url).execute().
onSuccess(new Slot<Net.Response>() {
public void onEmit (Net.Response rsp) {
- image.succeed(toData(Scale.ONE, UIImage.create(new NSData(rsp.payload()))));
+ image.succeed(toData(Scale.ONE, new UIImage(new NSData(rsp.payload()))));
}
}).
onFailure(new Slot<Throwable>() {
@@ -97,7 +97,7 @@ public class RoboAssets extends Assets {
if (!fullPath.exists()) continue;
// plat.log().debug("Loading image: " + fullPath);
- UIImage img = UIImage.create(fullPath);
+ UIImage img = new UIImage(fullPath);
if (img != null) return toData(rsrc.scale, img);
// note this error if this is the lowest resolution image, but fall back to lower resolution
diff --git a/robovm/src/playn/robovm/RoboPlatform.java b/robovm/src/playn/robovm/RoboPlatform.java
index <HASH>..<HASH> 100644
--- a/robovm/src/playn/robovm/RoboPlatform.java
+++ b/robovm/src/playn/robovm/RoboPlatform.java
@@ -26,8 +26,7 @@ import org.robovm.apple.uikit.UIApplication;
import org.robovm.apple.uikit.UIDevice;
import org.robovm.apple.uikit.UIInterfaceOrientationMask;
import org.robovm.apple.uikit.UIWindow;
-import org.robovm.objc.Selector;
-import org.robovm.objc.annotation.BindSelector;
+import org.robovm.objc.block.VoidBlock1;
import org.robovm.rt.bro.annotation.Callback;
import playn.core.*;
@@ -186,8 +185,15 @@ public class RoboPlatform extends Platform {
}
void willTerminate () {
- // shutdown the GL and AL systems
- ResourceCleaner.terminate(this);
+ // shutdown the GL and AL systems after our configured delay
+ new NSTimer(config.timeForTermination, new VoidBlock1<NSTimer>() {
+ public void invoke (NSTimer timer) {
+ // shutdown the GL view completely
+ EAGLContext.setCurrentContext(null);
+ // stop and release the AL resources (if audio was ever initialized)
+ if (audio != null) audio.terminate();
+ }
+ }, null, false);
// let the app know that we're terminating
dispatchEvent(lifecycle, Lifecycle.EXIT);
}
@@ -197,31 +203,4 @@ public class RoboPlatform extends Platform {
int version = Integer.parseInt(systemVersion.split("\\.")[0]);
return version;
}
-
- private static class ResourceCleaner extends NSObject {
- private final static Selector SEL = Selector.register("cleanRelatedResources:");
- private RoboPlatform platform;
-
- private ResourceCleaner(RoboPlatform platform) {
- this.platform = platform;
- }
-
- // wait for the desired interval and then terminate the GL and AL systems
- public static void terminate(RoboPlatform platform) {
- NSTimer.createScheduled(platform.config.timeForTermination, new ResourceCleaner(platform),
- ResourceCleaner.SEL, null, false);
- }
-
- @Callback @BindSelector("cleanRelatedResources:")
- private static void cleanRelatedResources(ResourceCleaner self, Selector sel) {
- if (self.platform != null) {
- // shutdown the GL view completely
- EAGLContext.setCurrentContext(null);
- // stop and release the AL resources (if audio was ever initialized)
- if (self.platform.audio != null) self.platform.audio.terminate();
- // null out our platform reference
- self.platform = null;
- }
- }
- }
} | Upgrade to RoboVM <I>. | playn_playn | train |
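
The RoboPlatform change above swaps a selector-bound callback class for an inline closure handed to a delayed timer. A rough Python sketch of that shape using threading.Timer; the delay value and the audio/GL stand-ins are assumptions for illustration:

```python
import threading

def will_terminate(audio, delay_seconds=0.5):
    """Schedule GL/AL teardown after a delay, using an inline closure
    instead of a separate selector-bound callback class."""
    def cleanup():
        print("GL context released")  # stand-in for EAGLContext teardown
        if audio is not None:         # AL resources, if ever initialized
            audio.terminate()

    timer = threading.Timer(delay_seconds, cleanup)
    timer.start()
    return timer


t = will_terminate(audio=None, delay_seconds=0.1)
t.join()  # wait for the demo timer to fire
```
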
e13ec14c65994c21fd40dde958292783603e3354 | diff --git a/cf/commands/application/app.go b/cf/commands/application/app.go
index <HASH>..<HASH> 100644
--- a/cf/commands/application/app.go
+++ b/cf/commands/application/app.go
@@ -175,7 +175,7 @@ func (cmd *ShowApp) ShowApp(app models.Application, orgName, spaceName string) {
}
//temp solution, diego app metrics only come from noaa, not CC
- if application.Diego {
+ if application.Diego && len(instances) > 0 {
instances, apiErr = cmd.appLogsNoaaRepo.GetContainerMetrics(app.Guid, instances)
for i := 0; i < len(instances); i++ {
diff --git a/cf/commands/application/app_test.go b/cf/commands/application/app_test.go
index <HASH>..<HASH> 100644
--- a/cf/commands/application/app_test.go
+++ b/cf/commands/application/app_test.go
@@ -193,6 +193,18 @@ var _ = Describe("app Command", func() {
runCommand("my-app")
Ω(appLogsNoaaRepo.GetContainerMetricsCallCount()).To(Equal(1))
})
+
+ It("does not gather metrics when instance count is 0", func() {
+ app.Diego = true
+ appSummaryRepo.GetSummarySummary = app
+ requirementsFactory.Application = app
+
+ appInstancesRepo.GetInstancesReturns([]models.AppInstanceFields{}, nil)
+
+ runCommand("my-app")
+ Ω(appLogsNoaaRepo.GetContainerMetricsCallCount()).To(Equal(0))
+ })
+
It("gracefully handles when /instances is down but /noaa is not", func() {
app.Diego = true
appSummaryRepo.GetSummarySummary = app | do not call GetContainerMetrics() when a diego app is stopped
- GetContainerMetrics() might still return metrics for a stopped app, while the v2/app/summary endpoint reports instance=0; this causes the CLI to crash.
[#<I>] | cloudfoundry_cli | train |
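
The fix is a plain guard: skip the metrics call entirely when the instance list is empty. A hedged Python sketch of the same guard; the names are invented for illustration:

```python
def gather_metrics(app, instances, metrics_repo):
    """Only hit the metrics endpoint when there is at least one
    instance; a stopped Diego app reports zero instances, but the
    metrics endpoint may still answer, which is what crashed here."""
    if app.get("diego") and len(instances) > 0:
        return metrics_repo.get_container_metrics(app["guid"], instances)
    return instances


print(gather_metrics({"diego": True, "guid": "g-1"}, [], metrics_repo=None))  # []
```
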
e673963057eb11cd0ec95935b1d163a69820ceda | diff --git a/lib/runner.js b/lib/runner.js
index <HASH>..<HASH> 100644
--- a/lib/runner.js
+++ b/lib/runner.js
@@ -159,6 +159,7 @@ Runner.prototype.hook = function(name, fn){
});
hook.run(function(err){
+ hook.removeAllListeners('error');
if (err) return self.failHook(hook, err);
self.emit('hook end', hook);
next(++i); | Fixed event emitter leak. Closes #<I> | mochajs_mocha | train |
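
The leak pattern generalizes: a listener attached per run but never detached accumulates on a long-lived object. A small Python sketch with a toy emitter (not Mocha's API) showing the attach/detach discipline:

```python
class Emitter:
    """Tiny stand-in for Node's EventEmitter."""
    def __init__(self):
        self._listeners = {}

    def on(self, event, fn):
        self._listeners.setdefault(event, []).append(fn)

    def remove_all_listeners(self, event):
        self._listeners.pop(event, None)

    def listener_count(self, event):
        return len(self._listeners.get(event, ()))


def run_hook(hook, on_error):
    hook.on("error", on_error)
    try:
        pass  # hook body would run here
    finally:
        # Detach the per-run handler so repeated runs don't pile
        # listeners onto the same long-lived hook object.
        hook.remove_all_listeners("error")


hook = Emitter()
for _ in range(3):
    run_hook(hook, on_error=print)
print(hook.listener_count("error"))  # 0, not 3
```
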
cbbfff48fbb7cb2bfdbb06dec22f7e93e8da9dfe | diff --git a/openhtf/capabilities/__init__.py b/openhtf/capabilities/__init__.py
index <HASH>..<HASH> 100644
--- a/openhtf/capabilities/__init__.py
+++ b/openhtf/capabilities/__init__.py
@@ -175,6 +175,9 @@ def requires(**capabilities):
'Capabilities %s required multiple times on phase %s' % (
duplicates, func))
wrapper.capabilities.update(capabilities)
+ # functools.wraps doesn't explicitly save this anywhere, and we need it to
+ # pull out the source lines later.
+ wrapper.wraps = func
return wrapper
return result
diff --git a/openhtf/test_record.py b/openhtf/test_record.py
index <HASH>..<HASH> 100644
--- a/openhtf/test_record.py
+++ b/openhtf/test_record.py
@@ -41,7 +41,8 @@ class TestRecord(TestRecordBase):
@contextlib.contextmanager
def RecordPhaseTiming(self, phase):
- #TODO(madsci): Figure out how to get code from inside the capability wrapper.
+ while hasattr(phase, 'wraps'):
+ phase = phase.wraps
phase_record = PhaseRecord(phase.__name__, phase.__doc__,
inspect.getsource(phase), utils.TimeMillis())
try: | save wrapped phase func as an attribute on capability-wrapped phases | google_openhtf | train |
c3d9d9690ff40d428dba316714c82ea1af6b4622 | diff --git a/Gemfile.lock b/Gemfile.lock
index <HASH>..<HASH> 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,7 +1,7 @@
PATH
remote: .
specs:
- myreplicator (1.1.63)
+ myreplicator (1.1.64)
json
mysql2
net-sftp
diff --git a/lib/loader/loader.rb b/lib/loader/loader.rb
index <HASH>..<HASH> 100644
--- a/lib/loader/loader.rb
+++ b/lib/loader/loader.rb
@@ -66,11 +66,15 @@ module Myreplicator
# making the hash for mapping filepath to metadata object
all_files.each do |m|
- if !(@redis.hexists(@load_hash, m.filepath))
- @redis.hset(@load_hash, m.filepath, 0)
- @redis.sadd(@load_set, m.filepath)
+ if Myreplicator::Loader.transfer_completed? m
+ if !(@redis.hexists(@load_hash, m.filepath))
+ @redis.hset(@load_hash, m.filepath, 0)
+ @redis.sadd(@load_set, m.filepath)
+ end
+ files_to_metadata[m.filepath] = m
+ else
+ # for the fun of commenting: do nothing
end
- files_to_metadata[m.filepath] = m
end
# processing the files in "queue"
diff --git a/lib/loader/vertica/vertica_loader.rb b/lib/loader/vertica/vertica_loader.rb
index <HASH>..<HASH> 100644
--- a/lib/loader/vertica/vertica_loader.rb
+++ b/lib/loader/vertica/vertica_loader.rb
@@ -152,7 +152,7 @@ module Myreplicator
Loader.cleanup metadata #Remove incremental file
Kernel.p "===== Remove incremental file ====="
end
- elsif exp.nightly_refresh && (exp.nightly_refresh_frequency != 0) && ((Time.now() - exp.nightly_refresh_last_run) >= exp.nightly_refresh_frequency.minute)
+ elsif exp.nightly_refresh && (exp.nightly_refresh_frequency != 0) && ((Time.now() - exp.nightly_refresh_last_run) >= exp.nightly_refresh_frequency.minute) && (Time.now().hour >= 21)
Loader.clear_older_files metadata # clear old incremental files
exp.nightly_refresh_last_run = Time.now().change(:min => 0)
exp.save!
@@ -162,7 +162,7 @@ module Myreplicator
exp.export
elsif get_analyze_constraints(ops) > 0 # check for primary key/unique keys violations
Kernel.p "===== DROP CURRENT TABLE ====="
- sql = "DROP TABLE IF EXISTS #{options[:db]}.#{options[:destination_schema]}.#{options[:table_name]} CASCADE;"
+ sql = "TRUNCATE TABLE #{options[:db]}.#{options[:destination_schema]}.#{options[:table_name]};"
Myreplicator::DB.exec_sql("vertica",sql)
# run the export. The next time loader runs, it will load the file
exp.export
diff --git a/lib/myreplicator/version.rb b/lib/myreplicator/version.rb
index <HASH>..<HASH> 100644
--- a/lib/myreplicator/version.rb
+++ b/lib/myreplicator/version.rb
@@ -1,3 +1,3 @@
module Myreplicator
- VERSION = "1.1.63"
+ VERSION = "1.1.64"
end | Adding check for inveral refresh to run only at night and some other small fixes | Raybeam_myreplicator | train |
efaad45b0914f9d1016f63c210491433dd25c736 | diff --git a/.rubocop_todo.yml b/.rubocop_todo.yml
index <HASH>..<HASH> 100644
--- a/.rubocop_todo.yml
+++ b/.rubocop_todo.yml
@@ -49,57 +49,3 @@ Metrics/ModuleLength:
# Offense count: 1
Metrics/PerceivedComplexity:
Max: 10
-
-# Offense count: 1
-# Configuration parameters: NamePrefix, NamePrefixBlacklist, NameWhitelist.
-# NamePrefix: is_, has_, have_
-# NamePrefixBlacklist: is_, has_, have_
-# NameWhitelist: is_a?
-Naming/PredicateName:
- Exclude:
- - 'spec/**/*'
-
-# Offense count: 23
-# Configuration parameters: EnforcedStyle, SupportedStyles.
-# SupportedStyles: snake_case, normalcase, non_integer
-Naming/VariableNumber:
- Exclude:
- - 'testing/rspec/spec/cql_spec.rb'
- - 'testing/rspec/spec/predefined_filters_spec.rb'
- - 'testing/rspec/spec/repository_spec.rb'
-
-
-# Offense count: 19
-# Cop supports --auto-correct.
-# Configuration parameters: EnforcedStyle, SupportedStyles, ProceduralMethods, FunctionalMethods, IgnoredMethods.
-# SupportedStyles: line_count_based, semantic, braces_for_chaining
-# ProceduralMethods: benchmark, bm, bmbm, create, each_with_object, measure, new, realtime, tap, with_object
-# FunctionalMethods: let, let!, subject, watch
-# IgnoredMethods: lambda, proc, it
-Style/BlockDelimiters:
- Exclude:
- - 'testing/rspec/spec/cql_spec.rb'
- - 'testing/rspec/spec/line_filterable_specs.rb'
- - 'testing/rspec/spec/name_filterable_specs.rb'
- - 'testing/rspec/spec/queriable_specs.rb'
-
-# Offense count: 9
-Style/Documentation:
- Exclude:
- - 'spec/**/*'
- - 'test/**/*'
-
-
-# Offense count: 1
-Style/MethodMissing:
- Exclude:
- - 'lib/cql/dsl.rb'
-
-
-# Offense count: 3
-# Cop supports --auto-correct.
-# Configuration parameters: AutoCorrect, EnforcedStyle, SupportedStyles.
-# SupportedStyles: predicate, comparison
-Style/NumericPredicate:
- Exclude:
- - 'spec/**/*'
diff --git a/lib/cql/dsl.rb b/lib/cql/dsl.rb
index <HASH>..<HASH> 100644
--- a/lib/cql/dsl.rb
+++ b/lib/cql/dsl.rb
@@ -5,7 +5,7 @@ module CQL
# Any undefined method is assumed to mean its String equivalent, thus allowing a more convenient query syntax.
- def method_missing(method_name)
+ def method_missing(method_name) # rubocop:disable Style/MethodMissing - All it does is make a string. No real risk.
method_name.to_s
end
diff --git a/testing/rspec/spec/queriable_specs.rb b/testing/rspec/spec/queriable_specs.rb
index <HASH>..<HASH> 100644
--- a/testing/rspec/spec/queriable_specs.rb
+++ b/testing/rspec/spec/queriable_specs.rb
@@ -28,14 +28,14 @@ shared_examples_for 'a queriable object' do
it 'complains if a query is attempted without a query root being set' do
object.query_root = nil
- expect {
+ expect do
object.query do
select :model
from :all
end
- }.to raise_error(ArgumentError, 'Query cannot be run. No query root has been set.')
+ end.to raise_error(ArgumentError, 'Query cannot be run. No query root has been set.')
end
it 'starts with a query root' do | Fix some RuboCop violations
Fixed all remaining violations except for those related to length and
complexity. Those types of violations will be taken care of during the
big refactor that is soon to happen. | enkessler_cql | train |
e48f63b12dd79b9bb97e560b8e18ab280399fca2 | diff --git a/lib/travis/services/cancel_job.rb b/lib/travis/services/cancel_job.rb
index <HASH>..<HASH> 100644
--- a/lib/travis/services/cancel_job.rb
+++ b/lib/travis/services/cancel_job.rb
@@ -19,7 +19,7 @@ module Travis
end
def cancel
- publish!
+ publish! unless job.created?
job.cancel!
end
diff --git a/spec/travis/services/cancel_job_spec.rb b/spec/travis/services/cancel_job_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/travis/services/cancel_job_spec.rb
+++ b/spec/travis/services/cancel_job_spec.rb
@@ -10,14 +10,26 @@ describe Travis::Services::CancelJob do
let(:service) { described_class.new(user, params) }
describe 'run' do
- it 'should cancel the job if it\'s cancelable' do
+ it 'should send cancel event to the worker if job has already been started' do
job.stubs(:cancelable?).returns(true)
service.stubs(:authorized?).returns(true)
+ job.update_attribute(:state, :started)
publisher = mock('publisher')
service.stubs(:publisher).returns(publisher)
publisher.expects(:publish).with(type: 'cancel_job', job_id: job.id)
+ service.run
+ end
+
+ it 'should cancel the job if it\'s cancelable' do
+ job.stubs(:cancelable?).returns(true)
+ service.stubs(:authorized?).returns(true)
+
+ publisher = mock('publisher')
+ service.stubs(:publisher).returns(publisher)
+ publisher.expects(:publish).never
+
expect {
service.run
}.to change { job.reload.state } | Don't send cancel event to the worker if a job hasn't started yet | travis-ci_travis-core | train |
5837222285832c73c0516e8ef07352c2b6dc0f66 | diff --git a/dateutils_tests.py b/dateutils_tests.py
index <HASH>..<HASH> 100644
--- a/dateutils_tests.py
+++ b/dateutils_tests.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
##
## $Id$
-##
+##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN.
##
@@ -13,7 +13,7 @@
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-## General Public License for more details.
+## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
@@ -30,7 +30,7 @@ class ConvertFromDateCVSTest(unittest.TestCase):
"""
Testing conversion of CVS dates.
"""
-
+
def test_convert_good_cvsdate(self):
"""dateutils - conversion of good CVS dates"""
# here we have to use '$' + 'Date...' here, otherwise the CVS
@@ -40,6 +40,13 @@ class ConvertFromDateCVSTest(unittest.TestCase):
self.assertEqual(dateutils.convert_datecvs_to_datestruct(datecvs)[:6],
datestruct_beginning_expected)
+ # here we have to use '$' + 'Date...' here, otherwise the CVS
+ # commit would erase this time format to put commit date:
+ datecvs = "$" + "Id: dateutils_tests.py,v 1.6 2007/02/14 18:33:02 tibor Exp $"
+ datestruct_beginning_expected = (2007, 2, 14, 18, 33, 02)
+ self.assertEqual(dateutils.convert_datecvs_to_datestruct(datecvs)[:6],
+ datestruct_beginning_expected)
+
def test_convert_bad_cvsdate(self):
"""dateutils - conversion of bad CVS dates"""
# here we have to use '$' + 'Date...' here, otherwise the CVS | Added test of handling CVS $id tag by convert_datecvs_to_datestruct(..). | inveniosoftware-attic_invenio-utils | train |
221b62b1680914142ea948ebeacc83d2d88659f2 | diff --git a/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/ResponseMessage.java b/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/ResponseMessage.java
index <HASH>..<HASH> 100644
--- a/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/ResponseMessage.java
+++ b/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/ResponseMessage.java
@@ -10,4 +10,5 @@ import java.lang.annotation.Target;
public @interface ResponseMessage {
int code();
String message();
+ String responseModel() default "";
}
diff --git a/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/processors/SwaggerProcessor.java b/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/processors/SwaggerProcessor.java
index <HASH>..<HASH> 100644
--- a/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/processors/SwaggerProcessor.java
+++ b/middleware/swagger/src/main/java/com/jetdrone/vertx/yoke/annotations/processors/SwaggerProcessor.java
@@ -137,11 +137,15 @@ public class SwaggerProcessor extends AbstractAnnotationHandler<Swagger> {
operations.putArray("responseMessages", jsonResponseMessages);
for (ResponseMessage responseMessage : doc.responseMessages()) {
- jsonResponseMessages.addObject(
- new JsonObject()
- .putNumber("code", responseMessage.code())
- .putString("message", responseMessage.message())
- );
+ JsonObject json = new JsonObject()
+ .putNumber("code", responseMessage.code())
+ .putString("message", responseMessage.message());
+
+ if (!responseMessage.responseModel().equals("")) {
+ json.putString("responseModel", responseMessage.responseModel());
+ }
+
+ jsonResponseMessages.addObject(json);
}
} | swagger add support for responseModel
(cherry picked from commit <I>ffb<I>) | pmlopes_yoke | train |
14d6014a379f51a2e602f6edde5fcb8a7a664de2 | diff --git a/cluster-master.js b/cluster-master.js
index <HASH>..<HASH> 100644
--- a/cluster-master.js
+++ b/cluster-master.js
@@ -7,6 +7,9 @@ var cluster = require("cluster")
, clusterSize = 0
, os = require("os")
, onmessage
+, repl = require('repl')
+, net = require('net')
+, fs = require('fs')
exports = module.exports = clusterMaster
exports.restart = restart
@@ -50,7 +53,56 @@ function clusterMaster (config) {
forkListener()
// now make it the right size
- resize()
+ console.error('resize and then setup repl')
+ resize(setupRepl)
+}
+
+function setupRepl () {
+ console.error('setup repl')
+ var socket = path.resolve('cluster-master-socket')
+ var connections = 0
+ fs.unlink(socket, function (er) {
+ if (er && er.code !== 'ENOENT') throw er
+ net.createServer(function (sock) {
+ connections ++
+ var r = repl.start({
+ prompt: 'ClusterMaster ' + process.pid + '> ',
+ input: sock,
+ output: sock,
+ terminal: true,
+ useGlobal: false
+ })
+ var context = {
+ repl: r,
+ resize: resize,
+ restart: restart,
+ quit: quit,
+ cluster: cluster,
+ get size () {
+ return clusterSize
+ },
+ get connections () {
+ return connections
+ },
+ sock: sock
+ }
+ var desc = Object.getOwnPropertyNames(context).map(function (n) {
+ return [n, Object.getOwnPropertyDescriptor(context, n)]
+ }).reduce(function (set, kv) {
+ set[kv[0]] = kv[1]
+ return set
+ }, {})
+ Object.defineProperties(r.context, desc)
+
+ console.error('context', r.context)
+ r.on('end', function () {
+ connections --
+ sock.end()
+ })
+ }).listen(socket, function () {
+ console.error('ClusterMaster repl listening on '+socket)
+ })
+ })
}
function forkListener () {
@@ -179,6 +231,8 @@ function restart (cb) {
var resizing = false
function resize (n, cb) {
+ if (typeof n === 'function') cb = n, n = clusterSize
+
if (resizing) {
return cb && cb()
} | Control the cluster with a repl | isaacs_cluster-master | train |
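
The commit adds a REPL listening on a Unix socket so a running master process can be inspected live. A much smaller Python sketch of the same idea, assuming a POSIX host; it is a line-command server rather than a full REPL, and all names are illustrative:

```python
import os
import socket
import threading

def serve_control_socket(path, handlers):
    """Line-oriented control socket: each received line is looked up in
    `handlers` and the result written back, a REPL-lite for a running
    master process. POSIX-only (AF_UNIX)."""
    try:
        os.unlink(path)
    except FileNotFoundError:
        pass
    srv = socket.socket(socket.AF_UNIX)
    srv.bind(path)
    srv.listen(1)

    def loop():
        while True:
            conn, _ = srv.accept()
            with conn, conn.makefile("rw") as f:
                for line in f:
                    fn = handlers.get(line.strip())
                    f.write(f"{fn() if fn else 'unknown command'}\n")
                    f.flush()

    threading.Thread(target=loop, daemon=True).start()
    return srv

# e.g. serve_control_socket("cluster-master-socket",
#                           {"size": lambda: 4, "restart": lambda: "ok"})
```
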
5e4d833eb44a714b130fdc2981d868189d6ae717 | diff --git a/mysqldump.php b/mysqldump.php
index <HASH>..<HASH> 100644
--- a/mysqldump.php
+++ b/mysqldump.php
@@ -28,7 +28,7 @@ class MySQLDump
$this->list_tables();
$this->create_sql();
- file_put_contents($this->filename, $this->sql_file);
+ return file_put_contents($this->filename, $this->sql_file);
}
public function create_sql() | Return status of backup
file_put_contents returns the number of bytes that were written, or false on failure.
This is useful for testing that the backup was performed successfully :) | ifsnop_mysqldump-php | train |
e9f683e06cb8351d0a2883eb8e49a4856c36779a | diff --git a/quarkc/_metadata.py b/quarkc/_metadata.py
index <HASH>..<HASH> 100644
--- a/quarkc/_metadata.py
+++ b/quarkc/_metadata.py
@@ -19,7 +19,7 @@ __all__ = [
"__license__", "__copyright__",
]
-__title__ = 'datawire-quark'
+__title__ = 'datawire-quarkdev'
__version__ = '0.6.74'
__summary__ = "Quark: an IDL for high level (micro)service interfaces" | Changed version to datawire-quarkdev, <I> (doc 1). [ci skip] | datawire_quark | train |
c7e1f49e783f92960441b88fe23bc1e933797daa | diff --git a/src/Composer/DependencyResolver/Rule.php b/src/Composer/DependencyResolver/Rule.php
index <HASH>..<HASH> 100644
--- a/src/Composer/DependencyResolver/Rule.php
+++ b/src/Composer/DependencyResolver/Rule.php
@@ -43,8 +43,6 @@ class Rule
protected $job;
- protected $ruleHash;
-
public function __construct(array $literals, $reason, $reasonData, $job = null)
{
// sort all packages ascending by id
@@ -58,13 +56,12 @@ class Rule
$this->job = $job;
$this->type = -1;
- $data = unpack('ihash', md5(implode(',', $this->literals), true));
- $this->ruleHash = $data['hash'];
}
public function getHash()
{
- return $this->ruleHash;
+ $data = unpack('ihash', md5(implode(',', $this->literals), true));
+ return $data['hash'];
}
public function setId($id)
@@ -113,10 +110,6 @@ class Rule
*/
public function equals(Rule $rule)
{
- if ($this->ruleHash !== $rule->ruleHash) {
- return false;
- }
-
if (count($this->literals) != count($rule->literals)) {
return false;
} | Rule hashes are only used in the rule set, so no need to store them | composer_composer | train |
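
The trade here is memory for CPU: stop caching the hash on every rule and recompute it on demand, since only the rule set ever asks for it. A hedged Python sketch of that shape; the hash recipe and field names are illustrative:

```python
import hashlib

class Rule:
    """Rule whose hash is computed on demand instead of being stored
    at construction time."""

    def __init__(self, literals):
        self.literals = sorted(literals)

    def get_hash(self):
        # Recompute from the sorted literals each time; cheap enough
        # that storing it on every Rule is not worth the memory.
        data = ",".join(map(str, self.literals)).encode()
        return hashlib.md5(data).hexdigest()[:8]

    def equals(self, other):
        return self.literals == other.literals


r = Rule([3, 1, 2])
print(r.get_hash())
```
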
4d711ebfb84276f53346ea246c93d1f629446929 | diff --git a/lib/bolt/inventory/group.rb b/lib/bolt/inventory/group.rb
index <HASH>..<HASH> 100644
--- a/lib/bolt/inventory/group.rb
+++ b/lib/bolt/inventory/group.rb
@@ -125,7 +125,7 @@ module Bolt
# Require nodes to be parseable as a Target.
begin
Target.new(n)
- rescue Addressable::URI::InvalidURIError => e
+ rescue Bolt::ParseError => e
@logger.debug(e)
raise ValidationError.new("Invalid node name #{n}", @name)
end
diff --git a/lib/bolt/target.rb b/lib/bolt/target.rb
index <HASH>..<HASH> 100644
--- a/lib/bolt/target.rb
+++ b/lib/bolt/target.rb
@@ -62,6 +62,8 @@ module Bolt
# Initialize with an empty scheme to ensure we parse the hostname correctly
Addressable::URI.parse("//#{string}")
end
+ rescue Addressable::URI::InvalidURIError => e
+ raise Bolt::ParseError, "Could not parse target URI: #{e.message}"
end
private :parse
diff --git a/spec/bolt/target_spec.rb b/spec/bolt/target_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/bolt/target_spec.rb
+++ b/spec/bolt/target_spec.rb
@@ -45,7 +45,7 @@ describe Bolt::Target do
it "rejects unescaped special characters" do
expect {
Bolt::Target.new("#{user}:a/b@neptune")
- }.to raise_error(Addressable::URI::InvalidURIError,
+ }.to raise_error(Bolt::ParseError,
/Invalid port number/)
end | (BOLT-<I>) Convert Addressable::URI errors to Bolt errors
Previously, exceptions raised by the `Addressable::URI.parse` method would be surfaced. This commit converts `InvalidURIError`s into `Bolt::ParseError`s. | puppetlabs_bolt | train |
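
The pattern, converting a library's exception into the tool's own error type at the parse boundary, ports directly. A Python sketch using urllib.parse as a stand-in for Addressable; the error class and messages are illustrative:

```python
from urllib.parse import urlsplit

class ParseError(Exception):
    """Domain-level error, so callers rescue one exception type."""

def parse_target(uri):
    try:
        # A leading '//' makes urlsplit treat the input as an authority,
        # much like the Addressable usage above.
        return urlsplit(f"//{uri}")
    except ValueError as e:
        # Translate the library's error into the tool's own error type.
        raise ParseError(f"Could not parse target URI: {e}") from e


print(parse_target("user@neptune:22"))
try:
    parse_target("[unclosed-bracket")  # malformed IPv6 authority
except ParseError as e:
    print(e)
```
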
6b61f45379652dca1808c4a1098bb1f24bce97a9 | diff --git a/src/ol/color/color.js b/src/ol/color/color.js
index <HASH>..<HASH> 100644
--- a/src/ol/color/color.js
+++ b/src/ol/color/color.js
@@ -33,9 +33,17 @@ ol.Color;
/**
* @type {RegExp}
* @private
- * This RegExp matches # followed by 3, 4, 6, or 8 hex digits.
+ * This RegExp matches # followed by 3 or 6 hex digits.
*/
-ol.color.hexColorRe_ = /^#(?:[0-9a-f]{3,4}){1,2}$/i;
+ol.color.hexColorRe_ = /^#(?:[0-9a-f]{3}){1,2}$/i;
+
+
+/**
+ * @type {RegExp}
+ * @private
+ * This RegExp matches # followed by 4 or 8 hex digits.
+ */
+ol.color.hexaColorRe_ = /^#(?:[0-9a-f]{4}){1,2}$/i;
/**
@@ -210,8 +218,8 @@ ol.color.fromStringInternal_ = function(s) {
var r, g, b, a, color, match;
if (isHex || (match = ol.color.hexColorRe_.exec(s))) { // hex
var n = s.length - 1; // number of hex digits
- goog.asserts.assert(goog.array.indexOf([3, 4, 6, 8], n) != -1);
- var d = n < 6 ? 1 : 2; // number of digits per channel
+ goog.asserts.assert(n == 3 || n == 6);
+ var d = n == 3 ? 1 : 2; // number of digits per channel
r = parseInt(s.substr(1 + 0 * d, d), 16);
g = parseInt(s.substr(1 + 1 * d, d), 16);
b = parseInt(s.substr(1 + 2 * d, d), 16);
@@ -220,11 +228,7 @@ ol.color.fromStringInternal_ = function(s) {
g = (g << 4) + g;
b = (b << 4) + b;
}
- if ((n >> 1) & 1) {
- a = 1;
- } else { // has alpha channel
- a = parseInt(s.substr(1 + 3 * d, d), 16) / (d == 1 ? 15 : 255);
- }
+ a = 1;
color = [r, g, b, a];
goog.asserts.assert(ol.color.isValid(color));
return color;
diff --git a/test/spec/ol/color.test.js b/test/spec/ol/color.test.js
index <HASH>..<HASH> 100644
--- a/test/spec/ol/color.test.js
+++ b/test/spec/ol/color.test.js
@@ -21,19 +21,10 @@ describe('ol.color', function() {
expect(ol.color.fromString('#087')).to.eql([0, 136, 119, 1]);
});
- it('can parse 4-digit hex colors', function() {
- expect(ol.color.fromString('#1234')).to.eql([17, 34, 51, 68 / 255]);
- });
-
it('can parse 6-digit hex colors', function() {
expect(ol.color.fromString('#56789a')).to.eql([86, 120, 154, 1]);
});
- it('can parse 8-digit hex colors', function() {
- expect(ol.color.fromString('#bcdef012')).to.eql(
- [188, 222, 240, 18 / 255]);
- });
-
it('can parse rgb colors', function() {
expect(ol.color.fromString('rgb(0, 0, 255)')).to.eql([0, 0, 255, 1]);
}); | Remove support for 4- and 8-digit hex colors
These are not supported by canvas. | openlayers_openlayers | train |
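
For reference, a Python sketch of the 3/6-digit parsing the kept code performs, including the nibble-doubling expansion for the short form; the helper name is invented:

```python
import re

HEX_COLOR_RE = re.compile(r"^#(?:[0-9a-f]{3}){1,2}$", re.IGNORECASE)

def hex_to_rgba(s):
    """Parse #rgb or #rrggbb into (r, g, b, a); 4/8-digit alpha forms
    are rejected, matching what canvas supports."""
    if not HEX_COLOR_RE.match(s):
        raise ValueError(f"not a 3- or 6-digit hex color: {s!r}")
    d = 1 if len(s) == 4 else 2  # digits per channel
    r, g, b = (int(s[1 + i * d: 1 + (i + 1) * d], 16) for i in range(3))
    if d == 1:
        r, g, b = (c << 4 | c for c in (r, g, b))  # expand #abc -> #aabbcc
    return (r, g, b, 1)


print(hex_to_rgba("#087"))     # (0, 136, 119, 1)
print(hex_to_rgba("#56789a"))  # (86, 120, 154, 1)
```
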
3e9ba1ab5338e7f26c66068bba17783b38721ef8 | diff --git a/ChangeLog b/ChangeLog
index <HASH>..<HASH> 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2012-06-15 Ingo Renner <[email protected]>
+
+ * Fixed a bug in status report configuration check that crashed the status report when a site was not configred correctly
+
2012-06-13 Ingo Renner <[email protected]>
* Added an option to define the sorting field in SOLR_RELATION mm tables
diff --git a/ext_tables.php b/ext_tables.php
index <HASH>..<HASH> 100644
--- a/ext_tables.php
+++ b/ext_tables.php
@@ -61,7 +61,7 @@ if (TYPO3_MODE == 'BE') {
$GLOBALS['TYPO3_CONF_VARS']['SC_OPTIONS']['reports']['tx_reports']['status']['providers']['solr'] = array(
'tx_solr_report_SchemaStatus',
'tx_solr_report_SolrconfigStatus',
-# 'tx_solr_report_SolrConfigurationStatus',
+ 'tx_solr_report_SolrConfigurationStatus',
'tx_solr_report_SolrStatus',
'tx_solr_report_SolrVersionStatus',
'tx_solr_report_AccessFilterPluginInstalledStatus',
diff --git a/report/class.tx_solr_report_solrconfigurationstatus.php b/report/class.tx_solr_report_solrconfigurationstatus.php
index <HASH>..<HASH> 100644
--- a/report/class.tx_solr_report_solrconfigurationstatus.php
+++ b/report/class.tx_solr_report_solrconfigurationstatus.php
@@ -147,9 +147,13 @@ class tx_solr_report_SolrConfigurationStatus implements tx_reports_StatusProvide
$rootPagesWithIndexingOff = array();
foreach ($rootPages as $rootPage) {
- tx_solr_Util::initializeTsfe($rootPage['uid']);
+ try {
+ tx_solr_Util::initializeTsfe($rootPage['uid']);
- if (!$GLOBALS['TSFE']->config['config']['index_enable']) {
+ if (!$GLOBALS['TSFE']->config['config']['index_enable']) {
+ $rootPagesWithIndexingOff[] = $rootPage;
+ }
+ } catch (RuntimeException $e) {
$rootPagesWithIndexingOff[] = $rootPage;
}
}
@@ -182,7 +186,7 @@ class tx_solr_report_SolrConfigurationStatus implements tx_reports_StatusProvide
$rootPages = $GLOBALS['TYPO3_DB']->exec_SELECTgetRows(
'uid, title',
'pages',
- 'is_siteroot = 1 AND deleted = 0 AND hidden = 0 AND pid != -1',
+ 'is_siteroot = 1 AND deleted = 0 AND hidden = 0 AND pid != -1 AND doktype IN(1,4) ',
'', '', '',
'uid'
Fixed a bug in the status report configuration check that crashed the status report when a site was not configured correctly
git-svn-id: <URL> | TYPO3-Solr_ext-solr | train |
86345095cb4f1a871852a55967778750c2e6ff76 | diff --git a/linux_backend/container_pool/rootfs_provider/rootfs_provider_suite_test.go b/linux_backend/container_pool/rootfs_provider/rootfs_provider_suite_test.go
index <HASH>..<HASH> 100644
--- a/linux_backend/container_pool/rootfs_provider/rootfs_provider_suite_test.go
+++ b/linux_backend/container_pool/rootfs_provider/rootfs_provider_suite_test.go
@@ -1,9 +1,9 @@
package rootfs_provider_test
import (
- "net/url"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "net/url"
"testing"
)
diff --git a/linux_backend/linux_backend.go b/linux_backend/linux_backend.go
index <HASH>..<HASH> 100644
--- a/linux_backend/linux_backend.go
+++ b/linux_backend/linux_backend.go
@@ -238,24 +238,19 @@ func (b *LinuxBackend) saveSnapshot(container Container) error {
log.Println("saving snapshot for", container.ID())
- tmpfile, err := ioutil.TempFile(os.TempDir(), "snapshot-"+container.ID())
- if err != nil {
- return &FailedToSnapshotError{err}
- }
+ snapshotPath := path.Join(b.snapshotsPath, container.ID())
- err = container.Snapshot(tmpfile)
+ snapshot, err := os.Create(snapshotPath)
if err != nil {
return &FailedToSnapshotError{err}
}
- snapshotPath := path.Join(b.snapshotsPath, container.ID())
-
- err = os.Rename(tmpfile.Name(), snapshotPath)
+ err = container.Snapshot(snapshot)
if err != nil {
return &FailedToSnapshotError{err}
}
- return nil
+ return snapshot.Close()
}
func (b *LinuxBackend) restore(snapshot io.Reader) (warden.Container, error) {
diff --git a/linux_backend/linux_backend_test.go b/linux_backend/linux_backend_test.go
index <HASH>..<HASH> 100644
--- a/linux_backend/linux_backend_test.go
+++ b/linux_backend/linux_backend_test.go
@@ -221,6 +221,9 @@ var _ = Describe("Stop", func() {
fakeSystemInfo,
path.Join(tmpdir, "snapshots"),
)
+
+ err = linuxBackend.Start()
+ Expect(err).ToNot(HaveOccurred())
})
It("takes a snapshot of each container", func() { | simplify snapshot writing strategy
renaming can't be relied upon, as the tmpdir -> destination move may cross filesystem
boundaries | cloudfoundry-attic_garden-linux | train |
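
The trade-off is worth spelling out: writing straight to the destination avoids rename(2)'s EXDEV failure across filesystems, at the cost of the crash atomicity the temp-file-plus-rename dance provided. A Python sketch of both shapes; paths and payloads are illustrative:

```python
import errno
import os

def save_snapshot(snapshots_dir, container_id, payload: bytes):
    """Write the snapshot directly at its destination; simpler than
    temp-file-plus-rename, though no longer atomic on crash."""
    path = os.path.join(snapshots_dir, container_id)
    with open(path, "wb") as f:
        f.write(payload)
    return path

def rename_may_fail(tmp_path, dest_path):
    """The shape the commit removed: rename() raises EXDEV when source
    and destination live on different filesystems."""
    try:
        os.rename(tmp_path, dest_path)
    except OSError as e:
        if e.errno == errno.EXDEV:
            raise RuntimeError("temp dir and destination are on "
                               "different filesystems") from e
        raise
```
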
8ca95cbfc39ac040f7a0fd907c952a0ab01e1315 | diff --git a/lib/rango/core_ext.rb b/lib/rango/core_ext.rb
index <HASH>..<HASH> 100644
--- a/lib/rango/core_ext.rb
+++ b/lib/rango/core_ext.rb
@@ -102,3 +102,17 @@ class Hash
end
end
end
+
+module ParamsMixin
+ def [](key)
+ super(key.to_s)
+ end
+
+ def []=(key, value)
+ super(key.to_s, value)
+ end
+
+ def keys
+ super.map(&:to_sym)
+ end
+end
diff --git a/lib/rango/rack/request.rb b/lib/rango/rack/request.rb
index <HASH>..<HASH> 100644
--- a/lib/rango/rack/request.rb
+++ b/lib/rango/rack/request.rb
@@ -159,7 +159,10 @@ module Rango
protected
def normalize_params(hash)
- hash.deep_symbolize_keys.tap { |hash| hash.delete(:_method) }
+ hash.extend(ParamsMixin)
+ hash.each do |key, value|
+ normalize_params(value) if value.is_a?(Hash)
+ end
end
end
end | Fixed security issue: we can't symbolize params, otherwise an attacker can run our server out of memory | botanicus_rango | train |
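
The underlying issue was Ruby-specific (symbols were not garbage-collected, so symbolizing attacker-controlled keys exhausted memory), but the fix shape is general: store keys in one canonical form and normalize at the access boundary. A Python sketch of that boundary-normalizing mapping, purely illustrative since Python dict keys do not have this problem:

```python
class Params(dict):
    """Keep keys as plain strings internally and normalize at the
    access boundary, instead of converting attacker-controlled input
    into interned keys wholesale."""

    def __getitem__(self, key):
        return super().__getitem__(str(key))

    def __setitem__(self, key, value):
        super().__setitem__(str(key), value)


p = Params()
p[1] = "one"         # stored under the canonical key "1"
print(p["1"], p[1])  # one one
```
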
99c1ef6210c1485398f478ac5444a25099b5b065 | diff --git a/invoiceregistry.go b/invoiceregistry.go
index <HASH>..<HASH> 100644
--- a/invoiceregistry.go
+++ b/invoiceregistry.go
@@ -151,6 +151,8 @@ func (i *invoiceRegistry) SettleInvoice(rHash chainhash.Hash) error {
return
}
+ ltndLog.Infof("Payment received: %v", spew.Sdump(invoice))
+
i.notifyClients(invoice, true)
}() | lnd: add additional logging statement on payment recv | lightningnetwork_lnd | train |
03bf8b47ced8d03dc88663e1c0dc36fcfe281bde | diff --git a/lib/acme/client/resources/authorization.rb b/lib/acme/client/resources/authorization.rb
index <HASH>..<HASH> 100644
--- a/lib/acme/client/resources/authorization.rb
+++ b/lib/acme/client/resources/authorization.rb
@@ -3,7 +3,7 @@ class Acme::Client::Resources::Authorization
DNS01 = Acme::Client::Resources::Challenges::DNS01
TLSSNI01 = Acme::Client::Resources::Challenges::TLSSNI01
- attr_reader :domain, :status, :http01, :dns01, :tls_sni01
+ attr_reader :domain, :status, :expires, :http01, :dns01, :tls_sni01
def initialize(client, response)
@client = client
@@ -25,6 +25,7 @@ class Acme::Client::Resources::Authorization
end
def assign_attributes(body)
+ @expires = Time.parse(body['expires']) if body.has_key? 'expires'
@domain = body['identifier']['value']
@status = body['status']
end
diff --git a/spec/authorization_spec.rb b/spec/authorization_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/authorization_spec.rb
+++ b/spec/authorization_spec.rb
@@ -10,6 +10,11 @@ describe Acme::Client::Resources::Authorization do
let(:authorization) { client.authorize(domain: 'example.org') }
+ it 'returns the correct metadata', vcr: { cassette_name: 'authorization' } do
+ expect(authorization.expires).to be_a(Time)
+ expect(authorization.expires).to be_within(1.second).of Time.mktime(2015, 12, 14, 21, 46, 33)
+ end
+
context '#http01' do
it 'returns a HTTP01 object', vcr: { cassette_name: 'authorization' } do
expect(authorization.http01).to be_a(Acme::Client::Resources::Challenges::HTTP01) | Expose the authorization expiration timestamp
The ACME server returns an optional timestamp that signifies the
expiration date of the domain authorization challenge. The time
format is RFC<I> and can be parsed by Time#parse.
See: <URL> | unixcharles_acme-client | train |
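
The parsing side is a one-liner guarded on presence. A Python sketch of the same optional-field handling, assuming an ISO 8601 timestamp since the exact RFC number is elided above; the names are illustrative:

```python
from datetime import datetime

def assign_attributes(body):
    return {
        "domain": body["identifier"]["value"],
        "status": body["status"],
        # `expires` is optional on the authorization object, so parse
        # it only when present (ISO 8601 assumed for this sketch).
        "expires": (datetime.fromisoformat(body["expires"].replace("Z", "+00:00"))
                    if "expires" in body else None),
    }


body = {"identifier": {"value": "example.org"},
        "status": "pending",
        "expires": "2015-12-14T21:46:33Z"}
print(assign_attributes(body)["expires"])
```
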
1d493b318c49900800ceb94bf8ae01e5da6128e1 | diff --git a/lib/locabulary/json_creator.rb b/lib/locabulary/json_creator.rb
index <HASH>..<HASH> 100644
--- a/lib/locabulary/json_creator.rb
+++ b/lib/locabulary/json_creator.rb
@@ -96,9 +96,9 @@ module Locabulary
def get_required_data_from_spreadsheet
formatted_data = []
- line = []
- spreadsheet_data.shift
+ spreadsheet_data.shift # Discard the header line
spreadsheet_data.each do |row|
+ line = []
final = {}
line << row[0]
line << row[1] if row[1] && !row[1].empty?
@@ -113,7 +113,6 @@ module Locabulary
final["activated_on"] = "2015-07-22"
final["deactivated_on"] = nil
formatted_data << final
- line = []
end
formatted_data
end | Minor method refactor
To better convey what is happening | ndlib_locabulary | train |
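
The bug class behind this refactor: a scratch variable initialized once outside the loop must be reset on every path, and any path that skips the reset leaks state into the next row. Moving the initialization to the top of the loop removes that failure mode entirely, as in this Python sketch with invented data:

```python
rows = [("a", "b"), ("c", None), ("d", "e")]

records = []
for first, second in rows:
    line = [first]           # loop-local: a fresh list every iteration,
    if second:               # so no state can leak between rows (the
        line.append(second)  # bug class the refactor above removes)
    records.append(line)

print(records)  # [['a', 'b'], ['c'], ['d', 'e']]
```
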
8a23738a92666bbebb5f7f88766ad1eee3af76c2 | diff --git a/hcn/hcnendpoint.go b/hcn/hcnendpoint.go
index <HASH>..<HASH> 100644
--- a/hcn/hcnendpoint.go
+++ b/hcn/hcnendpoint.go
@@ -304,6 +304,7 @@ func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) {
return nil, err
}
+ logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString)
endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString))
if hcnErr != nil {
return nil, hcnErr
diff --git a/hcn/hcnloadbalancer.go b/hcn/hcnloadbalancer.go
index <HASH>..<HASH> 100644
--- a/hcn/hcnloadbalancer.go
+++ b/hcn/hcnloadbalancer.go
@@ -224,6 +224,7 @@ func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer,
return nil, err
}
+ logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString)
loadBalancer, hcnErr := createLoadBalancer(string(jsonString))
if hcnErr != nil {
return nil, hcnErr
diff --git a/hcn/hcnnamespace.go b/hcn/hcnnamespace.go
index <HASH>..<HASH> 100644
--- a/hcn/hcnnamespace.go
+++ b/hcn/hcnnamespace.go
@@ -299,6 +299,7 @@ func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) {
return nil, err
}
+ logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString)
namespace, hcnErr := createNamespace(string(jsonString))
if hcnErr != nil {
return nil, hcnErr
diff --git a/hcn/hcnnetwork.go b/hcn/hcnnetwork.go
index <HASH>..<HASH> 100644
--- a/hcn/hcnnetwork.go
+++ b/hcn/hcnnetwork.go
@@ -293,6 +293,7 @@ func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) {
return nil, err
}
+ logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString)
network, hcnErr := createNetwork(string(jsonString))
if hcnErr != nil {
return nil, hcnErr | Debug prints out the json provided to HNS. | Microsoft_hcsshim | train |
b10a127a0304dfccd8aaaaee2372b6d12fcfcf7a | diff --git a/src/main/java/com/plaid/client/DefaultPlaidUserClient.java b/src/main/java/com/plaid/client/DefaultPlaidUserClient.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/plaid/client/DefaultPlaidUserClient.java
+++ b/src/main/java/com/plaid/client/DefaultPlaidUserClient.java
@@ -91,12 +91,24 @@ public class DefaultPlaidUserClient implements PlaidUserClient {
}
@Override
+ public TransactionsResponse mfaConnectStep(String[] mfa, String type) throws PlaidMfaException {
+
+ return handleMfa("/connect/step", mfa, type, TransactionsResponse.class);
+ }
+
+ @Override
public AccountsResponse mfaAuthStep(String mfa, String type) throws PlaidMfaException {
return handleMfa("/auth/step", mfa, type, AccountsResponse.class);
}
@Override
+ public AccountsResponse mfaAuthStep(String[] mfa, String type) throws PlaidMfaException {
+
+ return handleMfa("/auth/step", mfa, type, AccountsResponse.class);
+ }
+
+ @Override
public AccountsResponse mfaAuthDeviceSelectionByDeviceType(String deviceType, String type) throws PlaidMfaException {
if (StringUtils.isEmpty(accessToken)) {
@@ -287,7 +299,7 @@ public class DefaultPlaidUserClient implements PlaidUserClient {
return handlePost("/info", requestParams, InfoResponse.class);
}
- private <T extends PlaidUserResponse> T handleMfa(String path, String mfa, String type, Class<T> returnTypeClass) throws PlaidMfaException {
+ private <T extends PlaidUserResponse> T handleMfa(String path, Object mfa, String type, Class<T> returnTypeClass) throws PlaidMfaException {
if (StringUtils.isEmpty(accessToken)) {
throw new PlaidClientsideException("No accessToken set");
diff --git a/src/main/java/com/plaid/client/PlaidUserClient.java b/src/main/java/com/plaid/client/PlaidUserClient.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/plaid/client/PlaidUserClient.java
+++ b/src/main/java/com/plaid/client/PlaidUserClient.java
@@ -25,10 +25,14 @@ public interface PlaidUserClient {
TransactionsResponse mfaConnectStep(String mfa, String type) throws PlaidMfaException;
+ TransactionsResponse mfaConnectStep(String[] mfa, String type) throws PlaidMfaException;
+
AccountsResponse achAuth(Credentials credentials, String type, ConnectOptions connectOptions) throws PlaidMfaException;
AccountsResponse mfaAuthStep(String mfa, String type) throws PlaidMfaException;
+ AccountsResponse mfaAuthStep(String[] mfa, String type) throws PlaidMfaException;
+
AccountsResponse mfaAuthDeviceSelectionByDeviceType(String deviceType, String type) throws PlaidMfaException;
AccountsResponse mfaAuthDeviceSelectionByDeviceMask(String deviceMask, String type) throws PlaidMfaException; | Handle mfa array for auth and connect step | plaid_plaid-java | train |
f55ca0e6ea2573879d2542b11ef22faeefa8e11e | diff --git a/lib/fog/google/examples/eric-fail.rb b/lib/fog/google/examples/eric-fail.rb
index <HASH>..<HASH> 100755
--- a/lib/fog/google/examples/eric-fail.rb
+++ b/lib/fog/google/examples/eric-fail.rb
@@ -12,7 +12,7 @@ def test
disk.wait_for { disk.ready? }
- server = connection.servers.create(defaults = {
+ server = connection.servers.create({
:name => name,
:disks => [disk],
:machine_type => "n1-standard-1",
@@ -26,5 +26,5 @@ def test
raise "Could not reload created server." unless server.reload
raise "Could not create sshable server." unless server.ssh("whoami")
- raise "Cloud note delete server." unless server.destroy
+ raise "Could not delete server." unless server.destroy
end | [google|compute] Fix spelling errors in example | fog_fog | train |
2388dfe22de67e2681a0fc9bb83114389848dc16 | diff --git a/lib/bridge.js b/lib/bridge.js
index <HASH>..<HASH> 100644
--- a/lib/bridge.js
+++ b/lib/bridge.js
@@ -22,6 +22,8 @@ const MembershipCache = require("./components/membership-cache");
const RoomLinkValidator = require("./components/room-link-validator").RoomLinkValidator;
const RLVStatus = require("./components/room-link-validator").validationStatuses;
const RoomUpgradeHandler = require("./components/room-upgrade-handler");
+const EventNotHandledError = require("./errors").EventNotHandledError;
+const InternalError = require("./errors").InternalError;
const EventQueue = require("./components/event-queue").EventQueue;
const log = require("./components/logging").get("bridge");
@@ -805,7 +807,12 @@ Bridge.prototype._onEvent = function(event) {
this._queue.push(event, dataReadyLimited);
this._queue.consume();
- return request.getPromise();
+
+ return (
+ request
+ .getPromise()
+ .catch(EventNotHandledError, e => this._handleEventError(event, e))
+ );
};
/**
@@ -825,6 +832,10 @@ Bridge.prototype._limited = function(promise, request) {
Bridge.prototype._onConsume = function(err, data) {
if (err) {
+ // The data for the event could not be retrieved.
+ // In this case we reject the request and the event will be delivered
+ // by the HS to the bridge again.
+ data.request.reject();
this.opts.controller.onLog("onEvent failure: " + err);
return;
}
@@ -847,6 +858,18 @@ Bridge.prototype._getBridgeContext = function(event) {
return contextReady.return(context);
}
+Bridge.prototype._handleEventError = function(event, error) {
+ if (!error instanceof EventNotHandledError) {
+ error = wrap(error, InternalError);
+ }
+ this._botIntent.signalBridgeError(
+ event.room_id,
+ event.event_id,
+ "Discord",
+ error.reason
+ );
+};
+
Bridge.prototype._updateIntents = function(event) {
if (event.type === "m.room.member") {
this._membershipCache.setMemberEntry(
@@ -1041,7 +1064,7 @@ BridgeContext.prototype.get = function(roomStore, userStore) {
if (mxSender) {
self.senders.matrix = mxSender;
}
- });
+ }).catch(e => {throw wrap(e, Error, "Could not retrieve bridge context");});
};
/** | Add logic for reacting to permanent errors | matrix-org_matrix-appservice-bridge | train |
6b4b356f826c1d101878be2718c2ef7032d96ef1 | diff --git a/mod/glossary/sql.php b/mod/glossary/sql.php
index <HASH>..<HASH> 100644
--- a/mod/glossary/sql.php
+++ b/mod/glossary/sql.php
@@ -18,14 +18,7 @@
/// fullpivot indicate if the whole pivot should be compared agasint the db or just the first letter
/// printpivot indicate if the pivot should be printed or not
- switch ($CFG->dbtype) {
- case 'postgres7':
- $as = 'as';
- break;
- case 'mysql':
- $as = '';
- break;
- }
+ $as = sql_as(); ///To use or no the AS keyword on column aliases
switch ( $sortkey ) {
case "CREATION": | Using the standard sql_as() function, although it could be not used. | moodle_moodle | train |
44d3c1ca74bd64700b97fd32cb9c3ebe54bc2c53 | diff --git a/lib/oneview-sdk/client.rb b/lib/oneview-sdk/client.rb
index <HASH>..<HASH> 100644
--- a/lib/oneview-sdk/client.rb
+++ b/lib/oneview-sdk/client.rb
@@ -108,12 +108,14 @@ module OneviewSDK
# Get array of all resources of a specified type
# @param [String] type Resource type
# @param [Integer] api_ver API module version to fetch resources from
+ # @param [String] variant API module variant to fetch resource from
# @return [Array<Resource>] Results
# @example Get all Ethernet Networks
# networks = @client.get_all('EthernetNetworks')
+ # synergy_networks = @client.get_all('EthernetNetworks', 300, 'Synergy')
# @raise [TypeError] if the type is invalid
- def get_all(type, api_ver = @api_version)
- klass = OneviewSDK.resource_named(type, api_ver)
+ def get_all(type, api_ver = @api_version, variant = nil)
+ klass = OneviewSDK.resource_named(type, api_ver, variant)
raise TypeError, "Invalid resource type '#{type}'. OneviewSDK::API#{api_ver} does not contain a class like it." unless klass
klass.get_all(self)
end
diff --git a/lib/oneview-sdk/resource/api300.rb b/lib/oneview-sdk/resource/api300.rb
index <HASH>..<HASH> 100644
--- a/lib/oneview-sdk/resource/api300.rb
+++ b/lib/oneview-sdk/resource/api300.rb
@@ -22,7 +22,7 @@ module OneviewSDK
# @param [String] variant Variant (C7000 or Synergy)
# @return [Class] Resource class or nil if not found
def self.resource_named(type, variant = @variant)
- raise "API300 variant #{variant} is not supported!" unless SUPPORTED_VARIANTS.include?(variant)
+ raise "API300 variant #{variant} is not supported!" unless SUPPORTED_VARIANTS.include?(variant.to_s)
new_type = type.to_s.downcase.gsub(/[ -_]/, '')
api_module = OneviewSDK::API300.const_get(variant)
api_module.constants.each do |c|
diff --git a/spec/unit/client_spec.rb b/spec/unit/client_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/unit/client_spec.rb
+++ b/spec/unit/client_spec.rb
@@ -210,10 +210,20 @@ RSpec.describe OneviewSDK::Client do
include_context 'shared context'
it "calls the correct resource's get_all method" do
- expect(OneviewSDK::ServerProfile).to receive(:get_all).with(@client)
+ expect(OneviewSDK::API200::ServerProfile).to receive(:get_all).with(@client)
@client.get_all('ServerProfiles')
end
+ it 'accepts API version and variant parameters' do
+ expect(OneviewSDK::API300::Synergy::ServerProfile).to receive(:get_all).with(@client)
+ @client.get_all('ServerProfiles', 300, 'Synergy')
+ end
+
+ it 'accepts symbols instead of strings' do
+ expect(OneviewSDK::API300::Synergy::ServerProfile).to receive(:get_all).with(@client)
+ @client.get_all(:ServerProfiles, 300, :Synergy)
+ end
+
it 'fails when a bogus resource type is given' do
expect { @client.get_all('BogusResources') }.to raise_error(TypeError, /Invalid resource type/)
end | Updated client.get_all to support variant | HewlettPackard_oneview-sdk-ruby | train |
5bfb36e333b9e269ce2752202b31b3cfc54e2511 | diff --git a/python/src/nnabla/utils/converter/onnx/exporter.py b/python/src/nnabla/utils/converter/onnx/exporter.py
index <HASH>..<HASH> 100644
--- a/python/src/nnabla/utils/converter/onnx/exporter.py
+++ b/python/src/nnabla/utils/converter/onnx/exporter.py
@@ -1349,29 +1349,6 @@ class OnnxExporter:
return nl
def BaseDeconvolution(self, func_name, func):
- # For the case where the dilations parameter is '1', the conversion, inference, compare results is no problem.
- # If the dilations parameter is not '1', the conversion result is OK, But the inference results have problems,
- # the inference tools currently in use are CNTK, Caffe2, ONNXRuntime:
- # CNTK
- # Currently, CNTK does not support 'ConvTranspose'.
- #
- # Caffe2
- # Caffe2 seems not support the case that dilations not equal to '1'. According to investigation, when the input
- # shapes are same, dilations are different, Caffe2 output same infer shape, which is not expected. We supposed
- # Caffe2 did not support dilations != 1.
- #
- # ONNXRuntime
- # The formula used to infer output shape is different from NNabla.
- # ONNXRuntime calculates the output space size for 'ConvTranspose' as :
- # L_i = (Li - 1) Si + Ki + Di - 1 + adj - padhead - padtail
- # Where Li is the spatial size, Si is the stride, Ki is the kernel size, Di is the dilation,
- # adj is output_padding, padhead is the pad start, padtail is the pad end.
- #
- # NNabla is using the formula as :
- # L_i = (Li - 1) Si - 2Pi + Di (Ki - 1) + 1
- # Where Si is the stride, Li is the spatial size, Pi is the padding, Di is the dilation,
- # and Ki is the kernel size for i-th spatial dimension.
- # If the dilations parameter is not '1', the output shape is different.
nl = []
inputs = func.input[:]
input_x_shape = np.array(
@@ -1379,8 +1356,10 @@ class OnnxExporter:
output_y_shape = np.array(
[d for d in self._var_dict[func.output[0]].dim])
weight_shape = [d for d in self._var_dict[func.input[1]].dim]
+ output_padding = [0] * (len(weight_shape[2:]))
if func_name == "Deconvolution":
dp = func.deconvolution_param
+ output_padding = dp.output_padding.dim[:]
group = dp.group
elif func_name == "DepthwiseDeconvolution":
dp = func.depthwise_deconvolution_param
@@ -1403,6 +1382,7 @@ class OnnxExporter:
dilations += [1]
strides += [1]
pads += [0]
+ output_padding += [0]
input_x_shape = np.array(np.concatenate(
([np.prod(input_x_shape[:dp.base_axis])], input_x_shape[dp.base_axis:], [1])))
@@ -1432,7 +1412,8 @@ class OnnxExporter:
dilations=dilations,
strides=strides,
pads=pads * 2,
- group=group
+ group=group,
+ output_padding=output_padding
)
nl.append(n)
@@ -1450,7 +1431,8 @@ class OnnxExporter:
dilations=dilations,
strides=strides,
pads=pads * 2,
- group=group
+ group=group,
+ output_padding=output_padding
)
nl.append(n) | Support deconv's output_padding for ONNX Exporter. | sony_nnabla | train |
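
The comment deleted above contrasted two output-length formulas for transposed convolution. Transcribing exactly those quoted formulas into a small calculator shows where they diverge once dilation exceeds 1, which is the mismatch the old comment warned about:

```python
def out_len_onnxruntime(L, S, K, D, adj=0, pad_head=0, pad_tail=0):
    # Deleted comment's ONNXRuntime formula:
    # L' = (L - 1)*S + K + D - 1 + adj - pad_head - pad_tail
    return (L - 1) * S + K + D - 1 + adj - pad_head - pad_tail

def out_len_nnabla(L, S, K, D, P=0):
    # Deleted comment's NNabla formula:
    # L' = (L - 1)*S - 2*P + D*(K - 1) + 1
    return (L - 1) * S - 2 * P + D * (K - 1) + 1

for D in (1, 2):
    print(D, out_len_onnxruntime(L=8, S=2, K=3, D=D),
          out_len_nnabla(L=8, S=2, K=3, D=D))
# D=1: both give 17; D=2: 18 vs 19, the dilation mismatch described
# in the removed comment.
```
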
93e5a5a2cddedfbcdb4c84d64c32565152628e62 | diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index <HASH>..<HASH> 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -50,6 +50,13 @@ types, because it is only expected to be useful for coming OTel Bridge work.
[float]
===== Bug fixes
+- Fix a possible crash in the instrumentation of an incoming HTTP/2 request: if
+ the underlying Http2Session was destroyed before the APM transaction was
+ ended (on Http2Stream end). This resulted in the instrumentation using the
+ [`stream.session.socket`](https://nodejs.org/api/http2.html#http2sessionsocket)
+ proxy, which can throw `ERR_HTTP2_SOCKET_UNBOUND` after the session is
+ destroyed. ({issues}2670[#2670])
+
[[release-notes-3.32.0]]
==== 3.32.0 2022/04/27
diff --git a/lib/instrumentation/modules/http2.js b/lib/instrumentation/modules/http2.js
index <HASH>..<HASH> 100644
--- a/lib/instrumentation/modules/http2.js
+++ b/lib/instrumentation/modules/http2.js
@@ -61,9 +61,14 @@ module.exports = function (http2, agent, { enabled }) {
// core `http.IncomingMessage` and `http.ServerResponse` objects,
// sufficient for `parsers.getContextFromRequest()` and
// `parsers.getContextFromResponse()`, respectively.
+ // `remoteAddress` is fetched now, rather than at stream end, because
+ // this `stream.session.socket` is a proxy object that can throw
+ // `ERR_HTTP2_SOCKET_UNBOUND` if the Http2Session has been destroyed.
trans.req = {
headers,
- socket: stream.session.socket,
+ socket: {
+ remoteAddress: stream.session.socket.remoteAddress
+ },
method: headers[':method'],
url: headers[':path'],
httpVersion: '2.0'
@@ -141,7 +146,7 @@ module.exports = function (http2, agent, { enabled }) {
function updateHeaders (headers) {
var trans = agent._instrumentation.currTransaction()
- if (trans) {
+ if (trans && !trans.ended) {
var status = headers[':status'] || 200
trans.result = 'HTTP ' + status.toString()[0] + 'xx'
trans.res.statusCode = status
diff --git a/test/instrumentation/modules/http2.test.js b/test/instrumentation/modules/http2.test.js
index <HASH>..<HASH> 100644
--- a/test/instrumentation/modules/http2.test.js
+++ b/test/instrumentation/modules/http2.test.js
@@ -114,6 +114,56 @@ isSecure.forEach(secure => {
})
})
+ test(`http2.${method} stream end after session destroy`, t => {
+ t.plan(16)
+
+ resetAgent((data) => {
+ assert(t, data, secure, port)
+ server.close()
+ })
+
+ var port
+ var server = secure
+ ? http2.createSecureServer(pem)
+ : http2.createServer()
+
+ var onError = err => t.error(err)
+ server.on('error', onError)
+ server.on('socketError', onError)
+
+ server.on('stream', function (stream, headers) {
+ var trans = ins.currTransaction()
+ t.ok(trans, 'have current transaction')
+ t.strictEqual(trans.type, 'request')
+
+ stream.respond({
+ 'content-type': 'text/plain',
+ ':status': 200
+ })
+
+ // Destroying the Http2Session results in any usage of the
+ // `stream.session.socket` proxy possibly throwing
+ // `ERR_HTTP2_SOCKET_UNBOUND`. This test case ensures the APM agent
+ // doesn't blow up on this.
+ stream.session.destroy()
+
+ stream.end('foo')
+ })
+
+ server.listen(() => {
+ port = server.address().port
+ var client = connect(secure, port)
+ client.on('error', onError)
+ client.on('socketError', onError)
+
+ var req = client.request({ ':path': '/' })
+ assertResponse(t, req, '') // Do not expect 'foo' to get through.
+ req.resume()
+ req.on('end', () => client.destroy())
+ req.end()
+ })
+ })
+
test(`http2.${method} stream respondWithFD`, t => {
t.plan(17) | fix: possible crash in instrumentation of an incoming HTTP/2 request (#<I>)
This also fixes a crash that was possible in instrumentation if an
HTTP/2 server called `stream.respond()` after the stream had been
destroyed (and the transaction ended):
../lib/instrumentation/modules/http2.js:<I>
trans.res.statusCode = status
^
TypeError: Cannot set properties of null (setting 'statusCode')
Fixes: #<I> | elastic_apm-agent-nodejs | train |
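
The fix pattern is eager capture: read the fragile proxy-backed field once at request start and keep a plain copy, so nothing touches the proxy after the session dies. A Python sketch with a toy proxy standing in for http2's session.socket; the names and address are invented:

```python
class UnboundSocketError(Exception):
    pass

class SocketProxy:
    """Stand-in for http2's session.socket proxy, which starts throwing
    once the underlying session is destroyed."""
    def __init__(self):
        self._addr = "203.0.113.7"
        self._destroyed = False

    def destroy(self):
        self._destroyed = True

    @property
    def remote_address(self):
        if self._destroyed:
            raise UnboundSocketError("ERR_HTTP2_SOCKET_UNBOUND")
        return self._addr


proxy = SocketProxy()
# Eager capture at request start: copy the one field needed later into
# a plain dict, so ending the transaction never touches the live proxy.
req = {"socket": {"remote_address": proxy.remote_address}}
proxy.destroy()
print(req["socket"]["remote_address"])  # safe even after destroy
```
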
8d7a39ab08bda371846c5d8ebdbd4123d618e37b | diff --git a/lib/falcor/operations/bind/bind.js b/lib/falcor/operations/bind/bind.js
index <HASH>..<HASH> 100644
--- a/lib/falcor/operations/bind/bind.js
+++ b/lib/falcor/operations/bind/bind.js
@@ -41,12 +41,10 @@ module.exports = function bind(boundPath) {
if(paths.length > 0) {
return boundModel.get.apply(boundModel, paths.concat(function() {
return boundModel;
- })).
- catchException(falcor.Observable.empty());
+ }))['catch'](falcor.Observable.empty());
}
return falcor.Observable.returnValue(boundModel);
- }).
- catchException(function(e) {
+ })['catch'](function(e) {
if (e instanceof InvalidModelError) {
throw e;
} | fix(bind): catchException => ['catch']
I'm working off master, so this was a problem for me. This is the quick fix for me until the #<I> issue is resolved. | Netflix_falcor | train |
587bd8c89a2bb7c6626517339ad6d569287196ec | diff --git a/policy/policy-authority.go b/policy/policy-authority.go
index <HASH>..<HASH> 100644
--- a/policy/policy-authority.go
+++ b/policy/policy-authority.go
@@ -44,6 +44,7 @@ func NewPolicyAuthorityImpl(dbMap *gorp.DbMap, enforceWhitelist bool, challengeT
}
// Take note of which challenges to offer
+ pa.supportedChallenges = map[string]bool{}
for _, challengeType := range challengeTypes {
pa.supportedChallenges[challengeType] = true
} | Initialize the challenge type map before using it | letsencrypt_boulder | train |
b2e623fb45b8044e64139e112a9a12ccf9f7d6b6 | diff --git a/lib/should.js b/lib/should.js
index <HASH>..<HASH> 100644
--- a/lib/should.js
+++ b/lib/should.js
@@ -632,14 +632,16 @@ Assertion.prototype = {
err = e;
}
- if (typeof message === 'string') {
- ok = message === err.message;
- } else if (message instanceof RegExp) {
- ok = message.test(err.message);
- }
+ if (ok) {
+ if (typeof message === 'string') {
+ ok = message === err.message;
+ } else if (message instanceof RegExp) {
+ ok = message.test(err.message);
+ }
- if (message && !ok) {
- errorInfo = ' matching \'' + message + '\'';
+ if (message && !ok) {
+ errorInfo = ' matching \'' + message + '\'';
+ }
}
this.assert(
diff --git a/test/should.test.js b/test/should.test.js
index <HASH>..<HASH> 100644
--- a/test/should.test.js
+++ b/test/should.test.js
@@ -355,7 +355,7 @@ module.exports = {
err(function(){
(function(){}).should.throw(/fail/);
- }, 'expected an exception to be thrown matching \'/fail/\'');
+ }, 'expected an exception to be thrown');
err(function(){
(function(){ throw new Error('error'); }).should.throw(/fail/);
@@ -367,7 +367,19 @@ module.exports = {
err(function(){
(function(){}).should.throw('fail');
+ }, 'expected an exception to be thrown');
+
+ err(function(){
+ (function(){ throw new Error('error'); }).should.throw('fail');
}, 'expected an exception to be thrown matching \'fail\'');
+ },
+
+ 'test throw() with string message': function(){
+ (function(){ throw new Error('fail'); }).should.throw('fail');
+
+ err(function(){
+ (function(){}).should.throw('fail');
+ }, 'expected an exception to be thrown');
err(function(){
(function(){ throw new Error('error'); }).should.throw('fail'); | Reverted failure message to only mention expected exception | tj_should.js | train |
61943d2f3900a37929a263f775270c687efbfcbc | diff --git a/test/test_io.py b/test/test_io.py
index <HASH>..<HASH> 100644
--- a/test/test_io.py
+++ b/test/test_io.py
@@ -414,6 +414,23 @@ class TestReadTextSegment(unittest.TestCase):
end=len(raw_text_segment)-1),
(text_dict, delim))
+ if six.PY3:
+ def test_primary_bad_segment_warning(self):
+ """
+ Test warning with unpaired non-boundary delimiter in last value.
+
+ """
+ raw_text_segment = '/k1/v1/k2/v2/k3/v3//'
+ delim = '/'
+ text_dict = {'k1':'v1','k2':'v2','k3':'v3'}
+ buf = six.BytesIO(six.b(raw_text_segment))
+
+ with self.assertWarnsRegex(UserWarning,
+ 'ill-formed TEXT segment'):
+ FlowCal.io.read_fcs_text_segment(buf=buf,
+ begin=0,
+ end=len(raw_text_segment)-1)
+
def test_primary_multi_delim_in_keyword_1(self):
"""
Test that multiple delimiters in keyword still parses correctly.
@@ -1154,6 +1171,25 @@ class TestReadTextSegment(unittest.TestCase):
supplemental=True),
(text_dict, delim))
+ if six.PY3:
+ def test_supp_bad_segment_warning(self):
+ """
+ Test warning with unpaired non-boundary delimiter in last value.
+
+ """
+ raw_text_segment = '/k1/v1/k2/v2/k3/v3//'
+ delim = '/'
+ text_dict = {'k1':'v1','k2':'v2','k3':'v3'}
+ buf = six.BytesIO(six.b(raw_text_segment))
+
+ with self.assertWarnsRegex(UserWarning,
+ 'ill-formed TEXT segment'):
+ FlowCal.io.read_fcs_text_segment(buf=buf,
+ begin=0,
+ end=len(raw_text_segment)-1,
+ delim=delim,
+ supplemental=True)
+
def test_supp_bad_segment_s(self):
"""
Test edge case with unpaired non-boundary delimiter in last value.
@@ -1177,6 +1213,25 @@ class TestReadTextSegment(unittest.TestCase):
supplemental=True),
(text_dict, delim))
+ if six.PY3:
+ def test_supp_bad_segment_s_warning(self):
+ """
+ Test warning with unpaired non-boundary delimiter in last value.
+
+ """
+ raw_text_segment = 'k1/v1/k2/v2/k3/v3//'
+ delim = '/'
+ text_dict = {'k1':'v1','k2':'v2','k3':'v3'}
+ buf = six.BytesIO(six.b(raw_text_segment))
+
+ with self.assertWarnsRegex(UserWarning,
+ 'ill-formed TEXT segment'):
+ FlowCal.io.read_fcs_text_segment(buf=buf,
+ begin=0,
+ end=len(raw_text_segment)-1,
+ delim=delim,
+ supplemental=True)
+
def test_supp_multi_delim_in_keyword_1(self):
"""
Test that multiple delimiters in keyword still parses correctly. | Add unit tests for IO warning. | taborlab_FlowCal | train |
69646f624b5888cddefa2b6a63495bc02f8ae2ea | diff --git a/src/java/com/threerings/presents/client/BlockingCommunicator.java b/src/java/com/threerings/presents/client/BlockingCommunicator.java
index <HASH>..<HASH> 100644
--- a/src/java/com/threerings/presents/client/BlockingCommunicator.java
+++ b/src/java/com/threerings/presents/client/BlockingCommunicator.java
@@ -501,7 +501,11 @@ public class BlockingCommunicator extends Communicator
{
// we want to interrupt the reader thread as it may be blocked listening to the socket;
// this is only called if the reader thread doesn't shut itself down
- interrupt();
+
+ // While it would be nice to be able to handle wacky cases requiring reader-side
+ // shutdown, doing so causes consternation on the other end's writer which suddenly
+ // loses its connection. So, we rely on the writer side to take us down.
+ // interrupt();
}
} | Roll this back - calling interrupt() in this way caused the other end's writer to get fairly upset.
git-svn-id: svn+ssh://src.earth.threerings.net/narya/trunk@<I> <I>f4-<I>e9-<I>-aa3c-eee0fc<I>fb1 | threerings_narya | train |
65a0b0e018b1fff56943484e4593888169f78fa9 | diff --git a/tests/DocumentExtendedTest.php b/tests/DocumentExtendedTest.php
index <HASH>..<HASH> 100644
--- a/tests/DocumentExtendedTest.php
+++ b/tests/DocumentExtendedTest.php
@@ -175,13 +175,6 @@ class DocumentExtendedTest extends \PHPUnit_Framework_TestCase
$this->assertTrue(true === ($resultingDocument3->someAttribute == 'someValue2'));
$this->assertTrue(true === ($resultingDocument3->someOtherAttribute == 'someOtherValue2'));
-
- var_dump($resultingDocument3);
-
-
-
- var_dump($resultingDocument);
- debugbreak("[email protected]");
$response = $documentHandler->delete($resultingDocument);
$this->assertTrue(true === $response, 'Delete should return true!');
} | removed var_dump and debug_break calls | arangodb_arangodb-php | train |
ddb397b8bebc98611b5699586e30313e699109f8 | diff --git a/sos/jupyter/completer.py b/sos/jupyter/completer.py
index <HASH>..<HASH> 100644
--- a/sos/jupyter/completer.py
+++ b/sos/jupyter/completer.py
@@ -55,7 +55,7 @@ class SoS_MagicsCompleter(Completer):
else:
return None
elif text.startswith('%') and line.startswith(text):
- return text, ['%' + x + ' ' for x in self.kernel.ALL_MAGICS.keys() if x.startswith(text[1:])]
+ return text, ['%' + x + ' ' for x in self.kernel.ALL_MAGICS if x.startswith(text[1:])]
elif any(line.startswith(x) for x in ('%use', '%with', '%restart')):
return text, [x for x in self.kernel.supported_languages.keys() if x.startswith(text)]
elif line.startswith('%get'): | Move interactive summary from README to a tutorial | vatlab_SoS | train |
f0101503bf0f32b99b94d44e76791a4bd6320dfa | diff --git a/integration/tools/validation/src/test/java/alluxio/cli/ValidationTestUtils.java b/integration/tools/validation/src/test/java/alluxio/cli/ValidationTestUtils.java
index <HASH>..<HASH> 100644
--- a/integration/tools/validation/src/test/java/alluxio/cli/ValidationTestUtils.java
+++ b/integration/tools/validation/src/test/java/alluxio/cli/ValidationTestUtils.java
@@ -73,6 +73,7 @@ public class ValidationTestUtils {
StreamResult sResult = new StreamResult(f);
transformer.transform(source, sResult);
} catch (Exception e) {
+ // TODO(bradyoo): Swallowing all exceptions and merely printing is not okay!
e.printStackTrace();
}
}
diff --git a/integration/tools/validation/src/test/java/alluxio/cli/hdfs/HdfsProxyUserValidationTaskTest.java b/integration/tools/validation/src/test/java/alluxio/cli/hdfs/HdfsProxyUserValidationTaskTest.java
index <HASH>..<HASH> 100644
--- a/integration/tools/validation/src/test/java/alluxio/cli/hdfs/HdfsProxyUserValidationTaskTest.java
+++ b/integration/tools/validation/src/test/java/alluxio/cli/hdfs/HdfsProxyUserValidationTaskTest.java
@@ -20,23 +20,17 @@ import alluxio.cli.ValidationTestUtils;
import alluxio.conf.InstancedConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.security.authentication.AuthType;
-import alluxio.util.network.NetworkAddressUtils;
import com.google.common.collect.ImmutableMap;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.Map;
-@RunWith(PowerMockRunner.class)
-@PrepareForTest(NetworkAddressUtils.class)
public class HdfsProxyUserValidationTaskTest {
private static File sTestDir;
private InstancedConfiguration mConf; | Fix HDFSProxyUserValidationTaskTest
Using Powermock makes it now such that you can no longer fetch the
resource after Java 9 because (See
<URL>)
pr-link: Alluxio/alluxio#<I>
change-id: cid-a<I>de<I>acf<I>be<I>e2afe<I>aeb<I>dc5c5 | Alluxio_alluxio | train |
48454a33b53a2007a0a511ecc0ae90213a782d8f | diff --git a/src/main/java/com/xebialabs/restito/semantics/Action.java b/src/main/java/com/xebialabs/restito/semantics/Action.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/xebialabs/restito/semantics/Action.java
+++ b/src/main/java/com/xebialabs/restito/semantics/Action.java
@@ -172,4 +172,21 @@ public class Action implements Applicable {
}
});
}
+
+ /**
+ * Creates a composite action which contains all passed actions and
+ * executes them in the same order.
+ */
+ public static Action composite(final Action... actions) {
+ return new Action(new Function<Response, Response>() {
+ @Override
+ public Response apply(Response input) {
+ for (Applicable action : actions) {
+ action.apply(input);
+ }
+
+ return input;
+ }
+ });
+ }
}
diff --git a/src/test/java/com/xebialabs/restito/semantics/ActionTest.java b/src/test/java/com/xebialabs/restito/semantics/ActionTest.java
index <HASH>..<HASH> 100644
--- a/src/test/java/com/xebialabs/restito/semantics/ActionTest.java
+++ b/src/test/java/com/xebialabs/restito/semantics/ActionTest.java
@@ -87,7 +87,7 @@ public class ActionTest {
}
@Test
- public void shouldCreateCompositeAction() {
+ public void shouldCreateCompositeActionFromActions() {
composite(status(HttpStatus.OK_200), header("foo", "bar")).apply(response);
InOrder inOrder = inOrder(response);
@@ -95,4 +95,11 @@ public class ActionTest {
inOrder.verify(response).setHeader("foo", "bar");
}
+
+ @Test
+ public void shouldCreateCompositeActionFromApplicables() {
+ composite((Applicable)status(HttpStatus.NOT_ACCEPTABLE_406)).apply(response);
+
+ verify(response).setStatus(HttpStatus.NOT_ACCEPTABLE_406);
+ }
Added support for combined actions alongside applicables | mkotsur_restito | train
590dd6f1cf726c477db3a97e8f742bf6e1a9fb07 | diff --git a/src/ol/imagetile.js b/src/ol/imagetile.js
index <HASH>..<HASH> 100644
--- a/src/ol/imagetile.js
+++ b/src/ol/imagetile.js
@@ -61,7 +61,7 @@ ol.inherits(ol.ImageTile, ol.Tile);
ol.ImageTile.prototype.disposeInternal = function() {
if (this.state == ol.TileState.LOADING) {
this.unlistenImage_();
- this.image_.src = ol.ImageTile.blankImageUrl;
+ this.image_ = ol.ImageTile.getBlankImage();
}
if (this.interimTile) {
this.interimTile.dispose();
@@ -98,7 +98,7 @@ ol.ImageTile.prototype.getKey = function() {
ol.ImageTile.prototype.handleImageError_ = function() {
this.state = ol.TileState.ERROR;
this.unlistenImage_();
- this.image_.src = ol.ImageTile.blankImageUrl;
+ this.image_ = ol.ImageTile.getBlankImage();
this.changed();
};
@@ -150,12 +150,12 @@ ol.ImageTile.prototype.unlistenImage_ = function() {
/**
- * Data URI for a blank image.
- * @type {string}
+ * Get a 1-pixel blank image.
+ * @return {HTMLCanvasElement} Blank image.
*/
-ol.ImageTile.blankImageUrl = (function() {
+ol.ImageTile.getBlankImage = function() {
var ctx = ol.dom.createCanvasContext2D(1, 1);
ctx.fillStyle = 'rgba(0,0,0,0)';
ctx.fillRect(0, 0, 1, 1);
- return ctx.canvas.toDataURL('image/png');
-})();
+ return ctx.canvas;
+}; | Always create a new blank image to avoid CSP violations | openlayers_openlayers | train |
88ce8e9aa23ad84b5786c2778e7632479f27058a | diff --git a/src/main/java/stormpot/whirlpool/Whirlpool.java b/src/main/java/stormpot/whirlpool/Whirlpool.java
index <HASH>..<HASH> 100644
--- a/src/main/java/stormpot/whirlpool/Whirlpool.java
+++ b/src/main/java/stormpot/whirlpool/Whirlpool.java
@@ -87,14 +87,25 @@ public class Whirlpool<T extends Poolable> implements LifecycledPool<T> {
}
WSlot relieve(long timeout, TimeUnit unit) throws InterruptedException {
- Request request = requestTL.get();
+ Request request = getThreadLocalRequest();
request.setTimeout(timeout, unit);
request.requestOp = RELIEVE;
return perform(request, false, true);
}
+
+ private Request getThreadLocalRequest() {
+ Request request = requestTL.get();
+ if (request.requestOp != null) {
+ throw new AssertionError("requesting thread have stale request");
+ }
+ if (request.response != null) {
+ throw new AssertionError("requesting thread have stale response");
+ }
+ return request;
+ }
public T claim() throws PoolException, InterruptedException {
- Request request = requestTL.get();
+ Request request = getThreadLocalRequest();
request.setNoTimeout();
request.requestOp = CLAIM;
WSlot slot = perform(request, true, true);
@@ -106,7 +117,7 @@ public class Whirlpool<T extends Poolable> implements LifecycledPool<T> {
if (unit == null) {
throw new IllegalArgumentException("timeout TimeUnit cannot be null.");
}
- Request request = requestTL.get();
+ Request request = getThreadLocalRequest();
request.setTimeout(timeout, unit);
request.requestOp = CLAIM;
WSlot slot = perform(request, true, true);
@@ -132,7 +143,7 @@ public class Whirlpool<T extends Poolable> implements LifecycledPool<T> {
}
void release(WSlot slot) {
- Request request = requestTL.get();
+ Request request = getThreadLocalRequest();
request.setTimeout(1, TimeUnit.HOURS);
request.requestOp = slot;
try {
@@ -168,11 +179,8 @@ public class Whirlpool<T extends Poolable> implements LifecycledPool<T> {
request.await();
continue;
}
- if (slot == TIMEOUT) {
- slot = null;
- }
request.response = null;
- return slot;
+ return slot == TIMEOUT? null : slot;
} else {
// step 2 - did not get lock - spin-wait for response
for (int i = 0; i < WAIT_SPINS; i++) { | add some asserts in the hope that it would expose a bug, but it did not. | chrisvest_stormpot | train |
e687192bafdf5f91ca4eed977f9c006fd1e4018d | diff --git a/datatableview/helpers.py b/datatableview/helpers.py
index <HASH>..<HASH> 100644
--- a/datatableview/helpers.py
+++ b/datatableview/helpers.py
@@ -22,7 +22,7 @@ import six
from .utils import resolve_orm_path, XEDITABLE_FIELD_TYPES
-if get_version().split('.') >= ['1', '5']:
+if [int(v) for v in get_version().split('.')[0:2]] >= [1, 5]:
from django.utils.timezone import localtime
else:
localtime = None | Update helpers.py to properly detect Django <I> and up
Was not allowing localtime adjustment due to an incorrect version check which fails on Django versions over <I>. | pivotal-energy-solutions_django-datatable-view | train
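The bug this fixes is lexicographic comparison: splitting a version string yields strings, and list comparison is element-wise, so '10' sorts before '5'. A small reproduction of the failure and of the fix's int-cast (the helper name is ours, not the project's):

# Lexicographic comparison gets Django 1.10 wrong: '1' < '5', so '10' < '5'.
assert ['1', '10'] < ['1', '5']

# Casting each component to int restores numeric ordering, as in the fix.
def at_least_1_5(version_string):
    return [int(v) for v in version_string.split('.')[0:2]] >= [1, 5]

assert at_least_1_5('1.10.3')
assert not at_least_1_5('1.4.22')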
a3a5e62584c6be61e65941982056f4568e9c8b85 | diff --git a/src/Controllers/ElementalAreaController.php b/src/Controllers/ElementalAreaController.php
index <HASH>..<HASH> 100644
--- a/src/Controllers/ElementalAreaController.php
+++ b/src/Controllers/ElementalAreaController.php
@@ -165,6 +165,14 @@ class ElementalAreaController extends CMSMain
return HTTPResponse::create($body)->addHeader('Content-Type', 'application/json');
}
+ /**
+ * Provides action control for form fields that are request handlers when they're used in an in-line edit form.
+ *
+ * Eg. UploadField
+ *
+ * @param HTTPRequest $request
+ * @return array|HTTPResponse|\SilverStripe\Control\RequestHandler|string
+ */
public function formAction(HTTPRequest $request)
{
$formName = $request->param('FormName'); | DOCS Documention form field action controller method | dnadesign_silverstripe-elemental | train |
21f2e8fa3e073784c5d2f973a31f1165ede09a9f | diff --git a/tests/__init__.py b/tests/__init__.py
index <HASH>..<HASH> 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -58,7 +58,7 @@ def _all_objects_in_storage_broker(storage_broker):
bucket = storage_broker.s3resource.Bucket(storage_broker.bucket)
- for obj in bucket.objects.filter(Prefix=storage_broker.uuid).all():
+ for obj in bucket.objects.filter(Prefix=storage_broker._get_prefix()).all():
yield obj.key
registration_key = "dtool-{}".format(storage_broker.uuid)
diff --git a/tests/test_prefix_for_storage_location_functional.py b/tests/test_prefix_for_storage_location_functional.py
index <HASH>..<HASH> 100644
--- a/tests/test_prefix_for_storage_location_functional.py
+++ b/tests/test_prefix_for_storage_location_functional.py
@@ -2,8 +2,7 @@
import pytest
-from . import tmp_uuid_and_uri # NOQA
-from . import tmp_env_var
+from . import tmp_env_var, S3_TEST_BASE_URI, _remove_dataset
def _prefix_contains_something(storage_broker, prefix):
@@ -14,22 +13,49 @@ def _prefix_contains_something(storage_broker, prefix):
return len(prefix_objects) > 0
-def test_prefix_functional(tmp_uuid_and_uri): # NOQA
+def test_prefix_functional(): # NOQA
- uuid, dest_uri = tmp_uuid_and_uri
+ from dtoolcore import DataSetCreator
+ from dtoolcore import DataSet, iter_datasets_in_base_uri
- from dtoolcore import ProtoDataSet, generate_admin_metadata
- from dtoolcore import DataSet
+ # Create a minimal dataset without a prefix
+ with tmp_env_var("DTOOL_S3_DATASET_PREFIX", ""):
+ with DataSetCreator("no-prefix", S3_TEST_BASE_URI) as ds_creator:
+ ds_creator.put_annotation("prefix", "no")
+ no_prefix_uri = ds_creator.uri
- name = "my_dataset"
- admin_metadata = generate_admin_metadata(name)
- admin_metadata["uuid"] = uuid
+ dataset_no_prefix = DataSet.from_uri(no_prefix_uri)
+
+ # Basic test that retrieval works.
+ assert dataset_no_prefix.get_annotation("prefix") == "no"
+
+ # Basic test that prefix is correct.
+ structure_key = dataset_no_prefix._storage_broker.get_structure_key()
+ assert structure_key.startswith(dataset_no_prefix.uuid)
# Create a minimal dataset
prefix = "u/olssont/"
with tmp_env_var("DTOOL_S3_DATASET_PREFIX", prefix):
- proto_dataset = ProtoDataSet(
- uri=dest_uri,
- admin_metadata=admin_metadata,
- config_path=None)
- assert proto_dataset._storage_broker.get_structure_key().startswith(prefix)
+ with DataSetCreator("no-prefix", S3_TEST_BASE_URI) as ds_creator:
+ ds_creator.put_annotation("prefix", "yes")
+ prefix_uri = ds_creator.uri
+
+ dataset_with_prefix = DataSet.from_uri(prefix_uri)
+
+ # Basic test that retrieval works.
+ assert dataset_with_prefix.get_annotation("prefix") == "yes"
+
+ # Basic test that prefix is correct.
+ structure_key = dataset_with_prefix._storage_broker.get_structure_key()
+ assert structure_key.startswith(prefix)
+
+ # Basic tests that everything can be picked up.
+ dataset_uris = list(
+ ds.uri for ds in
+ iter_datasets_in_base_uri(S3_TEST_BASE_URI)
+ )
+ assert dataset_no_prefix.uri in dataset_uris
+ assert dataset_with_prefix.uri in dataset_uris
+
+ _remove_dataset(dataset_no_prefix.uri)
+ _remove_dataset(dataset_with_prefix.uri) | Bulk out the prefix for storage location functional test | jic-dtool_dtool-s3 | train |
4711e0925603b6be51553196a938f42c6d190a5a | diff --git a/Application.php b/Application.php
index <HASH>..<HASH> 100644
--- a/Application.php
+++ b/Application.php
@@ -112,6 +112,7 @@ class Application extends \Silex\Application
'controller.render:renderRawAction'
)->bind('pattern_render_raw');
$this->match('/{name}', 'controller.ui:staticAction')
+ ->bind('static')
->value('name', 'index.html')
->assert('name', '.+');
}
diff --git a/Console/Command/ServerCommand.php b/Console/Command/ServerCommand.php
index <HASH>..<HASH> 100644
--- a/Console/Command/ServerCommand.php
+++ b/Console/Command/ServerCommand.php
@@ -16,6 +16,7 @@ use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
+use Symfony\Component\Console\Style\SymfonyStyle;
use Symfony\Component\Process\ProcessBuilder;
class ServerCommand extends Command
@@ -68,9 +69,47 @@ class ServerCommand extends Command
return $this;
}
+ private function validateAddress($address)
+ {
+ if (null === $address) {
+ $hostname = '127.0.0.1';
+ $port = $this->findBestPort();
+ } elseif (false !== $pos = strrpos($address, ':')) {
+ $hostname = substr($address, 0, $pos);
+ $port = substr($address, $pos + 1);
+ } elseif (ctype_digit($address)) {
+ $hostname = '127.0.0.1';
+ $port = $address;
+ } else {
+ $hostname = $address;
+ $port = $this->findBestPort();
+ }
+ if ($hostname === '*') {
+ $hostname = '0.0.0.0';
+ }
+ if (!ctype_digit($port)) {
+ throw new \InvalidArgumentException(sprintf('Port %s is not valid', $port));
+ }
+
+ return sprintf('%s:%s', $hostname, $port);
+ }
+
+ private function findBestPort()
+ {
+ $port = 8000;
+ while (false !== $fp = @fsockopen($this->hostname, $port, $errno, $errstr, 1)) {
+ fclose($fp);
+ if ($port++ >= 8100) {
+ throw new \RuntimeException('Unable to find a port available to run the web server.');
+ }
+ }
+
+ return $port;
+ }
+
public function execute(InputInterface $input, OutputInterface $output)
{
- $address = $input->getArgument('address');
+ $address = $this->validateAddress($input->getArgument('address'));
$routerFile = realpath(__DIR__.'/../../Resources/router.php');
$builder = $this->getProcessBuilder()
@@ -79,12 +118,18 @@ class ServerCommand extends Command
'MANNEQUIN_CONFIG' => realpath($this->configFile),
'MANNEQUIN_AUTOLOAD' => realpath($this->autoloadPath),
'MANNEQUIN_DEBUG' => $this->debug,
+ 'MANNEQUIN_VERBOSITY' => $output->getVerbosity(),
])
->setWorkingDirectory(realpath(__DIR__.'/../../Resources'))
->setTimeout(null);
- return $builder->getProcess()
- ->setTty(true)
- ->run();
+ $process = $builder->getProcess();
+ $io = new SymfonyStyle($input, $output);
+ $io->success([
+ sprintf('Starting server on http://%s', $address),
+ 'For debug output, use the -v flag',
+ ]);
+
+ return $this->getHelper('process')->run($output, $process, null, null);
}
}
diff --git a/Resources/router.php b/Resources/router.php
index <HASH>..<HASH> 100644
--- a/Resources/router.php
+++ b/Resources/router.php
@@ -9,14 +9,20 @@
* with this source code in the file LICENSE.
*/
+use LastCall\Mannequin\Core\Application;
+use Symfony\Component\Console\Output\ConsoleOutput;
+use Symfony\Component\Console\Logger\ConsoleLogger;
+
if (getenv('MANNEQUIN_AUTOLOAD')) {
require_once getenv('MANNEQUIN_AUTOLOAD');
}
-$app = new \LastCall\Mannequin\Core\Application([
+$output = new ConsoleOutput(getenv('MANNEQUIN_VERBOSITY'));
+$app = new Application([
'debug' => getenv('MANNEQUIN_DEBUG') ?? false,
'autoload_file' => getenv('MANNEQUIN_AUTOLOAD'),
'config_file' => getenv('MANNEQUIN_CONFIG'),
+ 'logger' => new ConsoleLogger($output),
]);
$app->run(); | Improve robustness and debuggability of server command | LastCallMedia_Mannequin-Core | train |
7af2b8be3e60631c715bcda663ee4501b6927757 | diff --git a/decode_test.go b/decode_test.go
index <HASH>..<HASH> 100644
--- a/decode_test.go
+++ b/decode_test.go
@@ -32,6 +32,7 @@ pi = 3.14
colors = [
["red", "green", "blue"],
["cyan", "magenta", "yellow", "black"],
+ [pink,brown],
]
my {
@@ -76,6 +77,7 @@ my {
Colors: [][]string{
{"red", "green", "blue"},
{"cyan", "magenta", "yellow", "black"},
+ {"pink", "brown"},
},
My: map[string]cats{
"Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
diff --git a/lex.go b/lex.go
index <HASH>..<HASH> 100644
--- a/lex.go
+++ b/lex.go
@@ -76,6 +76,7 @@ type lexer struct {
state stateFn
items chan item
circuitBreaker int
+ isEnd func(lx *lexer, r rune) bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
@@ -109,6 +110,7 @@ func lex(input string) *lexer {
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
+ isEnd: isEndNormal,
}
return lx
}
@@ -347,6 +349,7 @@ func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new lines.
r := lx.next()
+ //u.Infof("lexValue r = %v", string(r))
if isWhitespace(r) {
return lexSkip(lx, lexValue)
}
@@ -355,15 +358,16 @@ func lexValue(lx *lexer) stateFn {
case r == arrayStart:
lx.ignore()
lx.emit(itemArrayStart)
+ lx.isEnd = isEndArrayUnQuoted
return lexArrayValue
case r == mapStart:
lx.ignore()
lx.emit(itemMapStart)
return lexMapKeyStart
- case r == sqStringStart:
+ case r == sqStringStart: // single quote: '
lx.ignore() // ignore the " or '
return lexQuotedString
- case r == dqStringStart:
+ case r == dqStringStart: // "
lx.ignore() // ignore the " or '
return lexDubQuotedString
case r == '-':
@@ -379,6 +383,8 @@ func lexValue(lx *lexer) stateFn {
case isNL(r):
return lx.errorf("Expected value but found new line")
}
+ // we didn't consume it, so backup
+ lx.backup()
return lexString
//return lx.errorf("Expected value but found '%s' instead.", r)
}
@@ -387,6 +393,7 @@ func lexValue(lx *lexer) stateFn {
// have already been consumed. All whitespace and new lines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
+ //u.Infof("lexArrayValue r = %v", string(r))
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
@@ -401,9 +408,8 @@ func lexArrayValue(lx *lexer) stateFn {
}
lx.backup()
fallthrough
- case r == arrayValTerm:
- return lx.errorf("Unexpected array value terminator '%v'.",
- arrayValTerm)
+ case r == arrayValTerm: // , we should not have found comma yet
+ return lx.errorf("Unexpected array value terminator '%v'.", arrayValTerm)
case r == arrayEnd:
return lexArrayEnd
}
@@ -417,6 +423,7 @@ func lexArrayValue(lx *lexer) stateFn {
// it ignores whitespace and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
+ //u.Infof("lexArrayValueEnd r = %v", string(r))
switch {
case isWhitespace(r):
return lexSkip(lx, lexArrayValueEnd)
@@ -445,6 +452,7 @@ func lexArrayValueEnd(lx *lexer) stateFn {
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
+ lx.isEnd = isEndNormal
return lx.pop()
}
@@ -647,11 +655,12 @@ func lexDubQuotedString(lx *lexer) stateFn {
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
+ //u.Infof("lexString r = %v", string(r))
switch {
case r == '\\':
return lexStringEscape
// Termination of non-quoted strings
- case isNL(r) || r == eof || r == optValTerm || isWhitespace(r):
+ case lx.isEnd(lx, r):
lx.backup()
if lx.isBool() {
lx.emit(itemBool)
@@ -913,6 +922,14 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
}
}
+func isEndNormal(lx *lexer, r rune) bool {
+ return (isNL(r) || r == eof || r == optValTerm || isWhitespace(r))
+}
+
+func isEndArrayUnQuoted(lx *lexer, r rune) bool {
+ return (isNL(r) || r == eof || r == optValTerm || r == arrayEnd || r == arrayValTerm || isWhitespace(r))
+}
+
// Tests for both key separators
func isKeySeparator(r rune) bool {
return r == keySepEqual || r == keySepColon | allow unquoted values in arrays | lytics_confl | train |
00e91bf8c6a4179255f31ed5cac127b175030e7e | diff --git a/test/dummy/app/models/event.rb b/test/dummy/app/models/event.rb
index <HASH>..<HASH> 100644
--- a/test/dummy/app/models/event.rb
+++ b/test/dummy/app/models/event.rb
@@ -2,8 +2,9 @@ class Event < ActiveRecord::Base
after_initialize :init_event
def init_event
- self.date||= Date.tomorrow
- self.time||= Time.now + 1.hour
+ self.date||= Date.today
+ self.datetime||= (DateTime.now + 1.hour).change(min: 0, sec: 0)
+ self.time||= (Time.current + 1.hour).change(min: 0, sec: 0)
end
end | Fixed default datetime not defined but used by form in create view | benignware_date_picker | train |
cfffc7c0a60a1ead19647ed28decb3e886e0eaed | diff --git a/spyderlib/widgets/qteditor/syntaxhighlighters.py b/spyderlib/widgets/qteditor/syntaxhighlighters.py
index <HASH>..<HASH> 100644
--- a/spyderlib/widgets/qteditor/syntaxhighlighters.py
+++ b/spyderlib/widgets/qteditor/syntaxhighlighters.py
@@ -38,16 +38,17 @@ def make_python_patterns(additional_keywords=[], additional_builtins=[]):
[r"\b[+-]?[0-9]+[lL]?\b",
r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"])
- ml_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*"
- ml_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*'
- multiline_string = any("ML_STRING", [ml_sq3string, ml_dq3string])
sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
- string = any("STRING", [sq3string, dq3string, sqstring, dqstring])
+ uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(?!''')$"
+ uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(?!""")$'
+ string = any("STRING", [sqstring, dqstring, sq3string, dq3string])
+ ufstring1 = any("UF_SQ3STRING", [uf_sq3string])
+ ufstring2 = any("UF_DQ3STRING", [uf_dq3string])
return instance + "|" + kw + "|" + builtin + "|" + comment + "|" + \
- multiline_string + "|" + string + "|" + number + "|" + \
+ ufstring1 + "|" + ufstring2 + "|" + string + "|" + number + "|" + \
any("SYNC", [r"\n"])
#TODO: Use setCurrentBlockUserData for brace matching (see Qt documentation)
@@ -155,36 +156,56 @@ class PythonSH(BaseSH):
IDPROG = re.compile(r"\s+(\w+)", re.S)
ASPROG = re.compile(r".*?\b(as)\b")
# Syntax highlighting states (from one text block to another):
- NORMAL = 0
- INSIDE_STRING = 1
- DEF_TYPES = {"def": ClassBrowserData.FUNCTION, "class": ClassBrowserData.CLASS}
+ NORMAL, INSIDE_SQSTRING, INSIDE_DQSTRING = range(3)
+ DEF_TYPES = {"def": ClassBrowserData.FUNCTION,
+ "class": ClassBrowserData.CLASS}
def __init__(self, parent, font=None, color_scheme=None):
super(PythonSH, self).__init__(parent)
self.import_statements = {}
def highlightBlock(self, text):
- inside_string = self.previousBlockState() == self.INSIDE_STRING
- self.setFormat(0, text.length(),
- self.formats["STRING" if inside_string else "NORMAL"])
+ text = unicode(text)
+ prev_state = self.previousBlockState()
+ if prev_state == self.INSIDE_DQSTRING:
+ offset = -4
+ text = r'""" '+text
+ elif prev_state == self.INSIDE_SQSTRING:
+ offset = -4
+ text = r"''' "+text
+ else:
+ offset = 0
+ prev_state = self.NORMAL
cbdata = None
import_stmt = None
- text = unicode(text)
+ last_state = None
+ state = self.NORMAL
match = self.PROG.search(text)
- index = 0
while match:
for key, value in match.groupdict().items():
if value:
start, end = match.span(key)
- index += end-start
- if key == "ML_STRING":
+ start = max([0, start+offset])
+ end = max([0, end+offset])
+ if key == "UF_SQ3STRING":
self.setFormat(start, end-start, self.formats["STRING"])
- inside_string = not inside_string
- elif inside_string:
+ if prev_state in (self.NORMAL, self.INSIDE_SQSTRING):
+ state = self.INSIDE_SQSTRING
+ else:
+ state = prev_state
+ elif key == "UF_DQ3STRING":
self.setFormat(start, end-start, self.formats["STRING"])
+ if prev_state in (self.NORMAL, self.INSIDE_DQSTRING):
+ state = self.INSIDE_DQSTRING
+ else:
+ state = prev_state
else:
self.setFormat(start, end-start, self.formats[key])
+ if key != "STRING":
+ last_state = self.NORMAL
+ else:
+ last_state = None
if value in ("def", "class"):
match1 = self.IDPROG.match(text, end)
if match1:
@@ -215,8 +236,9 @@ class PythonSH(BaseSH):
match = self.PROG.search(text, match.end())
- last_state = self.INSIDE_STRING if inside_string else self.NORMAL
- self.setCurrentBlockState(last_state)
+ if last_state is not None:
+ state = last_state
+ self.setCurrentBlockState(state)
if cbdata is not None:
block_nb = self.currentBlock().blockNumber() | QtEditor: improved syntax highlighting (multiline string) | spyder-ide_spyder | train |
8c99d47d4cf48960e3aa89e2f8f2127f818dee38 | diff --git a/bolt/local/array.py b/bolt/local/array.py
index <HASH>..<HASH> 100644
--- a/bolt/local/array.py
+++ b/bolt/local/array.py
@@ -17,6 +17,12 @@ class BoltArrayLocal(ndarray, BoltArray):
return
self._mode = getattr(obj, 'mode', None)
+ def __array_wrap__(self, obj):
+ if obj.shape == ():
+ return obj[()]
+ else:
+ return ndarray.__array_wrap__(self, obj)
+
@property
def _constructor(self):
return BoltArrayLocal | Added the __array_wrap__ function to BoltArrayLocal so that ufuncs will return scalars if their output is a BoltArrayLocal of shape () | bolt-project_bolt | train |
99442b5c930c86f78bd35d961f7166afe2c9c58f | diff --git a/javascript/lib/amigo/commonjs-modules_1_0-shim.js b/javascript/lib/amigo/commonjs-modules_1_0-shim.js
index <HASH>..<HASH> 100644
--- a/javascript/lib/amigo/commonjs-modules_1_0-shim.js
+++ b/javascript/lib/amigo/commonjs-modules_1_0-shim.js
@@ -4,5 +4,12 @@
// (browser environment, etc.), take no action and depend on the
// global namespace.
if( typeof(exports) != 'undefined' ){
+
+ // Old style--exporting separate namespace.
exports.amigo = amigo;
+
+ // New, better, style--assemble; these should not collide.
+ bbop.core.each(amigo, function(k, v){
+ exports[k] = v;
+ });
} | better commonjs export tacked on | geneontology_amigo | train |
3a230024831c2c9be3fd27edc1f596f94d12686a | diff --git a/src/main/java/io/github/lukehutch/fastclasspathscanner/scanner/Scanner.java b/src/main/java/io/github/lukehutch/fastclasspathscanner/scanner/Scanner.java
index <HASH>..<HASH> 100644
--- a/src/main/java/io/github/lukehutch/fastclasspathscanner/scanner/Scanner.java
+++ b/src/main/java/io/github/lukehutch/fastclasspathscanner/scanner/Scanner.java
@@ -358,7 +358,8 @@ public class Scanner implements Callable<ScanResult> {
final String packageRoot = classpathElt.getJarfilePackageRoot();
final ModuleRef classpathElementModuleRef = classpathElt.getClasspathElementModuleRef();
if (classpathElementModuleRef != null) {
- logNode.log(i + ": module " + classpathElementModuleRef.getModuleName());
+ logNode.log(i + ": module " + classpathElementModuleRef.getModuleName()
+ + " ; module location: " + classpathElementModuleRef.getModuleLocationStr());
} else {
final String classpathEltStr = classpathElt.toString();
final String classpathEltFileStr = "" + classpathElt.getClasspathElementFile(logNode); | Add module location to verbose log (#<I>) | classgraph_classgraph | train |
e6c85bf4871ca5c4c230cf6455d20841716cbaae | diff --git a/lib/sample_models/sampler.rb b/lib/sample_models/sampler.rb
index <HASH>..<HASH> 100644
--- a/lib/sample_models/sampler.rb
+++ b/lib/sample_models/sampler.rb
@@ -41,7 +41,11 @@ module SampleModels
attrs = args.last.is_a?(Hash) ? args.pop : {}
args.each do |associated_value|
assocs = @model_class.reflect_on_all_associations.select { |a|
- a.klass == associated_value.class
+ begin
+ a.klass == associated_value.class
+ rescue NameError
+ false
+ end
}
if assocs.size == 1
attrs[assocs.first.name] = associated_value
diff --git a/spec_or_test/specs_or_test_cases.rb b/spec_or_test/specs_or_test_cases.rb
index <HASH>..<HASH> 100644
--- a/spec_or_test/specs_or_test_cases.rb
+++ b/spec_or_test/specs_or_test_cases.rb
@@ -510,6 +510,20 @@ describe 'Model with a polymorphic belongs-to association' do
assert_not_equal Bookmark, bookmark.bookmarkable.class
end
+ it 'should let you specify that association' do
+ blog_post = BlogPost.sample
+ bookmark = Bookmark.sample :bookmarkable => blog_post
+ assert_equal blog_post, bookmark.bookmarkable
+ end
+
+ it 'should let you specify that association with other leading associations' do
+ user = User.sample
+ blog_post = BlogPost.sample
+ sub = Subscription.sample user, :subscribable => blog_post
+ assert_equal user, sub.user
+ assert_equal blog_post, sub.subscribable
+ end
+
it 'should allow you to constrain which classes will be used by default for the polymorphic type' do
sub = Subscription.create_sample
assert_equal 'BlogPost', sub.subscribable_type | fix to shortcut associations when the model has other polymorphic associations | fhwang_sample_models | train |
4591bc9f83c1e818192df2f38ede7e35c3071d9b | diff --git a/tests/PHPMockTest.php b/tests/PHPMockTest.php
index <HASH>..<HASH> 100644
--- a/tests/PHPMockTest.php
+++ b/tests/PHPMockTest.php
@@ -58,10 +58,8 @@ class PHPMockTest extends AbstractMockTest
$time->__phpunit_verify();
$this->fail("Expectation should fail");
-
} catch (\PHPUnit_Framework_ExpectationFailedException $e) {
time(); // satisfy the expectation
-
}
}
Comply with the latest phpcs PSR-2 code style | php-mock_php-mock-phpunit | train
73fa67736f51a8529d7107f93b84775f19cee5c9 | diff --git a/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/api/TableUtilsStreamingITCase.java b/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/api/TableUtilsStreamingITCase.java
index <HASH>..<HASH> 100644
--- a/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/api/TableUtilsStreamingITCase.java
+++ b/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/api/TableUtilsStreamingITCase.java
@@ -22,6 +22,7 @@ import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
+import org.apache.flink.util.TestLogger;
import org.junit.Before;
import org.junit.Test;
@@ -36,7 +37,7 @@ import static org.junit.Assert.assertEquals;
/**
* IT case for {@link TableUtils} in streaming mode.
*/
-public class TableUtilsStreamingITCase {
+public class TableUtilsStreamingITCase extends TestLogger {
private StreamExecutionEnvironment env;
private StreamTableEnvironment tEnv;
diff --git a/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/planner/runtime/utils/BatchAbstractTestBase.java b/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/planner/runtime/utils/BatchAbstractTestBase.java
index <HASH>..<HASH> 100644
--- a/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/planner/runtime/utils/BatchAbstractTestBase.java
+++ b/flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/planner/runtime/utils/BatchAbstractTestBase.java
@@ -23,6 +23,7 @@ import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.test.util.MiniClusterWithClientResource;
+import org.apache.flink.util.TestLogger;
import org.junit.ClassRule;
import org.junit.rules.TemporaryFolder;
@@ -31,7 +32,7 @@ import org.junit.rules.TemporaryFolder;
/**
* Batch test base to use {@link ClassRule}.
*/
-public class BatchAbstractTestBase {
+public class BatchAbstractTestBase extends TestLogger {
public static final int DEFAULT_PARALLELISM = 3;
diff --git a/flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableEnvironmentITCase.scala b/flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableEnvironmentITCase.scala
index <HASH>..<HASH> 100644
--- a/flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableEnvironmentITCase.scala
+++ b/flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/api/TableEnvironmentITCase.scala
@@ -27,12 +27,13 @@ import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment => Scala
import org.apache.flink.table.api.internal.TableEnvironmentImpl
import org.apache.flink.table.api.java.StreamTableEnvironment
import org.apache.flink.table.api.scala.{StreamTableEnvironment => ScalaStreamTableEnvironment, _}
+import org.apache.flink.table.planner.factories.utils.TestCollectionTableFactory
import org.apache.flink.table.planner.runtime.utils.TestingAppendSink
import org.apache.flink.table.planner.utils.TableTestUtil.{readFromResource, replaceStageId}
import org.apache.flink.table.planner.utils.TestTableSources.getPersonCsvTableSource
import org.apache.flink.table.sinks.CsvTableSink
import org.apache.flink.types.Row
-import org.apache.flink.util.FileUtils
+import org.apache.flink.util.{FileUtils, TestLogger}
import org.junit.Assert.{assertEquals, assertFalse, assertTrue}
import org.junit.rules.TemporaryFolder
@@ -42,14 +43,11 @@ import org.junit.{Assert, Before, Rule, Test}
import _root_.java.io.File
import _root_.java.util
-
-import org.apache.flink.table.planner.factories.utils.TestCollectionTableFactory
-
import _root_.scala.collection.mutable
@RunWith(classOf[Parameterized])
-class TableEnvironmentITCase(tableEnvName: String, isStreaming: Boolean) {
+class TableEnvironmentITCase(tableEnvName: String, isStreaming: Boolean) extends TestLogger {
private val _tempFolder = new TemporaryFolder() | [FLINK-<I>][table-planner-blink] Extend batch integration tests with TestLogger
This closes #<I> | apache_flink | train |
0a6ead4f432d75410b0f0a09d2e682362f5b95dd | diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go
index <HASH>..<HASH> 100644
--- a/pkg/cmd/server/start/start_master.go
+++ b/pkg/cmd/server/start/start_master.go
@@ -425,6 +425,17 @@ func (m *Master) Start() error {
}
if m.api {
+ // start etcd if configured to run in process
+ if m.config.EtcdConfig != nil {
+ etcdserver.RunEtcd(m.config.EtcdConfig)
+ }
+
+ // ensure connectivity to etcd before calling BuildMasterConfig,
+ // which constructs storage whose etcd clients require connectivity to etcd at construction time
+ if err := testEtcdConnectivity(m.config.EtcdClientInfo); err != nil {
+ return err
+ }
+
// informers are shared amongst all the various api components we build
// TODO the needs of the apiserver and the controllers are drifting. We should consider two different skins here
clientConfig, err := configapi.GetClientConfig(m.config.MasterClients.OpenShiftLoopbackKubeConfig, m.config.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
@@ -461,20 +472,9 @@ func (m *Master) Start() error {
// StartAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
-// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
+// the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func StartAPI(oc *origin.MasterConfig) error {
- // start etcd
- if oc.Options.EtcdConfig != nil {
- etcdserver.RunEtcd(oc.Options.EtcdConfig)
- }
-
- // verify we can connect to etcd with the provided config
- // TODO remove when this becomes a health check in 3.8
- if err := testEtcdConnectivity(oc.Options.EtcdClientInfo); err != nil {
- return err
- }
-
// start DNS before the informers are started because it adds a ClusterIP index.
if oc.Options.DNSConfig != nil {
oc.RunDNSServer()
@@ -493,6 +493,7 @@ func testEtcdConnectivity(etcdClientInfo configapi.EtcdConnectionInfo) error {
if err != nil {
return err
}
+ defer etcdClient3.Close()
if err := etcd.TestEtcdClientV3(etcdClient3); err != nil {
return err
} | Hoist etcd startup first for all-in-one | openshift_origin | train |
ec4afa3cfd3bc50692eac922c53063037549c34c | diff --git a/modules/module-test/mockito/junit4/src/test/java/samples/powermockito/junit4/spy/SpyTest.java b/modules/module-test/mockito/junit4/src/test/java/samples/powermockito/junit4/spy/SpyTest.java
index <HASH>..<HASH> 100644
--- a/modules/module-test/mockito/junit4/src/test/java/samples/powermockito/junit4/spy/SpyTest.java
+++ b/modules/module-test/mockito/junit4/src/test/java/samples/powermockito/junit4/spy/SpyTest.java
@@ -27,6 +27,7 @@ import samples.suppressmethod.SuppressMethodParent;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
+import static org.powermock.api.mockito.PowerMockito.doNothing;
import static org.powermock.api.mockito.PowerMockito.spy;
import static org.powermock.api.mockito.PowerMockito.when;
import static org.powermock.api.support.membermodification.MemberMatcher.method;
@@ -66,4 +67,11 @@ public class SpyTest {
SuppressMethod tested = spy(new SuppressMethod());
assertEquals(20, tested.myMethod());
}
+
+ @Test
+ public void testDoNothingForSpy() {
+ doNothing().when(partialMock).throwException();
+
+ partialMock.throwException();
+ }
}
\ No newline at end of file
diff --git a/tests/utils/src/main/java/samples/spy/SpyObject.java b/tests/utils/src/main/java/samples/spy/SpyObject.java
index <HASH>..<HASH> 100644
--- a/tests/utils/src/main/java/samples/spy/SpyObject.java
+++ b/tests/utils/src/main/java/samples/spy/SpyObject.java
@@ -24,4 +24,9 @@ public class SpyObject {
public String getStringTwo() {
return "two";
}
+
+
+ public void throwException() {
+ throw new RuntimeException("Aaaaa! Test is failed.");
+ }
} | #<I> suppress(method) doesn't work for spies.
One more test to check that doNothing works for spies. | powermock_powermock | train
2a08c5e114c6a655697b98bc3c656ba7ece22db2 | diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -4,13 +4,13 @@
iPOPO installation script
:author: Thomas Calmant
-:copyright: Copyright 2017, Thomas Calmant
+:copyright: Copyright 2018, Thomas Calmant
:license: Apache License 2.0
:version: 0.7.1
..
- Copyright 2017 Thomas Calmant
+ Copyright 2018 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -72,7 +72,9 @@ setup(
'pelix.remote.discovery',
'pelix.remote.transport',
'pelix.services',
- 'pelix.shell'],
+ 'pelix.shell',
+ 'pelix.shell.completion',
+ ],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
@@ -81,11 +83,11 @@ setup(
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
- 'Topic :: Software Development :: Libraries :: Application Frameworks'
+ 'Programming Language :: Python :: 3.7',
+ 'Topic :: Software Development :: Libraries :: Application Frameworks',
],
install_requires=[
'jsonrpclib-pelix>=0.3.1' | Review of setup.py
Changed copyright year and added missing pelix.shell.completion package. | tcalmant_ipopo | train |
bb3bab7dfa9f8e8416bd5f725c6f3c23cb5644a5 | diff --git a/lib/steam/community/css/CSSStats.php b/lib/steam/community/css/CSSStats.php
index <HASH>..<HASH> 100644
--- a/lib/steam/community/css/CSSStats.php
+++ b/lib/steam/community/css/CSSStats.php
@@ -58,7 +58,7 @@ class CSSStats extends GameStats {
$this->lastMatchStats['damage'] = (int) $statsData->lastmatch->dmg;
$this->lastMatchStats['deaths'] = (int) $statsData->lastmatch->deaths;
$this->lastMatchStats['dominations'] = (int) $statsData->lastmatch->dominations;
- $this->lastMatchStats['favoriteWeapon'] = (string) $statsData->lastmatch->favwpn;
+ $this->lastMatchStats['favoriteWeaponId'] = (int) $statsData->lastmatch->favwpnid;
$this->lastMatchStats['kills'] = (int) $statsData->lastmatch->kills;
$this->lastMatchStats['maxPlayers'] = (int) $statsData->lastmatch->max_players;
$this->lastMatchStats['money'] = (int) $statsData->lastmatch->money; | Fix for a change in CS:S stats XML data
The favorite weapon of the last game is now a numeric ID. Right now, there
seems to be no way to tell the weapon name from this ID programmatically.
Closes #<I> | koraktor_steam-condenser-php | train |
f74fa201d8b3c94094ed9f7fe5b4cd0a5417ef6b | diff --git a/src/BoomCMS/Http/Controllers/CMS/Page/Settings/View.php b/src/BoomCMS/Http/Controllers/CMS/Page/Settings/View.php
index <HASH>..<HASH> 100644
--- a/src/BoomCMS/Http/Controllers/CMS/Page/Settings/View.php
+++ b/src/BoomCMS/Http/Controllers/CMS/Page/Settings/View.php
@@ -2,7 +2,6 @@
namespace BoomCMS\Http\Controllers\CMS\Page\Settings;
-use BoomCMS\Core\Page\Finder as PageFinder;
use Illuminate\Support\Facades\App;
class View extends Settings
@@ -90,22 +89,6 @@ class View extends Settings
]);
}
- public function sort_children()
- {
- parent::children();
-
- $finder = new PageFinder\Finder();
-
- $children = $finder
- ->addFilter(new PageFinder\ParentPage($this->page))
- ->setLimit(50)
- ->findAll();
-
- return view("$this->viewPrefix/sort_children", [
- 'children' => $children,
- ]);
- }
-
public function visibility()
{
parent::visibility(); | Removed redundant code from page settings view controller | boomcms_boom-core | train |
4469010c1be3a94f74d6448497051468f617baf2 | diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py
index <HASH>..<HASH> 100644
--- a/src/transformers/integrations.py
+++ b/src/transformers/integrations.py
@@ -382,9 +382,10 @@ class TensorBoardCallback(TrainerCallback):
def __init__(self, tb_writer=None):
has_tensorboard = is_tensorboard_available()
- assert (
- has_tensorboard
- ), "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
+ if not has_tensorboard:
+ raise RuntimeError(
+ "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
+ )
if has_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter # noqa: F401
@@ -465,7 +466,8 @@ class WandbCallback(TrainerCallback):
def __init__(self):
has_wandb = is_wandb_available()
- assert has_wandb, "WandbCallback requires wandb to be installed. Run `pip install wandb`."
+ if not has_wandb:
+ raise RuntimeError("WandbCallback requires wandb to be installed. Run `pip install wandb`.")
if has_wandb:
import wandb
@@ -587,7 +589,8 @@ class CometCallback(TrainerCallback):
"""
def __init__(self):
- assert _has_comet, "CometCallback requires comet-ml to be installed. Run `pip install comet-ml`."
+ if not _has_comet:
+ raise RuntimeError("CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.")
self._initialized = False
def setup(self, args, state, model):
@@ -643,9 +646,8 @@ class AzureMLCallback(TrainerCallback):
"""
def __init__(self, azureml_run=None):
- assert (
- is_azureml_available()
- ), "AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`."
+ if not is_azureml_available():
+ raise RuntimeError("AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.")
self.azureml_run = azureml_run
def on_init_end(self, args, state, control, **kwargs):
@@ -667,7 +669,8 @@ class MLflowCallback(TrainerCallback):
"""
def __init__(self):
- assert is_mlflow_available(), "MLflowCallback requires mlflow to be installed. Run `pip install mlflow`."
+ if not is_mlflow_available():
+ raise RuntimeError("MLflowCallback requires mlflow to be installed. Run `pip install mlflow`.")
import mlflow
self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH
@@ -753,9 +756,10 @@ class NeptuneCallback(TrainerCallback):
"""
def __init__(self):
- assert (
- is_neptune_available()
- ), "NeptuneCallback requires neptune-client to be installed. Run `pip install neptune-client`."
+ if not is_neptune_available():
+ raise ValueError(
+ "NeptuneCallback requires neptune-client to be installed. Run `pip install neptune-client`."
+ )
import neptune.new as neptune
self._neptune = neptune
@@ -823,9 +827,10 @@ class CodeCarbonCallback(TrainerCallback):
"""
def __init__(self):
- assert (
- is_codecarbon_available()
- ), "CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`."
+ if not is_codecarbon_available():
+ raise RuntimeError(
+ "CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`."
+ )
import codecarbon
self._codecarbon = codecarbon | Replace assertions with RuntimeError exceptions (#<I>) | huggingface_pytorch-pretrained-BERT | train |
67cc3119c4ab98d5088aba9b28e75ae2a8213ffd | diff --git a/test/unit/Reflection/ReflectionClassTest.php b/test/unit/Reflection/ReflectionClassTest.php
index <HASH>..<HASH> 100644
--- a/test/unit/Reflection/ReflectionClassTest.php
+++ b/test/unit/Reflection/ReflectionClassTest.php
@@ -400,7 +400,7 @@ class ReflectionClassTest extends \PHPUnit_Framework_TestCase
$this->assertSame(
[
ClassWithInterfaces\A::class,
- ClassWithInterfacesOther\A::class,
+ ClassWithInterfacesOther\B::class,
ClassWithInterfaces\C::class,
ClassWithInterfacesOther\D::class,
\E::class, | Correcting test asset: imported interfaces were wrong | Roave_BetterReflection | train |
59866a1fcdcf9e17d68d764c13066b058667143d | diff --git a/logger.go b/logger.go
index <HASH>..<HASH> 100644
--- a/logger.go
+++ b/logger.go
@@ -79,7 +79,14 @@ func (r *Record) Message() string {
}
type Logger struct {
- Module string
+ Module string
+ backend LeveledBackend
+ haveBackend bool
+}
+
+func (l *Logger) SetBackend(backend LeveledBackend) {
+ l.backend = backend
+ l.haveBackend = true
}
// TODO call NewLogger and remove MustGetLogger?
@@ -148,6 +155,11 @@ func (l *Logger) log(lvl Level, format string, args ...interface{}) {
// calldepth=2 brings the stack up to the caller of the level
// methods, Info(), Fatal(), etc.
+ if l.haveBackend {
+ l.backend.Log(lvl, 2, record)
+ return
+ }
+
defaultBackend.Log(lvl, 2, record)
} | Add ability to set backend to separate logger | ckeyer_go-log | train |
0600b50ec757ee0451fb42f07721a4bc537fabcd | diff --git a/src/web/org/codehaus/groovy/grails/web/servlet/GrailsHttpServletResponse.java b/src/web/org/codehaus/groovy/grails/web/servlet/GrailsHttpServletResponse.java
index <HASH>..<HASH> 100644
--- a/src/web/org/codehaus/groovy/grails/web/servlet/GrailsHttpServletResponse.java
+++ b/src/web/org/codehaus/groovy/grails/web/servlet/GrailsHttpServletResponse.java
@@ -119,8 +119,7 @@ public class GrailsHttpServletResponse implements HttpServletResponse {
}
public ServletOutputStream getOutputStream(String contentType, String characterEncoding) throws IOException {
- this.delegate.setContentType(contentType);
- this.delegate.setCharacterEncoding(characterEncoding);
+ this.delegate.setContentType(contentType + ";charset=" + characterEncoding);
return this.delegate.getOutputStream();
}
@@ -129,8 +128,7 @@ public class GrailsHttpServletResponse implements HttpServletResponse {
}
public PrintWriter getWriter(String contentType, String characterEncoding) throws IOException {
- this.delegate.setContentType(contentType);
- this.delegate.setCharacterEncoding(characterEncoding);
+ this.delegate.setContentType(contentType + ";charset=" + characterEncoding);
return this.delegate.getWriter();
} | Don't call setCharacterEncoding when getting the writer of outpustream since this method is not part of J2EE <I>.
git-svn-id: <URL> | grails_grails-core | train |
bcb33d5af8e98915325f7aaecdf6008154ce3521 | diff --git a/topologies/mongos.js b/topologies/mongos.js
index <HASH>..<HASH> 100644
--- a/topologies/mongos.js
+++ b/topologies/mongos.js
@@ -900,7 +900,7 @@ var executeWriteOperation = function(self, op, ns, ops, options, callback) {
server[op](ns, ops, options, (err, result) => {
if (!err) return callback(null, result);
- if (!(err instanceof errors.MongoNetworkError)) {
+ if (!(err instanceof errors.MongoNetworkError) && !err.message.match(/not master/)) {
return callback(err);
}
diff --git a/topologies/replset.js b/topologies/replset.js
index <HASH>..<HASH> 100644
--- a/topologies/replset.js
+++ b/topologies/replset.js
@@ -1187,7 +1187,7 @@ var executeWriteOperation = function(self, op, ns, ops, options, callback) {
self.s.replicaSetState.primary[op](ns, ops, options, (err, result) => {
if (!err) return callback(null, result);
- if (!(err instanceof errors.MongoNetworkError)) {
+ if (!(err instanceof errors.MongoNetworkError) && !err.message.match(/not master/)) {
return callback(err);
} | feat(retryable-writes): retry on "not master" stepdown errors
NODE-<I> | mongodb_node-mongodb-native | train |
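The widened condition treats a "not master" error — raised when a former primary steps down — like a network error, so the write can be retried against the newly elected primary instead of surfacing immediately. A sketch of the classification in Python (the retry wrapper itself is our assumption; the diff only shows the error check, and the real driver routes retries through its topology code):

import re

def should_retry_write(err, is_network_error):
    # Retry on transient network failures or primary-stepdown errors.
    return is_network_error(err) or bool(re.search(r'not master', str(err)))

def execute_with_one_retry(op, is_network_error):
    try:
        return op()
    except Exception as err:
        if not should_retry_write(err, is_network_error):
            raise
        return op()  # single retry, per retryable-writes semantics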
1493378d8cdf8334068462378316d216b63bcdd8 | diff --git a/utils/parse_command_line.py b/utils/parse_command_line.py
index <HASH>..<HASH> 100644
--- a/utils/parse_command_line.py
+++ b/utils/parse_command_line.py
@@ -193,7 +193,8 @@ def parse_command_line(parser):
with open(args_command_list.targets_in) as f:
for target in f.readlines():
if target.strip(): # Ignore empty lines
- args_target_list.append(target.strip())
+ if not target.startswith('#'): # Ignore comment lines
+ args_target_list.append(target.strip())
except IOError, e:
print " ERROR: Can't read targets from input file '%s'." % args_command_list.targets_in
return | Support for comments in --targets_in files | nabla-c0d3_sslyze | train |
5976e9f37be1c90c042e9112bff3b6d12d790e3f | diff --git a/main/core/Library/Installation/AdditionalInstaller.php b/main/core/Library/Installation/AdditionalInstaller.php
index <HASH>..<HASH> 100644
--- a/main/core/Library/Installation/AdditionalInstaller.php
+++ b/main/core/Library/Installation/AdditionalInstaller.php
@@ -12,6 +12,7 @@
namespace Claroline\CoreBundle\Library\Installation;
use Claroline\InstallationBundle\Additional\AdditionalInstaller as BaseInstaller;
+use Psr\Log\LogLevel;
use Symfony\Bundle\SecurityBundle\Command\InitAclCommand;
use Symfony\Component\Console\Input\ArrayInput;
use Symfony\Component\Console\Output\NullOutput;
@@ -25,22 +26,28 @@ class AdditionalInstaller extends BaseInstaller
public function preUpdate($currentVersion, $targetVersion)
{
+ $dataWebDir = $this->container->getParameter('claroline.param.data_web_dir');
$fileSystem = $this->container->get('filesystem');
$publicFilesDir = $this->container->getParameter('claroline.param.public_files_directory');
- $dataWebDir = $this->container->getParameter('claroline.param.data_web_dir');
-
- if (!$fileSystem->exists($publicFilesDir)) {
- $fileSystem->mkdir($publicFilesDir, 0775);
- $fileSystem->chmod($publicFilesDir, 0775, 0000, true);
- }
if (!$fileSystem->exists($dataWebDir)) {
+ $this->log('Creating symlink to public directory of files directory in web directory...');
$fileSystem->symlink($publicFilesDir, $dataWebDir);
+ } else {
+ if (!is_link($dataWebDir)) {
+ //we could remove it manually but it might be risky
+ $this->log('Symlink from web/data to files/data could not be created, please remove your web/data folder manually', LogLevel::ERROR);
+ } else {
+ $this->log('Web folder symlinks validated...');
+ }
}
- //when everything concerning files is done properly, these lines should be removed
- $updater = new Updater\Updater100000($this->container);
- $updater->moveUploadsDirectory();
+ try {
+ $updater = new Updater\Updater100000($this->container);
+ $updater->moveUploadsDirectory();
+ } catch (\Exception $e) {
+ $this->log($e->getMessage(), LogLevel::ERROR);
+ }
$maintenanceUpdater = new Updater\WebUpdater($this->container->getParameter('kernel.root_dir'));
$maintenanceUpdater->preUpdate();
@@ -292,6 +299,7 @@ class AdditionalInstaller extends BaseInstaller
public function end()
{
+ $this->container->get('claroline.installation.refresher')->installAssets();
$this->log('Updating resource icons...');
$this->container->get('claroline.manager.icon_set_manager')->setLogger($this->logger);
$this->container->get('claroline.manager.icon_set_manager')->addDefaultIconSets();
diff --git a/main/core/Library/Installation/Updater/Updater100000.php b/main/core/Library/Installation/Updater/Updater100000.php
index <HASH>..<HASH> 100644
--- a/main/core/Library/Installation/Updater/Updater100000.php
+++ b/main/core/Library/Installation/Updater/Updater100000.php
@@ -160,12 +160,17 @@ class Updater100000 extends Updater
{
$logos = new \DirectoryIterator($this->container->getParameter('claroline.param.logos_directory'));
$logoService = $this->container->get('claroline.common.logo_service');
+ $ch = $this->container->get('claroline.config.platform_config_handler');
foreach ($logos as $logo) {
if ($logo->isFile()) {
$this->log('Saving logo '.$logo->getBasename().'...');
$file = new File($logo->getPathname());
- $logoService->createLogo($file);
+ $logoFile = $logoService->createLogo($file);
+
+ if ($logo->getBasename() === $ch->getParameter('logo')) {
+ $ch->setParameter('logo', $logoFile->getPublicFile()->getUrl());
+ }
}
}
}
diff --git a/main/core/Library/Logo/LogoService.php b/main/core/Library/Logo/LogoService.php
index <HASH>..<HASH> 100644
--- a/main/core/Library/Logo/LogoService.php
+++ b/main/core/Library/Logo/LogoService.php
@@ -67,7 +67,8 @@ class LogoService
public function createLogo(File $file)
{
$publicFile = $this->fu->createFile($file, $file->getBasename(), null, null, null, self::PUBLIC_FILE_TYPE);
- $this->fu->createFileUse($publicFile, 'none', 'none');
+
+ return $this->fu->createFileUse($publicFile, 'none', 'none');
}
/** | [CoreBundle] Logo platform move (#<I>)
* Fixed web folder deletion
* [CoreBundle] Logo paths | claroline_Distribution | train |
6459e77b6e8e6342c492c0e074b64dea5e50e1c9 | diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go
index <HASH>..<HASH> 100644
--- a/service/s3/s3manager/upload.go
+++ b/service/s3/s3manager/upload.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/awslabs/aws-sdk-go/aws/awserr"
+ "github.com/awslabs/aws-sdk-go/aws/awsutil"
"github.com/awslabs/aws-sdk-go/internal/apierr"
"github.com/awslabs/aws-sdk-go/service/s3"
)
@@ -83,7 +84,6 @@ func (m *multiUploadError) UploadID() string {
return m.uploadID
}
-
// UploadInput contains all input for upload requests to Amazon S3.
type UploadInput struct {
// The canned ACL to apply to the object.
@@ -253,11 +253,11 @@ func (u *uploader) upload() (*UploadOutput, error) {
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(part []byte) (*UploadOutput, error) {
- req, _ := u.s.PutObjectRequest(&s3.PutObjectInput{
- Bucket: u.in.Bucket,
- Key: u.in.Key,
- Body: bytes.NewReader(part),
- })
+ params := &s3.PutObjectInput{}
+ awsutil.Copy(params, u.in)
+ params.Body = bytes.NewReader(part)
+
+ req, _ := u.s.PutObjectRequest(params)
if err := req.Send(); err != nil {
return nil, err
}
@@ -293,11 +293,11 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa
// upload will perform a multipart upload using the firstPart buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstPart []byte) (*UploadOutput, error) {
+ params := &s3.CreateMultipartUploadInput{}
+ awsutil.Copy(params, u.in)
+
// Create the multipart
- resp, err := u.s.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
- Bucket: u.in.Bucket,
- Key: u.in.Key,
- })
+ resp, err := u.s.CreateMultipartUpload(params)
if err != nil {
return nil, err
} | service/s3/s3manager: Added support for all UploadInput parameters
Using the updated awsutil.Copy, the upload manager can now use all the
fields in its UploadInput struct.
Fixes #<I> | aws_aws-sdk-go | train |
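The pattern in this commit is copy-then-override: clone the user's full input struct into the operation-specific params, then swap in the part body, instead of hand-picking a couple of fields and silently dropping the rest. A rough Python sketch of that pattern (the field names here are illustrative; the real structs carry many more pass-through fields):

```python
import dataclasses
from dataclasses import dataclass

@dataclass
class UploadInput:
    bucket: str = None
    key: str = None
    content_type: str = None  # one of many pass-through fields
    body: bytes = None

@dataclass
class PutObjectInput(UploadInput):
    pass

def single_part(upload_in, part):
    # Copy every user-supplied field, then swap in this part's body,
    # instead of hand-picking bucket/key and dropping the rest.
    params = PutObjectInput(**dataclasses.asdict(upload_in))
    params.body = part
    return params

p = single_part(UploadInput(bucket='b', key='k', content_type='text/plain'), b'data')
assert (p.bucket, p.content_type, p.body) == ('b', 'text/plain', b'data')
```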
a32624a461941a88aa550739201f8edb6ebe345e | diff --git a/gems/vfs/lib/torquebox/vfs/ext/jdbc.rb b/gems/vfs/lib/torquebox/vfs/ext/jdbc.rb
index <HASH>..<HASH> 100644
--- a/gems/vfs/lib/torquebox/vfs/ext/jdbc.rb
+++ b/gems/vfs/lib/torquebox/vfs/ext/jdbc.rb
@@ -21,7 +21,9 @@ class Java::java.sql::DriverManager
alias_method :register_driver_without_vfs, :registerDriver
# Monkey patch getConnection so we can sort out local filesystem url's for
- # SQLite (we need to remove the 'vfs:' from the url)
+ # SQLite (we need to remove the 'vfs:' from the url). This works
+ # for activerecord-jdbc-adapter v1.1.2 and under. For v1.1.3 and
+ # up, see web/src/main/java/org/torquebox/rails/boot.rb
def getConnection(url, *params)
# Remove any VFS prefix from the url (for SQLite)
diff --git a/modules/web/src/main/java/org/torquebox/web/rails/boot.rb b/modules/web/src/main/java/org/torquebox/web/rails/boot.rb
index <HASH>..<HASH> 100644
--- a/modules/web/src/main/java/org/torquebox/web/rails/boot.rb
+++ b/modules/web/src/main/java/org/torquebox/web/rails/boot.rb
@@ -35,6 +35,22 @@ class Class
end
end
end
+
+ # We monkey patch activerecord-jdbc-adapter here to handle
+ # sqlite vfs paths. This patch works for
+ # activerecord-jdbc-adapter v1.1.3 and up. For lower versions,
+ # see vfs/lib/torquebox/vfs/ext/jdbc.rb.
+ if self.to_s == 'ActiveRecord::ConnectionAdapters::JdbcDriver' &&
+ method_name == :connection
+ self.class_eval do
+ alias_method :connection_before_torquebox, :connection
+
+ def connection(url, user, pass)
+ connection_before_torquebox( url.sub(':vfs:', ':'), user, pass )
+ end
+ end
+ end
+
if ( (self.to_s == 'Rails::Initializer') && ( method_name == :set_autoload_paths ) )
self.class_eval do
alias_method :set_autoload_paths_before_torquebox, :set_autoload_paths
diff --git a/pom.xml b/pom.xml
index <HASH>..<HASH> 100644
--- a/pom.xml
+++ b/pom.xml
@@ -336,10 +336,10 @@
<version.rails31>3.1.0.rc5</version.rails31>
<version.arel>2.0.9</version.arel>
- <version.ar.jdbc>0.9.7</version.ar.jdbc>
- <version.ar.sqlite3>0.9.7</version.ar.sqlite3>
- <version.ar.mysql>0.9.7</version.ar.mysql>
- <version.ar.postgresql>0.9.7</version.ar.postgresql>
+ <version.ar.jdbc>1.1.3</version.ar.jdbc>
+ <version.ar.sqlite3>1.1.3</version.ar.sqlite3>
+ <version.ar.mysql>1.1.3</version.ar.mysql>
+ <version.ar.postgresql>1.1.3</version.ar.postgresql>
<version.jdbc.sqlite3>3.6.3.054</version.jdbc.sqlite3>
    <version.jdbc.mysql>5.0.4</version.jdbc.mysql> | Monkey patch arjdbc <I> and up to work with sqlite vfs paths [TORQUE-<I>] | torquebox_torquebox | train
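Both monkey patches in this commit reduce to the same URL rewrite: drop the first ':vfs:' segment from the JDBC SQLite URL before handing it to the driver, since TorqueBox serves apps from a virtual filesystem but SQLite needs a plain path. A Python sketch of the transformation (illustrative only; the actual patch is Ruby's `url.sub(':vfs:', ':')`):

```python
def strip_vfs(jdbc_url):
    # Replace only the first ':vfs:' occurrence, matching Ruby's String#sub.
    return jdbc_url.replace(':vfs:', ':', 1)

assert strip_vfs('jdbc:sqlite:vfs:/app/db/production.sqlite3') == \
       'jdbc:sqlite:/app/db/production.sqlite3'
```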
2351fc6b3fd81a9ae19690010a7d85ae69102b20 | diff --git a/bin/inline_forms_installer_core.rb b/bin/inline_forms_installer_core.rb
index <HASH>..<HASH> 100755
--- a/bin/inline_forms_installer_core.rb
+++ b/bin/inline_forms_installer_core.rb
@@ -17,7 +17,7 @@ gem 'carrierwave'
gem 'remotipart', '~> 1.0'
gem 'paper_trail'
gem 'devise'
-gem 'inline_forms'
+gem 'inline_forms', '>=5'
gem 'validation_hints'
gem 'mini_magick'
gem 'rails-i18n'
diff --git a/lib/inline_forms/version.rb b/lib/inline_forms/version.rb
index <HASH>..<HASH> 100644
--- a/lib/inline_forms/version.rb
+++ b/lib/inline_forms/version.rb
@@ -1,4 +1,4 @@
# -*- encoding : utf-8 -*-
module InlineForms
- VERSION = "5.0.7"
+ VERSION = "5.0.8"
end | the installer installed inline_forms <I>... strange... | acesuares_inline_forms | train |
8e12500350f2852fc22cff0571cb23ec39cb1098 | diff --git a/net/http/Media.php b/net/http/Media.php
index <HASH>..<HASH> 100644
--- a/net/http/Media.php
+++ b/net/http/Media.php
@@ -113,9 +113,9 @@ class Media extends \lithium\core\StaticObject {
* Examples:
* {{{ embed:lithium\tests\cases\net\http\MediaTest::testMediaTypes(1-2) }}}
*
- * {{{ embed:lithium\tests\cases\net\http\MediaTest::testMediaTypes(19-20) }}}
+ * {{{ embed:lithium\tests\cases\net\http\MediaTest::testMediaTypes(19-23) }}}
*
- * {{{ embed:lithium\tests\cases\net\http\MediaTest::testMediaTypes(40-41) }}}
+ * {{{ embed:lithium\tests\cases\net\http\MediaTest::testMediaTypes(43-44) }}}
*
* Alternatively, can be used to detect the type name of a registered content type:
* {{{ | Fix wrong embedded code for Media docs examples | UnionOfRAD_lithium | train |
8ae0a20305114bae6c0cd3ea66ccb910fba6ebba | diff --git a/.flowconfig b/.flowconfig
index <HASH>..<HASH> 100644
--- a/.flowconfig
+++ b/.flowconfig
@@ -1,5 +1,9 @@
+[options]
+all=true
+
[ignore]
+.*/node_modules/.*
.*/dist/.*
[libs]
-<PROJECT_ROOT>/flow-typed
+flow-typed
diff --git a/workspaces/cli/src/__tests__/freighter.test.js b/workspaces/cli/src/__tests__/freighter.test.js
index <HASH>..<HASH> 100644
--- a/workspaces/cli/src/__tests__/freighter.test.js
+++ b/workspaces/cli/src/__tests__/freighter.test.js
@@ -1,4 +1,3 @@
-// @flow
import fs from 'fs-extra';
import path from 'path';
diff --git a/workspaces/cli/src/commands/decorator.js b/workspaces/cli/src/commands/decorator.js
index <HASH>..<HASH> 100644
--- a/workspaces/cli/src/commands/decorator.js
+++ b/workspaces/cli/src/commands/decorator.js
@@ -1,4 +1,3 @@
-// @flow
let commandReturnValue;
// Intercept and cache command return values. This is primarily
diff --git a/workspaces/cli/src/commands/init.js b/workspaces/cli/src/commands/init.js
index <HASH>..<HASH> 100644
--- a/workspaces/cli/src/commands/init.js
+++ b/workspaces/cli/src/commands/init.js
@@ -1,4 +1,3 @@
-// @flow
import fs from 'fs-extra';
import path from 'path';
diff --git a/workspaces/cli/src/freighter.js b/workspaces/cli/src/freighter.js
index <HASH>..<HASH> 100644
--- a/workspaces/cli/src/freighter.js
+++ b/workspaces/cli/src/freighter.js
@@ -1,4 +1,3 @@
-// @flow
import program from 'commander';
import init from './commands/init';
diff --git a/workspaces/cli/src/index.js b/workspaces/cli/src/index.js
index <HASH>..<HASH> 100755
--- a/workspaces/cli/src/index.js
+++ b/workspaces/cli/src/index.js
@@ -1,5 +1,4 @@
#!/usr/bin/env node
-// @flow
import freighter from './freighter';
// No arguments passed. Punish them.
diff --git a/workspaces/cli/src/test-utils.js b/workspaces/cli/src/test-utils.js
index <HASH>..<HASH> 100644
--- a/workspaces/cli/src/test-utils.js
+++ b/workspaces/cli/src/test-utils.js
@@ -1,4 +1,3 @@
-// @flow
import { testUtils } from './commands/decorator';
import freighter from './freighter'; | Remove pragma restriction
Freighter is a greenfield project. Flow should be enabled without
needing explicit pragmas. | PsychoLlama_freighter | train |
c1807031ac9d5e749d5cd43161bd7c333d50a620 | diff --git a/lib/kaminari/config.rb b/lib/kaminari/config.rb
index <HASH>..<HASH> 100644
--- a/lib/kaminari/config.rb
+++ b/lib/kaminari/config.rb
@@ -24,6 +24,7 @@ module Kaminari
config_accessor :left
config_accessor :right
config_accessor :page_method_name
+ config_accessor :max_pages
def param_name
config.param_name.respond_to?(:call) ? config.param_name.call : config.param_name
@@ -45,5 +46,6 @@ module Kaminari
config.right = 0
config.page_method_name = :page
config.param_name = :page
+ config.max_pages = nil
end
end
diff --git a/lib/kaminari/models/configuration_methods.rb b/lib/kaminari/models/configuration_methods.rb
index <HASH>..<HASH> 100644
--- a/lib/kaminari/models/configuration_methods.rb
+++ b/lib/kaminari/models/configuration_methods.rb
@@ -29,6 +29,20 @@ module Kaminari
def max_per_page
@_max_per_page || Kaminari.config.max_per_page
end
+
+ # Overrides the max_pages value per model
+ # class Article < ActiveRecord::Base
+ # max_pages_per 100
+ # end
+ def max_pages_per(val)
+ @_max_pages = val
+ end
+
+ # This model's max_pages value
+ # returns max_pages value unless explicitly overridden via <tt>max_pages_per</tt>
+ def max_pages
+ @_max_pages || Kaminari.config.max_pages
+ end
end
end
end
diff --git a/lib/kaminari/models/page_scope_methods.rb b/lib/kaminari/models/page_scope_methods.rb
index <HASH>..<HASH> 100644
--- a/lib/kaminari/models/page_scope_methods.rb
+++ b/lib/kaminari/models/page_scope_methods.rb
@@ -18,7 +18,12 @@ module Kaminari
# Total number of pages
def total_pages
- (total_count.to_f / limit_value).ceil
+ total_pages_count = (total_count.to_f / limit_value).ceil
+ if max_pages.present? && max_pages < total_pages_count
+ max_pages
+ else
+ total_pages_count
+ end
end
#FIXME for compatibility. remove num_pages at some time in the future
alias num_pages total_pages | Add max_pages and max_pages_per methods to limit displayed pages per model or globally | kaminari_kaminari | train |
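The clamping rule added to total_pages is small but easy to get wrong. A Python sketch of the same arithmetic (a sketch only; the real method reads limit_value from the scope and the cap from `max_pages_per` or the global `config.max_pages`):

```python
import math

def total_pages(total_count, limit_value, max_pages=None):
    pages = math.ceil(total_count / limit_value)
    # A configured cap wins whenever it is lower than the real page count.
    if max_pages is not None and max_pages < pages:
        return max_pages
    return pages

assert total_pages(1000, 25) == 40
assert total_pages(1000, 25, max_pages=10) == 10
assert total_pages(100, 25, max_pages=10) == 4   # cap higher than reality
```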
b62fa406d857448931789a78d5a65025a27ea571 | diff --git a/cmd/share-download-main.go b/cmd/share-download-main.go
index <HASH>..<HASH> 100644
--- a/cmd/share-download-main.go
+++ b/cmd/share-download-main.go
@@ -17,6 +17,7 @@
package cmd
import (
+ "strings"
"time"
"github.com/minio/cli"
@@ -57,9 +58,9 @@ EXAMPLES:
$ {{.HelpName}} --expire=10m s3/backup/2006-Mar-1/backup.tar.gz
3. Share all objects under this folder with 5 days expiry.
- $ {{.HelpName}} --expire=120h s3/backup/
+ $ {{.HelpName}} --expire=120h s3/backup/2006-Mar-1/
- 4. Share all objects under this folder and all its sub-folders with 5 days expiry.
+ 4. Share all objects under this bucket and all its folders and sub-folders with 5 days expiry.
$ {{.HelpName}} --recursive --expire=120h s3/backup/
`,
@@ -126,17 +127,24 @@ func doShareDownloadURL(targetURL string, isRecursive bool, expiry time.Duration
// Channel which will receive objects whose URLs need to be shared
objectsCh := make(chan *clientContent)
- if !isRecursive {
- // Share thr url of only one exact prefix if it exists
- content, err := clnt.Stat(isIncomplete)
- if err != nil {
- return err.Trace(clnt.GetURL().String())
- }
+ content, err := clnt.Stat(isIncomplete)
+ if err != nil {
+ return err.Trace(clnt.GetURL().String())
+ }
+
+ if !content.Type.IsDir() {
go func() {
defer close(objectsCh)
objectsCh <- content
}()
} else {
+ if !strings.HasSuffix(targetURLFull, string(clnt.GetURL().Separator)) {
+ targetURLFull = targetURLFull + string(clnt.GetURL().Separator)
+ }
+ clnt, err = newClientFromAlias(targetAlias, targetURLFull)
+ if err != nil {
+ return err.Trace(targetURLFull)
+ }
// Recursive mode: Share list of objects
go func() {
defer close(objectsCh) | share: Presign objects under a dir prefix (#<I>)
From now on, when the user shares a prefix that represents a directory,
all objects under that directory will be shared without having to pass
the --recursive flag. If the latter is specified, all objects in all
subdirectories will be shared. | minio_mc | train
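The decision flow the diff implements: stat the target first; a plain object is shared directly, while a directory prefix is listed (descending into sub-folders only with the recursive flag) and each listed object gets a presigned URL. A Python sketch of that flow against a hypothetical storage client (the `stat`/`list` methods and `is_dir` attribute are assumptions for illustration, not mc's real API):

```python
def contents_to_share(client, target, recursive=False):
    """Yield the object(s) a share command should presign."""
    entry = client.stat(target)
    if not entry.is_dir:
        yield entry                      # a single object: share it directly
        return
    prefix = target if target.endswith('/') else target + '/'
    # A directory prefix shares everything under it; the recursive flag
    # additionally walks into sub-folders.
    for obj in client.list(prefix, recursive=recursive):
        yield obj
```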
2b4cfe9328a2da59367bebb62c6e6136818e1ec5 | diff --git a/lib/mediawiki.TokenTransformManager.js b/lib/mediawiki.TokenTransformManager.js
index <HASH>..<HASH> 100644
--- a/lib/mediawiki.TokenTransformManager.js
+++ b/lib/mediawiki.TokenTransformManager.js
@@ -1194,6 +1194,12 @@ TokenAccumulator.prototype.receiveToksFromChild = function ( ret ) {
// Send async if child or sibling haven't finished or if there's sibling
// tokens waiting
+ if (!ret.async && this.siblingChunks.length
+ && this.siblingChunks[0].rank === ret.tokens.rank) {
+ var tokens = ret.tokens.concat(this.siblingChunks.shift());
+ tokens.rank = ret.tokens.rank;
+ ret.tokens = tokens;
+ }
var async = ret.async || this.waitForSibling || this.siblingChunks.length;
this._callParentCB({tokens: ret.tokens, async: async});
@@ -1202,7 +1208,9 @@ TokenAccumulator.prototype.receiveToksFromChild = function ( ret ) {
// since any tokens we receive now will already be in order
// and no buffering is necessary.
this.waitForChild = false;
- this.emitTokens(this.waitForSibling);
+ if ( this.siblingChunks.length ) {
+ this.emitTokens(this.waitForSibling);
+ }
}
    return null; | Small optimization: Concat child and sibling chunks
Concat child and sibling chunks if the child is done and the ranks of child
and sibling match.
Change-Id: I7c<I>bc<I>c<I>d4c7dfa<I>a<I>b8 | wikimedia_parsoid | train |
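The optimization: when the child transform is finished and the next queued sibling chunk has the same rank, emit both as one chunk rather than two separate callbacks. A Python sketch of the merge (modeled with plain dicts; in the JS code the rank is attached to the token array itself):

```python
def merge_child_ret(ret, sibling_chunks):
    """Fold the first queued sibling chunk into a finished child's result.

    `ret` is {'tokens': [...], 'rank': float, 'async': bool};
    `sibling_chunks` is a FIFO of {'tokens': [...], 'rank': float}.
    """
    if not ret['async'] and sibling_chunks \
            and sibling_chunks[0]['rank'] == ret['rank']:
        # Child is done and the next sibling chunk has the same rank:
        # hand the parent one combined chunk instead of two callbacks.
        sibling = sibling_chunks.pop(0)
        ret = dict(ret, tokens=ret['tokens'] + sibling['tokens'])
    return ret

out = merge_child_ret(
    {'tokens': ['a'], 'rank': 2.0, 'async': False},
    [{'tokens': ['b'], 'rank': 2.0}])
assert out['tokens'] == ['a', 'b']
```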
e184d8adfc306a89b5c69502a69b6ad4edfa6bd9 | diff --git a/tests/base_tests.py b/tests/base_tests.py
index <HASH>..<HASH> 100644
--- a/tests/base_tests.py
+++ b/tests/base_tests.py
@@ -185,12 +185,7 @@ async def hsession_t_smallpool(s):
# Test stateful Session
async def hsession_t_stateful(s):
- r = await s.get()
- assert r.status_code == 200
-
-
-async def session_t_stateful_double_worker(s):
- r = await s.get()
+ r = await s.get(path='/cookies/set?cow=moo')
assert r.status_code == 200
diff --git a/tests/test_asks_curio.py b/tests/test_asks_curio.py
index <HASH>..<HASH> 100644
--- a/tests/test_asks_curio.py
+++ b/tests/test_asks_curio.py
@@ -32,20 +32,23 @@ class TestAsksCurio(metaclass=base_tests.TestAsksMeta):
@curio_run
async def test_session_stateful(self):
from asks.sessions import Session
- s = Session(
- 'https://google.ie', persist_cookies=True)
+ s = Session(self.httpbin.url, persist_cookies=True)
async with curio.TaskGroup() as g:
await g.spawn(base_tests.hsession_t_stateful(s))
- assert 'www.google.ie' in s._cookie_tracker_obj.domain_dict.keys()
+ domain = f'{self.httpbin.host}:{self.httpbin.port}'
+ cookies = s._cookie_tracker_obj.domain_dict[domain]
+ assert len(cookies) == 1
+ assert cookies[0].name == 'cow'
+ assert cookies[0].value == 'moo'
@curio_run
async def test_session_stateful_double(self):
from asks.sessions import Session
- s = Session('https://google.ie', persist_cookies=True)
+ s = Session(self.httpbin.url, persist_cookies=True)
async with curio.TaskGroup() as g:
for _ in range(4):
- await g.spawn(base_tests.session_t_stateful_double_worker(s))
+ await g.spawn(base_tests.hsession_t_stateful(s))
# Session Tests
diff --git a/tests/test_asks_trio.py b/tests/test_asks_trio.py
index <HASH>..<HASH> 100644
--- a/tests/test_asks_trio.py
+++ b/tests/test_asks_trio.py
@@ -32,20 +32,23 @@ class TestAsksTrio(metaclass=base_tests.TestAsksMeta):
@trio_run
async def test_session_stateful(self):
from asks.sessions import Session
- s = Session(
- 'https://google.ie', persist_cookies=True)
+ s = Session(self.httpbin.url, persist_cookies=True)
async with trio.open_nursery() as n:
n.spawn(base_tests.hsession_t_stateful, s)
- assert 'www.google.ie' in s._cookie_tracker_obj.domain_dict.keys()
+ domain = f'{self.httpbin.host}:{self.httpbin.port}'
+ cookies = s._cookie_tracker_obj.domain_dict[domain]
+ assert len(cookies) == 1
+ assert cookies[0].name == 'cow'
+ assert cookies[0].value == 'moo'
@trio_run
async def test_session_stateful_double(self):
from asks.sessions import Session
- s = Session('https://google.ie', persist_cookies=True)
+ s = Session(self.httpbin.url, persist_cookies=True)
async with trio.open_nursery() as n:
for _ in range(4):
- n.spawn(base_tests.session_t_stateful_double_worker, s)
+ n.spawn(base_tests.hsession_t_stateful, s)
# Session Tests | use httpbin instead of google.ie for cookie tests | theelous3_asks | train |
f6e2fec2cabdd5baa8bb2cef47ecad34872b1591 | diff --git a/packages/ember-extension-support/lib/data_adapter.js b/packages/ember-extension-support/lib/data_adapter.js
index <HASH>..<HASH> 100644
--- a/packages/ember-extension-support/lib/data_adapter.js
+++ b/packages/ember-extension-support/lib/data_adapter.js
@@ -369,11 +369,17 @@ export default EmberObject.extend({
@return {Array} Array of model type strings
*/
_getObjectsOnNamespaces: function() {
- var namespaces = emberA(Namespace.NAMESPACES), types = emberA();
+ var namespaces = emberA(Namespace.NAMESPACES),
+ types = emberA(),
+ self = this;
namespaces.forEach(function(namespace) {
for (var key in namespace) {
if (!namespace.hasOwnProperty(key)) { continue; }
+ // Even though we will filter again in `getModelTypes`,
+ // we should not call `lookupContainer` on non-models
+ // (especially when `Ember.MODEL_FACTORY_INJECTIONS` is `true`)
+ if (!self.detect(namespace[key])) { continue; }
var name = dasherize(key);
if (!(namespace instanceof Application) && namespace.toString()) {
name = namespace + '/' + name; | [BUGFIX beta] Filter models in the data adapter when looping over namespaces | emberjs_ember.js | train |
a034bcab1579f44c675c7d4d9daffac847cd7aa9 | diff --git a/dateparser/search/text_detection.py b/dateparser/search/text_detection.py
index <HASH>..<HASH> 100644
--- a/dateparser/search/text_detection.py
+++ b/dateparser/search/text_detection.py
@@ -26,6 +26,11 @@ class FullTextLanguageDetector(BaseLanguageDetector):
self.language_unique_chars.append(unique_chars)
def character_check(self, date_string, settings):
+ date_string_set = set(date_string)
+ symbol_set = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", " ", "/", "-", ")", "(", ".", ":", "\\"}
+ if date_string_set & symbol_set == date_string_set:
+ self.languages = [self.languages[0]]
+ return
self.get_unique_characters(settings=settings)
for i in range(len(self.languages)):
for char in self.language_unique_chars[i]:
@@ -34,7 +39,7 @@ class FullTextLanguageDetector(BaseLanguageDetector):
return
indices_to_pop = []
for i in range(len(self.languages)):
- if len(set(date_string) & self.language_chars[i]) == 0:
+ if len(date_string_set & self.language_chars[i]) == 0:
indices_to_pop.append(i)
self.languages = [i for j, i in enumerate(self.languages) if j not in indices_to_pop]
diff --git a/tests/test_search.py b/tests/test_search.py
index <HASH>..<HASH> 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -3,15 +3,34 @@ from __future__ import unicode_literals
from nose_parameterized import parameterized, param
from tests import BaseTestCase
from dateparser.search.search import DateSearchWithDetection
+from dateparser.search import search_dates
from dateparser.conf import Settings, apply_settings
import datetime
+
class TestTranslateSearch(BaseTestCase):
def setUp(self):
super(TestTranslateSearch, self).setUp()
self.search_with_detection = DateSearchWithDetection()
self.exact_language_search = self.search_with_detection.search
+ @staticmethod
+ def make_python3_msg(text):
+ text = text.replace('unicode', 'str')
+ text = text.replace('u\'', '\'')
+ text = text.replace('type', 'class')
+ return text
+
+ def run_search_dates_function_invalid_languages(self, text, languages, error_type):
+ try:
+ search_dates(text=text, languages=languages)
+ except Exception as error:
+ self.error = error
+ self.assertIsInstance(self.error, error_type)
+
+ def check_error_message(self, message):
+ self.assertEqual(self.make_python3_msg(str(self.error)), message)
+
@parameterized.expand([
param('en', "Sep 03 2014"),
param('en', "friday, 03 september 2014"),
@@ -581,6 +600,9 @@ class TestTranslateSearch(BaseTestCase):
# Vietnamese
param('vi', 'Ý theo gương Đức, đã tiến hành xâm lược Ethiopia năm 1935 và sát '
'nhập Albania vào ngày 12 tháng 4 năm 1939.'),
+
+ # only digits
+ param('en', '2007'),
])
def test_detection(self, shortname, text):
result = self.search_with_detection.detect_language(text, languages=None)
@@ -590,13 +612,29 @@ class TestTranslateSearch(BaseTestCase):
# Russian
param(text='19 марта 2001 был хороший день. 20 марта тоже был хороший день. 21 марта был отличный день.',
settings=None,
- expected={'Language': 'ru', 'Dates': [('19 марта 2001', datetime.datetime(2001, 3, 19, 0, 0)),
- ('20 марта', datetime.datetime(2001, 3, 20, 0, 0)),
- ('21 марта', datetime.datetime(2001, 3, 21, 0, 0))]}),
+ expected=[('19 марта 2001', datetime.datetime(2001, 3, 19, 0, 0)),
+ ('20 марта', datetime.datetime(2001, 3, 20, 0, 0)),
+ ('21 марта', datetime.datetime(2001, 3, 21, 0, 0))]),
param(text='Em outubro de 1936, Alemanha e Itália formaram o Eixo Roma-Berlim.',
settings={'RELATIVE_BASE': datetime.datetime(2000, 1, 1)},
- expected={'Language': 'pt', 'Dates': [('Em outubro de 1936', datetime.datetime(1936, 10, 1, 0, 0))]}),
+ expected=[('Em outubro de 1936', datetime.datetime(1936, 10, 1, 0, 0))]),
])
- def test_all_together(self, text, settings, expected):
- result = self.search_with_detection.search_dates(text, settings=settings)
+ def test_date_search_function(self, text, settings, expected):
+ result = search_dates(text, settings=settings)
self.assertEqual(result, expected)
+
+ @parameterized.expand([
+ param(text='19 марта 2001',
+ languages='wrong type: str instead of list'),
+ ])
+ def test_date_search_function_invalid_languages_type(self, text, languages):
+ self.run_search_dates_function_invalid_languages(text=text, languages=languages, error_type=TypeError)
+ self.check_error_message("languages argument must be a list (<class \'str\'> given)")
+
+ @parameterized.expand([
+ param(text='19 марта 2001',
+ languages=['unknown language code']),
+ ])
+ def test_date_search_function_invalid_language_code(self, text, languages):
+ self.run_search_dates_function_invalid_languages(text=text, languages=languages, error_type=ValueError)
        self.check_error_message("Unknown language(s): 'unknown language code'") | modified detection, added some more tests | scrapinghub_dateparser | train
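The new early exit in character_check: if the candidate date string consists only of digits and date punctuation, unique-character detection cannot discriminate between languages, so the first candidate is kept (which is what makes the `'2007'` test case pass). A condensed Python sketch of that shortcut:

```python
DATE_SYMBOLS = set('0123456789 /-().:\\')

def narrow_languages(date_string, languages):
    # Strings built only from digits and date punctuation (e.g. '2007')
    # carry no alphabetic hints, so skip per-language character checks
    # and keep the first candidate language.
    if set(date_string) <= DATE_SYMBOLS:
        return languages[:1]
    return languages

assert narrow_languages('2007', ['en', 'ru']) == ['en']
assert narrow_languages('19/03/2001', ['en', 'ru']) == ['en']
assert narrow_languages('March 2001', ['en', 'ru']) == ['en', 'ru']
```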
10fe675583a0f33866357e79ddaf83025bb1f566 | diff --git a/lib/rdkafka/bindings.rb b/lib/rdkafka/bindings.rb
index <HASH>..<HASH> 100644
--- a/lib/rdkafka/bindings.rb
+++ b/lib/rdkafka/bindings.rb
@@ -164,6 +164,7 @@ module Rdkafka
attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
+ attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
# Headers
attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
diff --git a/lib/rdkafka/consumer.rb b/lib/rdkafka/consumer.rb
index <HASH>..<HASH> 100644
--- a/lib/rdkafka/consumer.rb
+++ b/lib/rdkafka/consumer.rb
@@ -282,6 +282,37 @@ module Rdkafka
end
end
+ # Seek to a particular message. The next poll on the topic/partition will return the
+ # message at the given offset.
+ #
+ # @param message [Rdkafka::Consumer::Message] The message to which to seek
+ #
+ # @raise [RdkafkaError] When seeking fails
+ #
+ # @return [nil]
+ def seek(message)
+ # rd_kafka_offset_store is one of the few calls that does not support
+ # a string as the topic, so create a native topic for it.
+ native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
+ @native_kafka,
+ message.topic,
+ nil
+ )
+ response = Rdkafka::Bindings.rd_kafka_seek(
+ native_topic,
+ message.partition,
+ message.offset,
+ 0 # timeout
+ )
+ if response != 0
+ raise Rdkafka::RdkafkaError.new(response)
+ end
+ ensure
+ if native_topic && !native_topic.null?
+ Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
+ end
+ end
+
# Commit the current offsets of this consumer
#
# @param list [TopicPartitionList,nil] The topic with partitions to commit
diff --git a/spec/rdkafka/consumer_spec.rb b/spec/rdkafka/consumer_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/rdkafka/consumer_spec.rb
+++ b/spec/rdkafka/consumer_spec.rb
@@ -1,4 +1,5 @@
require "spec_helper"
+require "ostruct"
describe Rdkafka::Consumer do
let(:config) { rdkafka_config }
@@ -75,7 +76,7 @@ describe Rdkafka::Consumer do
tpl.add_topic("consume_test_topic", (0..2))
consumer.pause(tpl)
- # 6. unsure that messages are not available
+ # 6. ensure that messages are not available
records = consumer.poll(timeout)
expect(records).to be_nil
@@ -124,6 +125,92 @@ describe Rdkafka::Consumer do
end
end
+ describe "#seek" do
+ it "should raise an error when seeking fails" do
+ fake_msg = OpenStruct.new(topic: "consume_test_topic", partition: 0, offset: 0)
+
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
+ expect {
+ consumer.seek(fake_msg)
+ }.to raise_error Rdkafka::RdkafkaError
+ end
+
+ context "subscription" do
+ let(:timeout) { 1000 }
+
+ before do
+ consumer.subscribe("consume_test_topic")
+
+ # 1. partitions are assigned
+ wait_for_assignment(consumer)
+ expect(consumer.assignment).not_to be_empty
+
+ # 2. eat unrelated messages
+ while(consumer.poll(timeout)) do; end
+ end
+ after { consumer.unsubscribe }
+
+ def send_one_message(val)
+ producer.produce(
+ topic: "consume_test_topic",
+ payload: "payload #{val}",
+ key: "key 1",
+ partition: 0
+ ).wait
+ end
+
+ it "works when a partition is paused" do
+ # 3. get reference message
+ send_one_message(:a)
+ message1 = consumer.poll(timeout)
+ expect(message1&.payload).to eq "payload a"
+
+ # 4. pause the subscription
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
+ tpl.add_topic("consume_test_topic", 1)
+ consumer.pause(tpl)
+
+ # 5. seek to previous message
+ consumer.seek(message1)
+
+ # 6. resume the subscription
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
+ tpl.add_topic("consume_test_topic", 1)
+ consumer.resume(tpl)
+
+ # 7. ensure same message is read again
+ message2 = consumer.poll(timeout)
+ consumer.commit
+ expect(message1.offset).to eq message2.offset
+ expect(message1.payload).to eq message2.payload
+ end
+
+ it "allows skipping messages" do
+ # 3. send messages
+ send_one_message(:a)
+ send_one_message(:b)
+ send_one_message(:c)
+
+ # 4. get reference message
+ message = consumer.poll(timeout)
+ expect(message&.payload).to eq "payload a"
+
+ # 5. seek over one message
+ fake_msg = message.dup
+ fake_msg.instance_variable_set(:@offset, fake_msg.offset + 2)
+ consumer.seek(fake_msg)
+
+ # 6. ensure that only one message is available
+ records = consumer.poll(timeout)
+ expect(records&.payload).to eq "payload c"
+ records = consumer.poll(timeout)
+ expect(records).to be_nil
+
+ consumer.commit
+ end
+ end
+ end
+
describe "#assign and #assignment" do
it "should return an empty assignment if nothing is assigned" do
expect(consumer.assignment).to be_empty | implement seek (#<I>)
Implement seek for consumer | appsignal_rdkafka-ruby | train |
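The specs exercise the two seek idioms the new API enables: replaying a consumed message, and skipping ahead by seeking to a synthetic offset. A Python sketch of both flows (the `consumer` object is hypothetical, assumed to expose the same `seek(message)`/`poll(timeout)` shape as the Ruby API; `Message` is a stand-in for the consumed message object):

```python
from collections import namedtuple

Message = namedtuple('Message', 'topic partition offset')

def replay(consumer, msg):
    # Seeking to an already-consumed message makes the next poll on that
    # topic/partition return the same offset again.
    consumer.seek(msg)
    return consumer.poll(1.0)

def skip(consumer, msg, n):
    # Seeking to a synthetic message with a higher offset skips ahead,
    # mirroring the "allows skipping messages" spec above.
    consumer.seek(Message(msg.topic, msg.partition, msg.offset + n))
    return consumer.poll(1.0)
```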
d9d12638d2fb6e6c39f56011ffdcb213fafbee8d | diff --git a/bin/templates/scripts/cordova/lib/prepare.js b/bin/templates/scripts/cordova/lib/prepare.js
index <HASH>..<HASH> 100644
--- a/bin/templates/scripts/cordova/lib/prepare.js
+++ b/bin/templates/scripts/cordova/lib/prepare.js
@@ -47,8 +47,8 @@ module.exports.prepare = function (cordovaProject) {
return updateProject(self._config, self.locations);
})
.then(function () {
- handleIcons(cordovaProject.projectConfig, self.root);
- handleSplashScreens(cordovaProject.projectConfig, self.root);
+ handleIcons(cordovaProject.projectConfig, self.locations.xcodeCordovaProj);
+ handleSplashScreens(cordovaProject.projectConfig, self.locations.xcodeCordovaProj);
})
.then(function () {
self.events.emit('verbose', 'updated project successfully');
@@ -260,7 +260,7 @@ function handleBuildSettings(platformConfig, locations) {
function handleIcons(projectConfig, platformRoot) {
var icons = projectConfig.getIcons('ios');
- var appRoot = projectConfig.path;
+ var appRoot = path.dirname(projectConfig.path);
// See https://developer.apple.com/library/ios/documentation/userexperience/conceptual/mobilehig/LaunchImages.html
// for launch images sizes reference.
@@ -274,6 +274,7 @@ function handleIcons(projectConfig, platformRoot) {
{dest: '[email protected]', width: 58, height: 58},
{dest: 'icon-40.png', width: 40, height: 40},
{dest: '[email protected]', width: 80, height: 80},
+ {dest: '[email protected]', width: 87, height: 87},
{dest: 'icon.png', width: 57, height: 57},
{dest: '[email protected]', width: 114, height: 114},
{dest: 'icon-72.png', width: 72, height: 72},
@@ -304,7 +305,7 @@ function handleIcons(projectConfig, platformRoot) {
function handleSplashScreens(projectConfig, platformRoot) {
- var appRoot = projectConfig.path;
+ var appRoot = path.dirname(projectConfig.path);
var splashScreens = projectConfig.getSplashScreens('ios');
var platformSplashScreens = [ | CB-<I> correct the paths for iOS icon and splashscreen resources | apache_cordova-ios | train |
0c971013a662a700f1ad5f633628c34ad6892bc7 | diff --git a/djangosaml2/views.py b/djangosaml2/views.py
index <HASH>..<HASH> 100644
--- a/djangosaml2/views.py
+++ b/djangosaml2/views.py
@@ -24,6 +24,7 @@ from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout as django_logout
+from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.http import HttpResponseRedirect # 30x
from django.http import HttpResponseBadRequest, HttpResponseForbidden # 40x
@@ -240,7 +241,7 @@ def assertion_consumer_service(request,
create_unknown_user=create_unknown_user)
if user is None:
logger.error('The user is None')
- return HttpResponseForbidden("Permission denied")
+ raise PermissionDenied
auth.login(request, user)
        _set_subject_id(request.session, session_info['name_id']) | Have assertion_consumer_service raise a PermissionDenied exception
instead of returning HttpResponseForbidden. This enables custom views
using handler<I>. | knaperek_djangosaml2 | train
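The design choice here is Django-idiomatic: returning HttpResponseForbidden short-circuits Django's error handling, whereas raising PermissionDenied lets Django dispatch to a project-level handler403 so the 403 page can be customized. A simplified stand-in view showing the pattern (assumes Django is installed; this is not the full assertion_consumer_service):

```python
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse

def acs_like_view(request, user):
    if user is None:
        # Raising (rather than returning HttpResponseForbidden) lets
        # Django route the request through the project's handler403.
        raise PermissionDenied
    return HttpResponse('ok')
```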
11904551f748a0404c8ccc26f096952901069d9b | diff --git a/.ropeproject/globalnames b/.ropeproject/globalnames
index <HASH>..<HASH> 100644
--- a/.ropeproject/globalnames
+++ b/.ropeproject/globalnames
@@ -1,2 +1,2 @@
�}q(Utest]qUmainqaUpython]Uobjects]q(UAnimeqUMangaqeUhelpers]Uspice]q(UMANGA_SCRAPE_BASEqUsearchq Uthisq
-UMANGAqU search_idqU init_authq
UANIME_QUERY_BASEqUANIME_SCRAPE_BASEqUMANGA_QUERY_BASEqUANIMEqeu.
\ No newline at end of file
+UMANGAqU search_idqU init_authq
UANIME_QUERY_BASEqUaddqUANIME_SCRAPE_BASEqUMANGA_QUERY_BASEqUANIMEqeu.
\ No newline at end of file
diff --git a/objects.py b/objects.py
index <HASH>..<HASH> 100644
--- a/objects.py
+++ b/objects.py
@@ -1,6 +1,10 @@
"""
An object that encapsulates an Anime.
+Uses properties so that it doesn't parse through XML
+unnecessarily, and instead, does so only when immediately
+necessary.
+
Has properties:
id - The id of the anime.
title - The title of the anime.
@@ -91,6 +95,26 @@ class Anime:
self._image_url = self.raw_data.image.text
return self._image_url
+"""
+An object that encapsulates an Manga.
+
+Uses properties so that it doesn't parse through XML
+unnecessarily, and instead, does so only when immediately
+necessary.
+
+Has properties:
+ id - The id of the manga.
+ title - The title of the manga.
+ english - The english name of the manga, if applicable.
+ chapter - The number of chapters in this manga.
+ volume - The number of volumes in this manga.
+ score - The rating of the manga.
+ manga_type - The type of manga (e.g. movie, ONA, etc)
+ status - In what state the manga is in (e.g. airing, finished, etc)
+ dates - A tuple of start and end dates of airing of the manga.
+ synopsis - The synopsis text of the manga.
+ image_url - A url to the manga's cover image.
+"""
class Manga:
def __init__(self, manga_data):
self.raw_data = manga_data
@@ -137,7 +161,7 @@ class Manga:
return self._score
@property
- def anime_type(self):
+ def manga_type(self):
if self._type is None:
self._type = self.raw_data.type.text
return self._type
diff --git a/spice.py b/spice.py
index <HASH>..<HASH> 100644
--- a/spice.py
+++ b/spice.py
@@ -48,6 +48,8 @@ def search_id(id, medium):
query = results.find('span', {'itemprop':'name'})
return search(query.text, medium)
+def add(series):
+ return
if __name__ == '__main__':
print("Spice is meant to be imported into a project.") | Add docs to manga object. | Utagai_spice | train |
1bf53a98989e53375b58655c051772222069fcb8 | diff --git a/fulfil_client/client.py b/fulfil_client/client.py
index <HASH>..<HASH> 100755
--- a/fulfil_client/client.py
+++ b/fulfil_client/client.py
@@ -52,6 +52,9 @@ class Client(object):
def model(self, name):
return Model(self, name)
+ def record(self, model_name, id):
+ return Record(self.model(model_name), id)
+
class Record(object):
def __init__(self, model, id): | Introduce a record fetcher on client | fulfilio_fulfil-python-api | train |
3df79fef0bbf67200301e52ab55d4ba181eb02f8 | diff --git a/app/models/camaleon_cms/custom_field_group.rb b/app/models/camaleon_cms/custom_field_group.rb
index <HASH>..<HASH> 100644
--- a/app/models/camaleon_cms/custom_field_group.rb
+++ b/app/models/camaleon_cms/custom_field_group.rb
@@ -52,7 +52,7 @@ class CamaleonCms::CustomFieldGroup < CamaleonCms::CustomField
# only used by form on admin panel (protected)
# return array of failed_fields and full_fields [[failed fields], [all fields]]
def add_fields(items, item_options)
- self.fields.where.not(id: items.map { |_k, obj| obj['id'] }.uniq).destroy_all
+ self.fields.where.not(id: items.to_h.map { |_k, obj| obj['id'] }.uniq).destroy_all
cache_fields = []
order_index = 0
    errors_saved = [] | Casting to hash to avoid error in Rails 5 | owen2345_camaleon-cms | train
9598a25baccec2a4c919ca05862634f5bd95cc1a | diff --git a/lib/cloud_sesame/query/dsl/k_gram_phrase_methods.rb b/lib/cloud_sesame/query/dsl/k_gram_phrase_methods.rb
index <HASH>..<HASH> 100644
--- a/lib/cloud_sesame/query/dsl/k_gram_phrase_methods.rb
+++ b/lib/cloud_sesame/query/dsl/k_gram_phrase_methods.rb
@@ -10,7 +10,7 @@ module CloudSesame
words = value.split(DELIMITER).compact
or!(options) do
- literal field, phrase(value, boost: MULTIPLIER * words.size)
+ literal field, phrase(value, boost: MULTIPLIER * words.size + MULTIPLIER)
each_k_gram_combination(words, options[:min]) do |combination, original|
remaining_terms = (original - combination).join(' ')
diff --git a/spec/cloud_sesame/query/dsl/k_gram_phrase_methods_spec.rb b/spec/cloud_sesame/query/dsl/k_gram_phrase_methods_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/cloud_sesame/query/dsl/k_gram_phrase_methods_spec.rb
+++ b/spec/cloud_sesame/query/dsl/k_gram_phrase_methods_spec.rb
@@ -30,7 +30,7 @@ module CloudSesame
phrase_node = or_node.children.first
expect(phrase_node).to be_kind_of AST::Phrase
expect(phrase_node.child.value).to eq(phrase)
- expect(phrase_node.options[:boost]).to eq(KGramPhraseMethods::MULTIPLIER * 3)
+ expect(phrase_node.options[:boost]).to eq(KGramPhraseMethods::MULTIPLIER * 3 + KGramPhraseMethods::MULTIPLIER)
end
it 'builds a term node with original term' do | [al] Increase boost on the exact phrase to account for phrase boost and term boost being the same with one word | 47colborne_cloud-sesame | train |
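The arithmetic behind the change: the exact phrase previously got MULTIPLIER per word, which ties with a single remaining term's boost for one-word queries; adding one extra MULTIPLIER makes the full phrase always win. A Python sketch of the boost computation (MULTIPLIER's value is assumed here; the real constant lives in the DSL module):

```python
MULTIPLIER = 2  # assumed value for illustration

def exact_phrase_boost(phrase):
    words = phrase.split()
    # One MULTIPLIER per word plus one extra, so the exact phrase always
    # outranks partial k-gram combinations, even for single-word queries.
    return MULTIPLIER * len(words) + MULTIPLIER

assert exact_phrase_boost('hello world') == MULTIPLIER * 3
```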
29d115cfd6a37e1f656784c0262f3b11c5a85f8e | diff --git a/ui/app/build-configs/detail/dependencies-tab/pncBuildConfigDependenciesTab.js b/ui/app/build-configs/detail/dependencies-tab/pncBuildConfigDependenciesTab.js
index <HASH>..<HASH> 100644
--- a/ui/app/build-configs/detail/dependencies-tab/pncBuildConfigDependenciesTab.js
+++ b/ui/app/build-configs/detail/dependencies-tab/pncBuildConfigDependenciesTab.js
@@ -45,10 +45,14 @@
function onRemove(dependency) {
console.log('Remove dependency: %O', dependency);
+ return $ctrl.buildConfig.$removeDependency({ dependencyId: dependency.id });
}
function onEdit(dependencies) {
console.log('Update dependencies: %O', dependencies);
+ $ctrl.buildConfig.dependencies = dependencies.reduce((map, dep) => (map[dep.id] = dep, map), {});
+
+ return $ctrl.buildConfig.$update();
}
}
diff --git a/ui/app/build-configs/directives/pnc-build-configs-data-table/pncBuildConfigsDataTable.js b/ui/app/build-configs/directives/pnc-build-configs-data-table/pncBuildConfigsDataTable.js
index <HASH>..<HASH> 100644
--- a/ui/app/build-configs/directives/pnc-build-configs-data-table/pncBuildConfigsDataTable.js
+++ b/ui/app/build-configs/directives/pnc-build-configs-data-table/pncBuildConfigsDataTable.js
@@ -27,11 +27,11 @@
onEdit: '&'
},
templateUrl: 'build-configs/directives/pnc-build-configs-data-table/pnc-build-configs-data-table.html',
- controller: ['$scope', '$q', 'modalSelectService', 'filteringPaginator', 'SortHelper', Controller]
+ controller: ['$scope', '$q', 'modalSelectService', 'filteringPaginator', 'paginator', 'SortHelper', Controller]
});
- function Controller($scope, $q, modalSelectService, filteringPaginator, SortHelper) {
+ function Controller($scope, $q, modalSelectService, filteringPaginator, paginator, SortHelper) {
var $ctrl = this;
const DEFAULT_FIELDS = ['name', 'project', 'buildStatus'];
@@ -53,6 +53,7 @@
$ctrl.displayFields = $ctrl.displayFields || DEFAULT_FIELDS;
$ctrl.filterPage = filteringPaginator($ctrl.page);
+ $ctrl.pageCopy = paginator($ctrl.page);
$ctrl.filterFields = [
{
@@ -89,16 +90,17 @@
function remove(buildConfig) {
$q.when($ctrl.onRemove()(buildConfig)).then(() => {
$ctrl.filterPage.refresh();
+ $ctrl.pageCopy.refresh();
});
}
function edit() {
$q.when()
.then(function () {
- if ($ctrl.page.total === 1) {
- return $ctrl.page.data;
+ if ($ctrl.pageCopy.total === 1) {
+ return $ctrl.pageCopy.data;
} else {
- return $ctrl.page.getWithNewSize($ctrl.page.total * $ctrl.page.count).then(function (resp) { return resp.data; });
+ return $ctrl.pageCopy.getWithNewSize($ctrl.pageCopy.total * $ctrl.pageCopy.size).then(function (resp) { return resp.data; });
}
})
.then(function (buildConfigs) {
@@ -109,7 +111,8 @@
})
.then(function (editedBuildConfigs) {
$q.when($ctrl.onEdit()(editedBuildConfigs)).then(function () {
- $ctrl.page.refresh();
+ $ctrl.filterPage.refresh();
+ $ctrl.pageCopy.refresh();
});
});
}
diff --git a/ui/app/common/pnc-client/resources/BuildConfigResource.js b/ui/app/common/pnc-client/resources/BuildConfigResource.js
index <HASH>..<HASH> 100644
--- a/ui/app/common/pnc-client/resources/BuildConfigResource.js
+++ b/ui/app/common/pnc-client/resources/BuildConfigResource.js
@@ -84,6 +84,10 @@
url: ENDPOINT + '/dependencies',
isPaged: true
},
+ removeDependency: {
+ method: 'DELETE',
+ url: ENDPOINT + '/dependencies/:dependencyId'
+ },
getGroupConfigs: {
method: 'GET',
url: ENDPOINT + '/group-configs', | [NCL-<I>] Editing build configuration dependencies works | project-ncl_pnc | train |
5439bd72342405a171080ac3cfa7a8b4487afe8d | diff --git a/repository/spec/integration/typed_structs_spec.rb b/repository/spec/integration/typed_structs_spec.rb
index <HASH>..<HASH> 100644
--- a/repository/spec/integration/typed_structs_spec.rb
+++ b/repository/spec/integration/typed_structs_spec.rb
@@ -37,7 +37,7 @@ RSpec.describe 'ROM repository with typed structs' do
schema(:books, infer: true) do
attribute :title,
ROM::Types::Coercible::String.meta(
- read: ROM::Types::Symbol.constructor(&:to_sym)
+ read: ROM::Types::Symbol.constructor { |s| (s + '!').to_sym }
)
end
end
@@ -53,7 +53,7 @@ RSpec.describe 'ROM repository with typed structs' do
expect(created_book).to be_kind_of(Dry::Struct)
expect(created_book.id).to be_kind_of(Integer)
- expect(created_book.title).to eql(:'Hello World')
+ expect(created_book.title).to eql(:'Hello World!')
expect(created_book.created_at).to be_kind_of(Time)
book = repo.books.to_a.first | Ensure read types are used only once | rom-rb_rom | train |
2512341872db7da8ce89dd19676e68678e1e8caf | diff --git a/mod/quiz/report/statistics/responseanalysis.php b/mod/quiz/report/statistics/responseanalysis.php
index <HASH>..<HASH> 100644
--- a/mod/quiz/report/statistics/responseanalysis.php
+++ b/mod/quiz/report/statistics/responseanalysis.php
@@ -182,6 +182,7 @@ class quiz_statistics_response_analyser {
}
foreach ($rows as $row) {
+ $this->responses[$row->subqid][$row->aid][$row->response] = new stdClass();
$this->responses[$row->subqid][$row->aid][$row->response]->count = $row->rcount;
$this->responses[$row->subqid][$row->aid][$row->response]->fraction = $row->credit;
} | MDL-<I> quiz statistics: fix strict syntax problem. | moodle_moodle | train
033fc09d3028c46e277d4aad470576f805ff867d | diff --git a/src/components/TimeTravel/TimeTravel.js b/src/components/TimeTravel/TimeTravel.js
index <HASH>..<HASH> 100644
--- a/src/components/TimeTravel/TimeTravel.js
+++ b/src/components/TimeTravel/TimeTravel.js
@@ -291,14 +291,14 @@ class TimeTravel extends React.Component {
}
switchToLiveMode() {
- if (!this.state.showingLive) {
+ if (this.props.hasLiveMode && !this.state.showingLive) {
this.setState({ showingLive: true });
this.props.onChangeLiveMode(true);
}
}
switchToPausedMode() {
- if (this.state.showingLive) {
+ if (this.props.hasLiveMode && this.state.showingLive) {
this.setState({ showingLive: false });
this.props.onChangeLiveMode(false);
}
@@ -436,8 +436,10 @@ TimeTravel.defaultProps = {
earliestTimestamp: '2014-01-01T00:00:00Z',
hasLiveMode: false,
showingLive: true, // only relevant if live mode is enabled
+ onChangeLiveMode: noop,
hasRangeSelector: false,
rangeMs: 3600000, // 1 hour as a default, only relevant if range selector is enabled
+ onChangeRange: noop,
onTimestampInputEdit: noop,
onTimelinePanButtonClick: noop,
onTimelineLabelClick: noop, | Do not use onChangeLiveMode callback if live mode is not supported. | weaveworks_ui-components | train |
6cb1924468f09d6e4901aff7214ac8f1d76d2f2e | diff --git a/test/profile/valgrind.rb b/test/profile/valgrind.rb
index <HASH>..<HASH> 100644
--- a/test/profile/valgrind.rb
+++ b/test/profile/valgrind.rb
@@ -59,8 +59,8 @@ class Worker
when "get-increasing"
@i.times do |i|
key = "#{@key1}_#{'x'*i}"[0..256]
- @cache.set key, 'Val'*i
- @cache.get key
+ @cache.set key, 'x'*i, 0, false
+ @cache.get key, false
end
when "get-miss-increasing"
@i.times do |i| | Raw mode so we can predict the size. | arthurnn_memcached | train |
11c564594740f23559de234e6c56fc04984e9f4c | diff --git a/salt/states/cmd.py b/salt/states/cmd.py
index <HASH>..<HASH> 100644
--- a/salt/states/cmd.py
+++ b/salt/states/cmd.py
@@ -672,6 +672,14 @@ def run(name,
### of unsupported arguments in a cmd.run state will result in a
### traceback.
+ test_name = None
+ if not isinstance(stateful, list):
+ stateful = stateful is True
+ elif isinstance(stateful, list) and 'test_name' in stateful[0]:
+ test_name = stateful[0]['test_name']
+ if __opts__['test'] and test_name:
+ name = test_name
+
ret = {'name': name,
'changes': {},
'result': False,
@@ -709,23 +717,28 @@ def run(name,
ret.update(cret)
return ret
- # Wow, we passed the test, run this sucker!
- if not __opts__['test']:
- try:
- cmd_all = __salt__['cmd.run_all'](
- name, timeout=timeout, python_shell=True, **cmd_kwargs
- )
- except CommandExecutionError as err:
- ret['comment'] = str(err)
- return ret
-
- ret['changes'] = cmd_all
- ret['result'] = not bool(cmd_all['retcode'])
- ret['comment'] = 'Command "{0}" run'.format(name)
+ if __opts__['test'] and not test_name:
+ ret['result'] = None
+ ret['comment'] = 'Command "{0}" would have been executed'.format(name)
return _reinterpreted_state(ret) if stateful else ret
- ret['result'] = None
- ret['comment'] = 'Command "{0}" would have been executed'.format(name)
- return _reinterpreted_state(ret) if stateful else ret
+
+ # Wow, we passed the test, run this sucker!
+ try:
+ cmd_all = __salt__['cmd.run_all'](
+ name, timeout=timeout, python_shell=True, **cmd_kwargs
+ )
+ except CommandExecutionError as err:
+ ret['comment'] = str(err)
+ return ret
+
+ ret['changes'] = cmd_all
+ ret['result'] = not bool(cmd_all['retcode'])
+ ret['comment'] = 'Command "{0}" run'.format(name)
+ if stateful:
+ ret = _reinterpreted_state(ret)
+ if __opts__['test'] and cmd_all['retcode'] == 0 and ret['changes']:
+ ret['result'] = None
+ return ret
finally:
if HAS_GRP: | Support test mode for the cmd states when stateful=True (WIP) | saltstack_salt | train |
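A condensed Python sketch of the new test_name resolution added above: `stateful` may be a bool or a list like `[{'test_name': ...}]`, and in test mode the dry-run command (if given) replaces the real one. Error handling and the result munging from the full state are omitted:

```python
def resolve_command(name, stateful, test_mode):
    """Pick the command cmd.run should execute."""
    test_name = None
    if isinstance(stateful, list) and 'test_name' in stateful[0]:
        test_name = stateful[0]['test_name']
    if test_mode and test_name:
        name = test_name        # in test mode, run the dry-run variant
    return name, test_name

assert resolve_command('deploy.sh', True, test_mode=True) == ('deploy.sh', None)
assert resolve_command(
    'deploy.sh', [{'test_name': 'deploy.sh --dry-run'}], test_mode=True
) == ('deploy.sh --dry-run', 'deploy.sh --dry-run')
```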
3220c796f6c565440e7f0250d60fc0c9a2b51b6c | diff --git a/lib/softlayer/Account.rb b/lib/softlayer/Account.rb
index <HASH>..<HASH> 100644
--- a/lib/softlayer/Account.rb
+++ b/lib/softlayer/Account.rb
@@ -4,6 +4,10 @@ module SoftLayer
class Account < SoftLayer::ModelBase
include ::SoftLayer::ModelResource
+ ##
+ # The bare metal (or Hardware) servers associated with the
+ # account. Unless you force these to update, they will be refreshed every
+ # five minutes.
softlayer_resource :bare_metal_servers do |bare_metal|
bare_metal.should_update_if do
@last_bare_metal_update ||= Time.at(0)
@@ -16,6 +20,10 @@ module SoftLayer
end
end
+ ##
+ # The virtual servers (aka. CCIs or Virtual_Guests) associated with the
+ # account. Unless you force these to update, they will be refreshed every
+ # five minutes.
softlayer_resource :virtual_servers do |virtual_servers|
virtual_servers.should_update_if do
@last_virtual_server_update ||= Time.at(0)
@@ -30,7 +38,7 @@ module SoftLayer
##
# The tickets resource consists of all open tickets, and tickets closed
- # "recently"
+ # "recently". These refresh every 5 minutes
softlayer_resource :tickets do |tickets|
tickets.should_update_if do
@last_ticket_update ||= Time.at(0)
diff --git a/lib/softlayer/BareMetalServer.rb b/lib/softlayer/BareMetalServer.rb
index <HASH>..<HASH> 100644
--- a/lib/softlayer/BareMetalServer.rb
+++ b/lib/softlayer/BareMetalServer.rb
@@ -16,7 +16,7 @@ module SoftLayer
'networkManagementIpAddress',
'networkComponents[id, status, speed, maxSpeed, name, ipmiMacAddress, ipmiIpAddress, macAddress, primaryIpAddress, port, primarySubnet]',
'activeTransaction[id, transactionStatus[friendlyName,name]]',
- 'hardwareChassis[id,name]']
+ 'hardwareChassis[id, name]']
super + [sub_mask]
end
diff --git a/lib/softlayer/ModelResource.rb b/lib/softlayer/ModelResource.rb
index <HASH>..<HASH> 100644
--- a/lib/softlayer/ModelResource.rb
+++ b/lib/softlayer/ModelResource.rb
@@ -46,13 +46,31 @@ module SoftLayer
@resource_definitions ||= {};
@resource_definitions[resource_name] = resource_definition;
- # define an instance method of the class this is being
- # called on which will get the value of the resource.
- getter_name = resource_name.to_sym
- value_instance_variable = "@#{resource_name}".to_sym
+
+ # define a method called "update_<resource_name>!" which calls the update block
+ # stored in the resource definition
update_symbol = "update_#{resource_name}!".to_sym
+ define_method(update_symbol, &resource_definition.update_block)
+
+ # define a method called "should_update_<resource_name>?" which calls the
+ # should update block stored in the resource definition
should_update_symbol = "should_update_#{resource_name}?".to_sym
+ define_method(should_update_symbol, &resource_definition.should_update_block)
+ # define an instance method of the class this is being
+ # called on which will get the value of the resource.
+ #
+ # The getter will take one argument "force_update" which
+ # is treated as boolean value. If true, then the getter will
+ # force the resource to update (by using it's "to_update") block.
+ #
+ # If the force variable is false, or not given, then the
+ # getter will call the "should update" block to find out if the
+ # resource needs to be updated.
+ #
+ getter_name = resource_name.to_sym
+ value_instance_variable = "@#{resource_name}".to_sym
+
define_method(getter_name) do |*args|
force_update = args[0] || false
@@ -63,10 +81,6 @@ module SoftLayer
instance_variable_get(value_instance_variable)
end
- # define a method called "update_<resource_name>!" which calls the update block
- # stored in the resource definition
- define_method(update_symbol, &resource_definition.update_block)
- define_method(should_update_symbol, &resource_definition.should_update_block)
end
def softlayer_resource_definition(resource_name) | Improved the documentation for the softlayer_resource metaprogramming system | softlayer_softlayer-ruby | train |
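The metaprogrammed getter documented here follows a common pattern: a cached value, a should-update predicate (here a five-minute TTL), and a force_update argument that bypasses the check. A small Python analogue of that pattern (a hypothetical helper for illustration, not the Ruby API):

```python
import time

def cached_resource(fetch, ttl=300):
    """Build a getter that re-fetches at most every `ttl` seconds, or
    immediately when called with force_update=True."""
    state = {'value': None, 'stamp': 0.0}

    def getter(force_update=False):
        if force_update or (time.time() - state['stamp']) > ttl:
            state['value'] = fetch()
            state['stamp'] = time.time()
        return state['value']

    return getter

tickets = cached_resource(lambda: ['ticket-1'])
assert tickets() == ['ticket-1']      # first access populates the cache
assert tickets(True) == ['ticket-1']  # force_update bypasses the TTL check
```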
c76ac17cbfcfb07d2f1e4ea26907f7ab509d0039 | diff --git a/lib/graphite/graphiteCollector.js b/lib/graphite/graphiteCollector.js
index <HASH>..<HASH> 100644
--- a/lib/graphite/graphiteCollector.js
+++ b/lib/graphite/graphiteCollector.js
@@ -27,7 +27,7 @@ GraphiteCollector.prototype.collect = function(aggregates, pages, domains) {
if (this.config.graphiteData.indexOf('summary') > -1 || this.config.graphiteData.indexOf(
'all') > -1) {
- statistics += this._getSummaryStats(aggregates, config.urlObject.hostname);
+ statistics += this._getSummaryStats(aggregates, config.urlObject.hostname, pages.length);
}
statistics += self._getDomainStats(domains, config.urlObject.hostname);
@@ -50,9 +50,6 @@ GraphiteCollector.prototype._getPageStats = function(page) {
statistics += this._getWPTStats(page, urlKey);
statistics += this._getAssetsStats(page, urlKey);
- // and add the number of runs, lets use to divide
- statistics += this.namespace + '.' + urlKey + 'config.runs ' + this.config.no + this.timeStamp;
-
return statistics;
};
@@ -273,7 +270,7 @@ GraphiteCollector.prototype._getDomainStats = function(domains, hostname) {
};
-GraphiteCollector.prototype._getSummaryStats = function(aggregates, hostname) {
+GraphiteCollector.prototype._getSummaryStats = function(aggregates, hostname, noOfPages) {
var statistics = '';
var self = this;
var values = ['min', 'p10', 'median', 'mean', 'p90', 'p99', 'max'];
@@ -291,6 +288,13 @@ GraphiteCollector.prototype._getSummaryStats = function(aggregates, hostname) {
}
});
});
+
+ // and add the number of runs
+ statistics += self.namespace + '.summary.' + hostname + '.runsperbrowser ' + this.config.no + this.timeStamp;
+
+ // and number of tested pages per
+ statistics += this.namespace + '.summary.' + hostname + '.testedpages ' + noOfPages + this.timeStamp;
+
return statistics;
}; | better naming of runs and number of tested pages #<I> | sitespeedio_sitespeed.io | train |
a700e37cf56887959bee2d137c7817a1dc1970d6 | diff --git a/cp-main.go b/cp-main.go
index <HASH>..<HASH> 100644
--- a/cp-main.go
+++ b/cp-main.go
@@ -199,14 +199,14 @@ func doCopyCmdSession(session *sessionV2) {
isCopied := isCopiedFactory(session.Header.LastCopied)
wg := new(sync.WaitGroup)
- // Limit numner of cast routines based on available CPU resources.
+ // Limit number of copy routines based on available CPU resources.
cpQueue := make(chan bool, int(math.Max(float64(runtime.NumCPU())-1, 1)))
defer close(cpQueue)
- // Status channel for receiveing cast return status.
+ // Status channel for receiveing copy return status.
statusCh := make(chan copyURLs)
- // Go routine to monitor doCast status and signal traps.
+ // Go routine to monitor doCopy status and signal traps.
wg.Add(1)
go func() {
defer wg.Done()
@@ -231,7 +231,7 @@ func doCopyCmdSession(session *sessionV2) {
}
}()
- // Go routine to perform concurrently casting.
+ // Go routine to perform concurrently copying.
wg.Add(1)
go func() {
defer wg.Done()
@@ -244,13 +244,13 @@ func doCopyCmdSession(session *sessionV2) {
if isCopied(cpURLs.SourceContent.Name) {
doCopyFake(cpURLs, &bar)
} else {
- // Wait for other cast routines to
+ // Wait for other copy routines to
// complete. We only have limited CPU
// and network resources.
cpQueue <- true
- // Account for each cast routines we start.
+ // Account for each copy routines we start.
copyWg.Add(1)
- // Do casting in background concurrently.
+ // Do copying in background concurrently.
go doCopy(cpURLs, &bar, cpQueue, copyWg, statusCh)
}
}
diff --git a/docs/mirror.md b/docs/mirror.md
index <HASH>..<HASH> 100644
--- a/docs/mirror.md
+++ b/docs/mirror.md
@@ -2,25 +2,25 @@
```go
NAME:
- mc mirror - Copy files and folders from a single source to many destinations
+ mc mirror - Mirror files and folders from a single source to many destinations
USAGE:
mc mirror SOURCE TARGET [TARGET...]
EXAMPLES:
- 1. Cast an object from local filesystem to Amazon S3 cloud storage.
+ 1. Mirror an object from local filesystem to Amazon S3 cloud storage.
$ mc mirror star-trek-episode-10-season4.ogg https://s3.amazonaws.com/trekarchive
- 2. Cast a bucket recursively from Minio cloud storage to multiple buckets on Amazon S3 cloud storage.
+ 2. Mirror a bucket recursively from Minio cloud storage to multiple buckets on Amazon S3 cloud storage.
$ mc mirror https://play.minio.io:9000/photos/2014... https://s3.amazonaws.com/backup-photos https://s3.amazonaws.com/my-photos
- 3. Cast a local folder recursively to Minio cloud storage and Amazon S3 cloud storage.
+ 3. Mirror a local folder recursively to Minio cloud storage and Amazon S3 cloud storage.
$ mc mirror backup/... https://play.minio.io:9000/archive https://s3.amazonaws.com/archive
- 4. Cast a bucket from aliased Amazon S3 cloud storage to multiple folders on Windows.
+ 4. Mirror a bucket from aliased Amazon S3 cloud storage to multiple folders on Windows.
$ mc mirror s3:documents/2014/... C:\backup\2014 C:\shared\volume\backup\2014
- 5. Cast a local file of non english character to Amazon s3 cloud storage.
+ 5. Mirror a local file of non english character to Amazon s3 cloud storage.
$ mc mirror 本語/... s3:mylocaldocuments C:\backup\2014 play:backup
```
diff --git a/mirror-main.go b/mirror-main.go
index <HASH>..<HASH> 100644
--- a/mirror-main.go
+++ b/mirror-main.go
@@ -34,7 +34,7 @@ import (
// Help message.
var mirrorCmd = cli.Command{
Name: "mirror",
- Usage: "Copy files and folders from a single source to many destinations",
+ Usage: "Mirror files and folders from a single source to many destinations",
Action: runMirrorCmd,
CustomHelpTemplate: `NAME:
mc {{.Name}} - {{.Usage}} | Change more cast to 'mirror' and fix some typos | minio_mc | train |
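The renaming commit above leaves a classic Go concurrency pattern intact: a buffered channel (`cpQueue`) used as a counting semaphore to cap in-flight copy goroutines, drained by a `sync.WaitGroup`. A stripped-down sketch of that pattern, with a hypothetical `copyOne` standing in for `doCopy`:

```go
package main

import (
	"fmt"
	"math"
	"runtime"
	"sync"
	"time"
)

func copyOne(id int) {
	time.Sleep(100 * time.Millisecond) // stand-in for the real copy work
	fmt.Println("copied", id)
}

func main() {
	// Cap concurrency at NumCPU-1, but always allow at least one worker,
	// mirroring the expression used in the diff.
	limit := int(math.Max(float64(runtime.NumCPU())-1, 1))
	queue := make(chan bool, limit) // counting semaphore
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		queue <- true // blocks once `limit` goroutines are in flight
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-queue }() // release the slot
			copyOne(id)
		}(i)
	}
	wg.Wait()
}
```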
167921aba483d66d43257361fcdac48c68dc5e21 | diff --git a/src/pyrocore/torrent/queue.py b/src/pyrocore/torrent/queue.py
index <HASH>..<HASH> 100644
--- a/src/pyrocore/torrent/queue.py
+++ b/src/pyrocore/torrent/queue.py
@@ -58,15 +58,22 @@ class QueueManager(object):
# Start eligible items
for idx, item in enumerate(startable):
+ # Check if we reached 'start_now' in this run
if idx >= start_now:
self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % (
start_now, len(startable)-idx,))
break
-
- if len(downloading) >= self.config.downloading_max:
- self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
- len(downloading), downloading_max, len(startable)-idx,))
- break
+
+ # Only check the other conditions when we have `downloading_min` covered
+ if len(downloading) < self.config.downloading_min:
+ self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % (
+ len(downloading), self.config.downloading_min))
+ else:
+ # Limit to the given maximum of downloading items
+ if len(downloading) >= self.config.downloading_max:
+ self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
+ len(downloading), self.config.downloading_max, len(startable)-idx,))
+ break
# If we made it here, start it!
downloading.append(item) | take 'downloading_min' into account | pyroscope_pyrocore | train |
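The pyrocore change layers three checks: stop once `start_now` items were started this run, always catch up to `downloading_min`, and only then enforce `downloading_max`. A hedged Go sketch of that decision order (the `queueConfig` type and `shouldStart` helper are invented for illustration):

```go
package main

import "fmt"

type queueConfig struct {
	downloadingMin int
	downloadingMax int
}

// shouldStart reports whether the idx-th startable item may be started,
// given how many items were started this run and how many are downloading.
func shouldStart(cfg queueConfig, idx, startNow, downloading int) bool {
	if idx >= startNow {
		return false // per-run start budget exhausted
	}
	if downloading < cfg.downloadingMin {
		return true // catch up to the minimum unconditionally
	}
	return downloading < cfg.downloadingMax // then respect the maximum
}

func main() {
	cfg := queueConfig{downloadingMin: 2, downloadingMax: 4}
	downloading := 1
	for idx := 0; idx < 6; idx++ {
		if !shouldStart(cfg, idx, 5, downloading) {
			break
		}
		downloading++
		fmt.Println("started item", idx, "now downloading:", downloading)
	}
}
```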
ff3273c96917555eb7a574ecea21196323ca7b6b | diff --git a/AnnotationReader.php b/AnnotationReader.php
index <HASH>..<HASH> 100644
--- a/AnnotationReader.php
+++ b/AnnotationReader.php
@@ -58,7 +58,7 @@ final class AnnotationReader implements Reader
'access', 'author', 'copyright', 'deprecated', 'example', 'ignore',
'internal', 'link', 'see', 'since', 'tutorial', 'version', 'package',
'subpackage', 'name', 'global', 'param', 'return', 'staticvar',
- 'static', 'var', 'throws', 'inheritdoc', 'inheritDoc',
+ 'static', 'var', 'throws', 'inheritdoc', 'inheritDoc', 'license', 'todo',
);
/** | added @license and @todo to globally ignored names | doctrine_annotations | train |
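The doctrine commit merely grows a blacklist of docblock tag names that the annotation reader must skip. The same filtering idea in a small Go sketch, which extracts `@tag` tokens and drops the globally ignored ones; the regex and the set contents are illustrative, not Doctrine's actual parser:

```go
package main

import (
	"fmt"
	"regexp"
)

var ignoredTags = map[string]bool{
	"author": true, "deprecated": true, "license": true,
	"param": true, "return": true, "see": true, "todo": true, "var": true,
}

var tagRe = regexp.MustCompile(`@([A-Za-z][A-Za-z0-9_]*)`)

// annotationTags returns the docblock tags that are NOT globally ignored,
// i.e. the candidates for real annotation parsing.
func annotationTags(docblock string) []string {
	var out []string
	for _, m := range tagRe.FindAllStringSubmatch(docblock, -1) {
		if !ignoredTags[m[1]] {
			out = append(out, m[1])
		}
	}
	return out
}

func main() {
	doc := `/**
	 * @license MIT
	 * @todo tidy up
	 * @Entity
	 */`
	fmt.Println(annotationTags(doc)) // [Entity]
}
```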
450e5d8faed3c513bb59c63f73ac34e939904910 | diff --git a/nipap-www/nipapwww/controllers/pool.py b/nipap-www/nipapwww/controllers/pool.py
index <HASH>..<HASH> 100644
--- a/nipap-www/nipapwww/controllers/pool.py
+++ b/nipap-www/nipapwww/controllers/pool.py
@@ -56,7 +56,7 @@ class PoolController(BaseController):
"""
c.pool = Pool.get(int(id))
- c.prefix_list = Prefix.list({ 'pool': c.pool.id })
+ c.prefix_list = Prefix.list({ 'pool_id': c.pool.id })
# save changes to NIPAP
if request.method == 'POST':
diff --git a/nipap/nipap/nipap.py b/nipap/nipap/nipap.py
index <HASH>..<HASH> 100644
--- a/nipap/nipap/nipap.py
+++ b/nipap/nipap/nipap.py
@@ -661,8 +661,8 @@ class Nipap:
self._logger.debug("add_vrf called; attr: %s" % str(attr))
# sanity check - do we have all attributes?
- req_attr = [ 'vrf', 'name', 'description' ]
- self._check_attr(attr, req_attr, req_attr)
+ req_attr = [ 'vrf', 'name' ]
+ self._check_attr(attr, req_attr, req_attr + [ 'description', ])
insert, params = self._sql_expand_insert(attr)
sql = "INSERT INTO ip_net_vrf " + insert
@@ -785,7 +785,7 @@ class Nipap:
vrf = self.list_vrf(auth, { 'name': spec['vrf_name'] })
else:
# no VRF specified - return the no-VRF VRF
- return { 'id': 0, 'vrf': None, 'vrf_name': None }
+ return { 'id': 0, 'vrf': None, 'name': None }
if len(vrf) > 0:
return vrf[0]
@@ -1901,7 +1901,8 @@ class Nipap:
# write to audit table
audit_params = {
- 'vrf': vrf['vrf'],
+ 'vrf': vrf['id'],
+ 'vrf_vrf': vrf['vrf'],
'vrf_name': vrf['name'],
'prefix': prefix_id,
'prefix_prefix': attr['prefix'],
@@ -2002,18 +2003,17 @@ class Nipap:
self._execute(sql, params)
# write to audit table
- # TODO: add VRF stuff
audit_params = {
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source,
- 'vrf': vrf['vrf'],
- 'vrf_name': vrf['name'],
- 'vrf_id': vrf['id']
}
for p in prefixes:
+ audit_params['vrf'] = p['vrf_id']
+ audit_params['vrf_name'] = p['vrf_name']
+ audit_params['vrf_vrf'] = p['vrf']
audit_params['prefix'] = p['id']
audit_params['prefix_prefix'] = p['prefix']
audit_params['description'] = 'Edited prefix %s attr: %s' % (p['prefix'], str(attr))
@@ -2028,6 +2028,9 @@ class Nipap:
continue
audit_params2 = {
+ 'vrf': p['vrf_id'],
+ 'vrf_name': p['vrf_name'],
+ 'vrf_vrf': p['vrf'],
'prefix': p['id'],
'prefix_prefix': p['prefix'],
'pool': pool['id'],
diff --git a/nipap/sql/ip_net.plsql b/nipap/sql/ip_net.plsql
index <HASH>..<HASH> 100644
--- a/nipap/sql/ip_net.plsql
+++ b/nipap/sql/ip_net.plsql
@@ -59,6 +59,8 @@ CREATE TABLE ip_net_pool (
COMMENT ON TABLE ip_net_pool IS 'IP Pools for assigning prefixes from';
+COMMENT ON INDEX ip_net_pool_name_key IS 'pool name';
+
--
-- this table stores the actual prefixes in the address plan, or net | Various small fixes...
Part of #<I> | SpriteLink_NIPAP | train |
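One part of the NIPAP commit relaxes `add_vrf` so that only `vrf` and `name` are required while `description` remains merely allowed, via a `_check_attr(attr, required, allowed)`-style guard. A rough Go equivalent of such a validator (function and field names are assumptions):

```go
package main

import (
	"errors"
	"fmt"
)

// checkAttr verifies that every required key is present and that no key
// falls outside the allowed set, mirroring a _check_attr-style guard.
func checkAttr(attr map[string]string, required, allowed []string) error {
	allowedSet := map[string]bool{}
	for _, k := range allowed {
		allowedSet[k] = true
	}
	for _, k := range required {
		if _, ok := attr[k]; !ok {
			return errors.New("missing required attribute: " + k)
		}
	}
	for k := range attr {
		if !allowedSet[k] {
			return errors.New("unknown attribute: " + k)
		}
	}
	return nil
}

func main() {
	required := []string{"vrf", "name"}
	allowed := append(required, "description")

	err := checkAttr(map[string]string{"vrf": "123:456", "name": "test"}, required, allowed)
	fmt.Println(err) // <nil>: description is optional now
	err = checkAttr(map[string]string{"vrf": "123:456"}, required, allowed)
	fmt.Println(err) // missing required attribute: name
}
```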
ec9bc848e1ad23bf77f5bb05f31ad0640d1bc318 | diff --git a/generator/lib/model/Column.php b/generator/lib/model/Column.php
index <HASH>..<HASH> 100644
--- a/generator/lib/model/Column.php
+++ b/generator/lib/model/Column.php
@@ -212,7 +212,12 @@ class Column extends XMLElement
// Add type, size information to associated Domain object
$this->getDomain()->replaceSqlType($this->getAttribute("sqlType"));
- $this->getDomain()->replaceSize($this->getAttribute("size", $type == 'VARCHAR' ? 255 : null));
+ if (!$this->getAttribute("size") && $this->getDomain()->getType() == 'VARCHAR' && !$this->getAttribute("sqlType")) {
+ $size = 255;
+ } else {
+ $size = $this->getAttribute("size");
+ }
+ $this->getDomain()->replaceSize($size);
$this->getDomain()->replaceScale($this->getAttribute("scale"));
$defval = $this->getAttribute("defaultValue", $this->getAttribute("default")); | [<I>][<I>] Fixed generated SQL code for VARCHAR columns with custom SQLType | propelorm_Propel | train |
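The Propel fix narrows the implicit VARCHAR(255) default so it no longer clobbers columns that declare a custom `sqlType`. The same guard as a hedged Go sketch (the `column` struct and accessor shape are invented):

```go
package main

import "fmt"

type column struct {
	size    string // "" means not set
	typ     string
	sqlType string // custom SQL type, "" means not set
}

// effectiveSize reproduces the fixed rule: default VARCHAR columns to 255
// only when neither a size nor a custom sqlType was declared.
func effectiveSize(c column) string {
	if c.size == "" && c.typ == "VARCHAR" && c.sqlType == "" {
		return "255"
	}
	return c.size
}

func main() {
	fmt.Println(effectiveSize(column{typ: "VARCHAR"}))                      // 255
	fmt.Println(effectiveSize(column{typ: "VARCHAR", sqlType: "TINYTEXT"})) // "" (custom sqlType keeps its own size)
	fmt.Println(effectiveSize(column{typ: "VARCHAR", size: "64"}))          // 64
}
```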
953c9ded77fec1cda589594f1f41dab5003fe5b8 | diff --git a/core/src/main/java/org/infinispan/util/logging/Log.java b/core/src/main/java/org/infinispan/util/logging/Log.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/org/infinispan/util/logging/Log.java
+++ b/core/src/main/java/org/infinispan/util/logging/Log.java
@@ -1395,7 +1395,7 @@ public interface Log extends BasicLogger {
Object clusterManagerStatus);
@LogMessage(level = WARN)
- @Message(value = "No indexable classes were defined for this indexed cache; switching to autodetection (support for autodetection will be removed in Infinispan 10.0).", id = 403)
+ @Message(value = "No indexable classes were defined for this indexed cache; switching to autodetection (support for autodetection will be removed in Infinispan 11).", id = 403)
void noIndexableClassesDefined();
// @Message(value = "The configured entity class %s is not indexable. Please remove it from the indexing configuration.", id = 404) | ISPN<I> use outdated release for deprecation warning
Indexing work was moved to ISPN <I> | infinispan_infinispan | train |
e4fbd210f5d4c82de8a723fd84074556edc5dc0d | diff --git a/djangojs/runners.py b/djangojs/runners.py
index <HASH>..<HASH> 100644
--- a/djangojs/runners.py
+++ b/djangojs/runners.py
@@ -110,7 +110,7 @@ class PhantomJsRunner(object):
'''
separator = '=' * LINE_SIZE
title = kwargs['title'] if 'title' in kwargs else 'phantomjs output'
- nb_spaces = (LINE_SIZE - len(title)) / 2
+ nb_spaces = (LINE_SIZE - len(title)) // 2
if VERBOSE:
print ''
diff --git a/djangojs/tests/test_context.py b/djangojs/tests/test_context.py
index <HASH>..<HASH> 100644
--- a/djangojs/tests/test_context.py
+++ b/djangojs/tests/test_context.py
@@ -175,4 +175,4 @@ class ContextJsonViewTest(TestCase):
self.assertIsNotNone(response)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
- self.assertIsNotNone(json.loads(response.content))
+ self.assertIsNotNone(json.loads(response.content.decode()))
diff --git a/djangojs/tests/test_urls.py b/djangojs/tests/test_urls.py
index <HASH>..<HASH> 100644
--- a/djangojs/tests/test_urls.py
+++ b/djangojs/tests/test_urls.py
@@ -158,7 +158,7 @@ class UrlsJsonViewTest(UrlsTestMixin, TestCase):
def get_result(self):
self.response = self.client.get(reverse('django_js_urls'))
- return json.loads(self.response.content)
+ return json.loads(self.response.content.decode())
def test_render(self):
'''It should render a JSON URLs descriptor'''
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -33,6 +33,7 @@ setup(
include_package_data=True,
install_requires=['django'],
license='LGPL',
+ use_2to3=True,
classifiers=[
"Framework :: Django",
"Development Status :: 4 - Beta", | Support Python3 using 2to3.
Have setup run 2to3 during installation.
Use .decode() to get unicode strings in tests.
Use integer division. | noirbizarre_django.js | train |
7428f9fbb10c91d6b35a1c9e6513a605c732b9d0 | diff --git a/input/carbon/carbon.go b/input/carbon/carbon.go
index <HASH>..<HASH> 100644
--- a/input/carbon/carbon.go
+++ b/input/carbon/carbon.go
@@ -117,19 +117,21 @@ func New() *Carbon {
addrStr: addr,
addr: addrT,
schemas: schemas,
- quit: make(chan struct{}),
connTrack: NewConnTrack(),
}
}
func (c *Carbon) Start(metrics mdata.Metrics, metricIndex idx.MetricIndex, usg *usage.Usage) {
- c.Input = input.New(metrics, metricIndex, usg, "carbon")
+ if c.Input.MsgsAge == nil {
+ c.Input = input.New(metrics, metricIndex, usg, "carbon")
+ }
l, err := net.ListenTCP("tcp", c.addr)
if nil != err {
log.Fatal(4, "carbon-in: %s", err.Error())
}
c.listener = l
log.Info("carbon-in: listening on %v/tcp", c.addr)
+ c.quit = make(chan struct{})
go c.accept()
} | support calling Start and Stop over and over | grafana_metrictank | train |
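The metrictank commit makes the carbon input restartable: one-time wiring is guarded by a nil check, and the `quit` channel is recreated on every `Start` because a closed channel cannot be reused. A condensed Go sketch of that start/stop lifecycle (the `serve` loop is a stand-in for `accept`):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type server struct {
	mu   sync.Mutex
	quit chan struct{}
}

// Start creates a fresh quit channel each time, so the server can be
// started again after Stop (a closed channel cannot be reused).
func (s *server) Start() {
	s.mu.Lock()
	s.quit = make(chan struct{})
	quit := s.quit
	s.mu.Unlock()
	go s.serve(quit)
}

func (s *server) Stop() {
	s.mu.Lock()
	close(s.quit)
	s.mu.Unlock()
}

func (s *server) serve(quit chan struct{}) {
	for {
		select {
		case <-quit:
			fmt.Println("stopped")
			return
		case <-time.After(50 * time.Millisecond):
			fmt.Println("tick")
		}
	}
}

func main() {
	s := &server{}
	for i := 0; i < 2; i++ { // start/stop twice to show restartability
		s.Start()
		time.Sleep(120 * time.Millisecond)
		s.Stop()
		time.Sleep(20 * time.Millisecond)
	}
}
```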