hash (stringlengths 40-40) | diff (stringlengths 131-114k) | message (stringlengths 7-980) | project (stringlengths 5-67) | split (stringclasses: 1 value)
---|---|---|---|---|
22d3986863a981d0eb2bc32b3874c2ab9391858a | diff --git a/salt/modules/virtualenv_mod.py b/salt/modules/virtualenv_mod.py
index <HASH>..<HASH> 100644
--- a/salt/modules/virtualenv_mod.py
+++ b/salt/modules/virtualenv_mod.py
@@ -125,7 +125,7 @@ def create(path,
# Virtualenv package
try:
import virtualenv
- VIRTUALENV_VERSION_INFO = tuple(
+ virtualenv_version_info = tuple(
[int(i) for i in
virtualenv.__version__.split('rc')[0].split('.')]
)
@@ -138,7 +138,7 @@ def create(path,
'Unable to get the virtualenv version output using {0!r}. '
'Returned data: {1!r}'.format(version_cmd, ret)
)
- VIRTUALENV_VERSION_INFO = tuple(
+ virtualenv_version_info = tuple(
[int(i) for i in
ret['stdout'].strip().split('rc')[0].split('.')]
)
@@ -146,7 +146,7 @@ def create(path,
if no_site_packages is True:
cmd.append('--no-site-packages')
if distribute:
- if VIRTUALENV_VERSION_INFO >= (1, 10):
+ if virtualenv_version_info >= (1, 10):
log.info(
'The virtualenv \'--distribute\' option has been '
'deprecated in virtualenv(>=1.10), as such, the '
@@ -166,7 +166,7 @@ def create(path,
for entry in extra_search_dir:
cmd.append('--extra-search-dir={0}'.format(entry))
if never_download is True:
- if VIRTUALENV_VERSION_INFO >= (1, 10):
+ if virtualenv_version_info >= (1, 10):
log.info(
'The virtualenv \'--never-download\' option has been '
'deprecated in virtualenv(>=1.10), as such, the ' | Local variables should not be uppercased. | saltstack_salt | train |
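The commit above enforces a Python naming convention rather than changing behavior. A minimal sketch of the rule it applies (names here are hypothetical): uppercase identifiers are reserved for module-level constants, while values computed inside a function stay lowercase, per PEP 8.

```python
# Hypothetical sketch of the PEP 8 naming rule behind the commit above.
MINIMUM_SUPPORTED_VERSION = (1, 10)  # module-level constant: uppercase

def meets_minimum(version_string):
    # Computed per call, so it is a local: lowercase, not UPPERCASE.
    version_info = tuple(int(part) for part in version_string.split('.'))
    return version_info >= MINIMUM_SUPPORTED_VERSION

print(meets_minimum('1.11.6'))  # True
```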
80a37fbca4e81db655c41628bfdc88035da6edd3 | diff --git a/plugin/pkg/admission/namespace/lifecycle/admission_test.go b/plugin/pkg/admission/namespace/lifecycle/admission_test.go
index <HASH>..<HASH> 100644
--- a/plugin/pkg/admission/namespace/lifecycle/admission_test.go
+++ b/plugin/pkg/admission/namespace/lifecycle/admission_test.go
@@ -18,6 +18,7 @@ package lifecycle
import (
"fmt"
+ "sync"
"testing"
"k8s.io/kubernetes/pkg/admission"
@@ -39,6 +40,7 @@ func TestAdmission(t *testing.T) {
Phase: api.NamespaceActive,
},
}
+ var namespaceLock sync.RWMutex
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
store.Add(namespaceObj)
@@ -46,12 +48,16 @@ func TestAdmission(t *testing.T) {
mockClient := &testclient.Fake{}
mockClient.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
+ namespaceLock.RLock()
+ defer namespaceLock.RUnlock()
if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
return true, namespaceObj, nil
}
return true, nil, fmt.Errorf("No result for action %v", action)
})
mockClient.AddReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
+ namespaceLock.RLock()
+ defer namespaceLock.RUnlock()
return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
})
@@ -78,7 +84,9 @@ func TestAdmission(t *testing.T) {
}
// change namespace state to terminating
+ namespaceLock.Lock()
namespaceObj.Status.Phase = api.NamespaceTerminating
+ namespaceLock.Unlock()
store.Add(namespaceObj)
// verify create operations in the namespace cause an error | Fix race in lifecycle admission test
.Status.Phase is read and written by multiple goroutines as reported by `godep
go test -race` on Go <I>. Adding the mutex around the object fixes the
issue. | kubernetes_kubernetes | train |
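The fix above is Go, but the pattern travels: every reader and the writer of a shared object take the same lock. A rough Python analog (the stdlib has no reader/writer lock, so a plain Lock stands in for sync.RWMutex):

```python
import threading

class NamespaceStatus:
    """Sketch of the lock-around-shared-object pattern from the fix above."""

    def __init__(self, phase):
        self._lock = threading.Lock()  # stands in for Go's sync.RWMutex
        self._phase = phase

    def phase(self):
        with self._lock:   # readers lock too, like RLock()/RUnlock() above
            return self._phase

    def set_phase(self, phase):
        with self._lock:   # the writer holds the same lock
            self._phase = phase

status = NamespaceStatus("Active")
writer = threading.Thread(target=status.set_phase, args=("Terminating",))
writer.start()
writer.join()
print(status.phase())  # "Terminating", with no unsynchronized access
```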
9b0a166660c3b3e3b166d1e4f96115c7f338a4af | diff --git a/tests/Log/LogLoggerTest.php b/tests/Log/LogLoggerTest.php
index <HASH>..<HASH> 100755
--- a/tests/Log/LogLoggerTest.php
+++ b/tests/Log/LogLoggerTest.php
@@ -36,6 +36,17 @@ class LogLoggerTest extends TestCase
$writer->error('foo');
}
+ public function testContextIsFlushed()
+ {
+ $writer = new Logger($monolog = m::mock(Monolog::class));
+ $writer->withContext(['bar' => 'baz']);
+ $writer->withoutContext();
+
+ $monolog->expects('error')->with('foo', []);
+
+ $writer->error('foo');
+ }
+
public function testLoggerFiresEventsDispatcher()
{
$writer = new Logger($monolog = m::mock(Monolog::class), $events = new Dispatcher); | Add test for withoutContext (#<I>) | laravel_framework | train |
cab1154d7a29789eea78db5c58b9aa092c29d4dc | diff --git a/phy/gui/widgets.py b/phy/gui/widgets.py
index <HASH>..<HASH> 100644
--- a/phy/gui/widgets.py
+++ b/phy/gui/widgets.py
@@ -61,6 +61,16 @@ class IPythonView(RichJupyterWidget):
gui.add_view(self)
self.start_kernel()
self.inject(gui=gui, **kwargs)
+ try:
+ import numpy
+ self.inject(np=numpy)
+ except ImportError: # pragma: no cover
+ pass
+ try:
+ import matplotlib.pyplot as plt
+ self.inject(plt=plt)
+ except ImportError: # pragma: no cover
+ pass
@connect
def on_close_view(sender, view): | Inject common libraries in IPythonView | kwikteam_phy | train |
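The diff relies on a common idiom: optional dependencies are imported inside try/except ImportError so the feature degrades quietly instead of crashing. A self-contained sketch of just that guard (the view injection itself is omitted):

```python
def optional_namespace():
    """Collect optional libraries to expose, skipping any that are absent.
    Sketch of the guarded-import idiom used in the diff above."""
    namespace = {}
    try:
        import numpy
        namespace['np'] = numpy
    except ImportError:      # numpy not installed: feature silently degrades
        pass
    try:
        import matplotlib.pyplot as plt
        namespace['plt'] = plt
    except ImportError:
        pass
    return namespace

print(sorted(optional_namespace()))  # ['np', 'plt'] when both are installed
```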
5be47eaf27551b2ebb45ea30385686aace5a914d | diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go
index <HASH>..<HASH> 100644
--- a/cmd/kubeadm/app/apis/kubeadm/types.go
+++ b/cmd/kubeadm/app/apis/kubeadm/types.go
@@ -116,7 +116,7 @@ type ClusterConfiguration struct {
CertificatesDir string
// ImageRepository sets the container registry to pull images from.
- // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`)
+ // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/`)
// `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io`
// will be used for all the other images.
ImageRepository string
diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1beta2/types.go
index <HASH>..<HASH> 100644
--- a/cmd/kubeadm/app/apis/kubeadm/v1beta2/types.go
+++ b/cmd/kubeadm/app/apis/kubeadm/v1beta2/types.go
@@ -96,7 +96,7 @@ type ClusterConfiguration struct {
CertificatesDir string `json:"certificatesDir,omitempty"`
// ImageRepository sets the container registry to pull images from.
- // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`)
+ // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/`)
// `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io`
// will be used for all the other images.
ImageRepository string `json:"imageRepository,omitempty"`
diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta3/types.go b/cmd/kubeadm/app/apis/kubeadm/v1beta3/types.go
index <HASH>..<HASH> 100644
--- a/cmd/kubeadm/app/apis/kubeadm/v1beta3/types.go
+++ b/cmd/kubeadm/app/apis/kubeadm/v1beta3/types.go
@@ -121,7 +121,7 @@ type ClusterConfiguration struct {
CertificatesDir string `json:"certificatesDir,omitempty"`
// ImageRepository sets the container registry to pull images from.
- // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`)
+ // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/`)
// `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io`
// will be used for all the other images.
// +optional
diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go
index <HASH>..<HASH> 100644
--- a/cmd/kubeadm/app/constants/constants.go
+++ b/cmd/kubeadm/app/constants/constants.go
@@ -312,7 +312,7 @@ const (
// NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in
NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token"
- // DefaultCIImageRepository points to image registry where CI uploads images from ci-cross build job
+ // DefaultCIImageRepository points to image registry where CI uploads images from ci build job
DefaultCIImageRepository = "gcr.io/k8s-staging-ci-images"
// CoreDNSConfigMap specifies in what ConfigMap in the kube-system namespace the CoreDNS config should be stored
diff --git a/cmd/kubeadm/app/util/version.go b/cmd/kubeadm/app/util/version.go
index <HASH>..<HASH> 100644
--- a/cmd/kubeadm/app/util/version.go
+++ b/cmd/kubeadm/app/util/version.go
@@ -43,7 +43,7 @@ var (
kubeCIBucketURL = "https://storage.googleapis.com/k8s-release-dev"
kubeReleaseRegex = regexp.MustCompile(`^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`)
kubeReleaseLabelRegex = regexp.MustCompile(`^((latest|stable)+(-[1-9](\.[1-9]([0-9])?)?)?)\z`)
- kubeBucketPrefixes = regexp.MustCompile(`^((release|ci|ci-cross)/)?([-\w_\.+]+)$`)
+ kubeBucketPrefixes = regexp.MustCompile(`^((release|ci)/)?([-\w_\.+]+)$`)
)
// KubernetesReleaseVersion is helper function that can fetch | kubeadm: update references to legacy artifacts locations | kubernetes_kubernetes | train |
916fe5c81cbcc7a6f01d17e292f73254300b2476 | diff --git a/CHANGELOG b/CHANGELOG
index <HASH>..<HASH> 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,6 @@
= ActiveMerchant CHANGELOG
+* PayGate: Add support for refunds [StephanAtG2]
* PayPal: Add #scrub for scrubbing PCI information out of HTTP transcripts [girasquid]
* Stripe: Add #scrub for scrubbing PCI information out of HTTP transcripts [girasquid]
diff --git a/lib/active_merchant/billing/gateways/pay_gate_xml.rb b/lib/active_merchant/billing/gateways/pay_gate_xml.rb
index <HASH>..<HASH> 100644
--- a/lib/active_merchant/billing/gateways/pay_gate_xml.rb
+++ b/lib/active_merchant/billing/gateways/pay_gate_xml.rb
@@ -178,6 +178,13 @@ module ActiveMerchant #:nodoc:
action = 'settletx'
options.merge!(:money => money, :authorization => authorization)
+ commit_capture(action, authorization, build_request(action, options))
+ end
+
+ def refund(money, authorization, options={})
+ action = 'refundtx'
+
+ options.merge!(:money => money, :authorization => authorization)
commit(action, build_request(action, options))
end
@@ -201,6 +208,10 @@ module ActiveMerchant #:nodoc:
money = options.delete(:money)
authorization = options.delete(:authorization)
build_capture(protocol, money, authorization, options)
+ when 'refundtx'
+ money = options.delete(:money)
+ authorization = options.delete(:authorization)
+ build_refund(protocol, money, authorization, options)
else
raise "no action specified for build_request"
end
@@ -228,6 +239,13 @@ module ActiveMerchant #:nodoc:
}
end
+ def build_refund(xml, money, authorization, options={})
+ xml.tag! 'refundtx', {
+ :tid => authorization,
+ :amt => amount(money)
+ }
+ end
+
def parse(action, body)
hash = {}
xml = REXML::Document.new(body)
@@ -252,6 +270,14 @@ module ActiveMerchant #:nodoc:
)
end
+ def commit_capture(action, authorization, request)
+ response = parse(action, ssl_post(self.live_url, request))
+ Response.new(successful?(response), message_from(response), response,
+ :test => test?,
+ :authorization => authorization
+ )
+ end
+
def message_from(response)
(response[:rdesc] || response[:edesc])
end
diff --git a/test/remote/gateways/remote_pay_gate_xml_test.rb b/test/remote/gateways/remote_pay_gate_xml_test.rb
index <HASH>..<HASH> 100644
--- a/test/remote/gateways/remote_pay_gate_xml_test.rb
+++ b/test/remote/gateways/remote_pay_gate_xml_test.rb
@@ -52,4 +52,13 @@ class RemotePayGateXmlTest < Test::Unit::TestCase
assert_failure response
assert_equal 'Incorrect Credentials Supplied', response.message
end
+
+ def test_purchase_and_full_credit
+ purchase = @gateway.purchase(@amount, @credit_card, @options)
+ assert_success purchase
+
+ credit = @gateway.refund(@amount, purchase.authorization, :note => 'Sorry')
+ assert_success credit
+ assert credit.test?
+ end
end
diff --git a/test/unit/gateways/pay_gate_xml_test.rb b/test/unit/gateways/pay_gate_xml_test.rb
index <HASH>..<HASH> 100644
--- a/test/unit/gateways/pay_gate_xml_test.rb
+++ b/test/unit/gateways/pay_gate_xml_test.rb
@@ -7,9 +7,10 @@ class PayGateTest < Test::Unit::TestCase
@amount = 245000
@credit_card = credit_card('4000000000000002')
@declined_card = credit_card('4000000000000036')
-
+
+ # May need to generate a unique order id as server responds with duplicate order detected
@options = {
- :order_id => 'abc123',
+ :order_id => Time.now.getutc,
:billing_address => address,
:description => 'Store Purchase',
}
@@ -38,6 +39,15 @@ class PayGateTest < Test::Unit::TestCase
assert response.test?
end
+ def test_successful_refund
+ @gateway.expects(:ssl_post).returns(successful_refund_response)
+
+ assert response = @gateway.refund(@amount, '16996548', @options)
+ assert_instance_of Response, response
+ assert_success response
+
+ assert response.test?
+ end
def test_unsuccessful_request
@gateway.expects(:ssl_post).returns(failed_authorization_response)
@@ -82,5 +92,15 @@ class PayGateTest < Test::Unit::TestCase
ENDOFXML
end
+ def successful_refund_response
+ <<-ENDOFXML
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE protocol SYSTEM "https://www.paygate.co.za/payxml/payxml_v4.dtd">
+ <protocol ver="4.0" pgid="10011021600" pwd="test" >
+ <refundrx tid="16996548" cref="abc123" stat="5" sdesc="Received by Paygate" res="990005" rdesc="Request for Refund Received" bno="" />
+ </protocol>
+ ENDOFXML
+ end
+
end | PayGate: add support for refund
Closes #<I> | activemerchant_active_merchant | train |
9e819c5da1f920a81cdb62031b9f2d62d07cedf6 | diff --git a/app/controllers/api/v1/owners_controller.rb b/app/controllers/api/v1/owners_controller.rb
index <HASH>..<HASH> 100644
--- a/app/controllers/api/v1/owners_controller.rb
+++ b/app/controllers/api/v1/owners_controller.rb
@@ -34,7 +34,7 @@ class Api::V1::OwnersController < Api::BaseController
end
def gems
- user = User.find_by_handle(params[:handle])
+ user = User.find_by_slug!(params[:handle])
if user
rubygems = user.rubygems.with_versions
respond_to do |format|
diff --git a/test/functional/api/v1/owners_controller_test.rb b/test/functional/api/v1/owners_controller_test.rb
index <HASH>..<HASH> 100644
--- a/test/functional/api/v1/owners_controller_test.rb
+++ b/test/functional/api/v1/owners_controller_test.rb
@@ -48,6 +48,24 @@ class Api::V1::OwnersControllerTest < ActionController::TestCase
YAML.load body
end
+ context "on GET to owner gems with handle" do
+ setup do
+ @user = create(:user)
+ get :gems, handle: @user.handle, format: :json
+ end
+
+ should respond_with :success
+ end
+
+ context "on GET to owner gems with id" do
+ setup do
+ @user = create(:user)
+ get :gems, handle: @user.id, format: :json
+ end
+
+ should respond_with :success
+ end
+
should "route POST" do
route = {:controller => 'api/v1/owners',
:action => 'create', | update owners api to accept user id | rubygems_rubygems.org | train |
6c3a168d10aeab5aec48fa565347553fb864e96a | diff --git a/lib/slop/option.rb b/lib/slop/option.rb
index <HASH>..<HASH> 100644
--- a/lib/slop/option.rb
+++ b/lib/slop/option.rb
@@ -65,17 +65,10 @@ class Slop
@delimiter = options[:delimiter] || ','
@limit = options[:limit] || 0
- if @long_flag && @long_flag.size > @slop.longest_flag
- if @help.respond_to? :to_str
- size = @long_flag.size + @help.size
- else
- size = @long_flag.size
- end
- @slop.longest_flag = size
- end
-
@callback = blk if block_given?
@callback ||= options[:callback]
+
+ build_longest_flag
end
# @return [Boolean] true if this option expects an argument
@@ -176,5 +169,16 @@ class Slop
value
end
end
+
+ def build_longest_flag
+ if @long_flag && @long_flag.size > @slop.longest_flag
+ if @help.respond_to? :to_str
+ size = @long_flag.size + @help.size
+ else
+ size = @long_flag.size
+ end
+ @slop.longest_flag = size
+ end
+ end
end
end | move this logic into a private method | leejarvis_slop | train |
1de35ebb2dc4281427c6660f3e3cd453d736f256 | diff --git a/salt/utils/debug.py b/salt/utils/debug.py
index <HASH>..<HASH> 100644
--- a/salt/utils/debug.py
+++ b/salt/utils/debug.py
@@ -40,4 +40,9 @@ def enable_sigusr1_handler():
Pretty print a stack trace to the console or a debug log under /tmp
when any of the salt daemons such as salt-master are sent a SIGUSR1
'''
- signal.signal(signal.SIGUSR1, _handle_sigusr1)
+ # Skip setting up this signal on Windows
+ # SIGUSR1 doesn't exist on Windows and causes the minion to crash
+ if os.environ['os'].startswith('Windows'):
+ pass
+ else:
+ signal.signal(signal.SIGUSR1, _handle_sigusr1) | Skip this signal creation on Windows.
SIGUSR1 doesn't exist on Windows and this event was causing
the minion to crash on Windows
I'm not sure this is the best way to take care of this. But I'm not
sure how to actually implement this in Windows yet.
Google seems to imply that win<I>events would provide this
functionality, but I haven't figured out how to use it yet. | saltstack_salt | train |
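The guard in the diff reads the `os` environment variable, which is normally only set on Windows. A sketch of a more portable version of the same check, using sys.platform instead (the handler body is illustrative):

```python
import signal
import sys
import traceback

def _handle_sigusr1(signum, frame):
    # Illustrative handler: dump the current stack to the console.
    traceback.print_stack(frame)

def enable_sigusr1_handler():
    # signal.SIGUSR1 does not exist on Windows, so only touch it elsewhere.
    if sys.platform != 'win32':
        signal.signal(signal.SIGUSR1, _handle_sigusr1)

enable_sigusr1_handler()
```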
0b7f38e00ac509cd41c1d9a6069665d90dd614ca | diff --git a/Gemfile.lock b/Gemfile.lock
index <HASH>..<HASH> 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,9 +1,9 @@
PATH
remote: .
specs:
- authlogic (3.2.1)
- activerecord (>= 3.0.0)
- activesupport (>= 3.0.0)
+ authlogic (3.3.0)
+ activerecord (>= 3.2)
+ activesupport (>= 3.2)
GEM
remote: http://rubygems.org/
diff --git a/authlogic.gemspec b/authlogic.gemspec
index <HASH>..<HASH> 100644
--- a/authlogic.gemspec
+++ b/authlogic.gemspec
@@ -3,7 +3,7 @@ $:.push File.expand_path("../lib", __FILE__)
Gem::Specification.new do |s|
s.name = "authlogic"
- s.version = "3.2.1"
+ s.version = "3.3.0"
s.platform = Gem::Platform::RUBY
s.authors = ["Ben Johnson"]
s.email = ["[email protected]"]
@@ -11,8 +11,8 @@ Gem::Specification.new do |s|
s.summary = %q{A clean, simple, and unobtrusive ruby authentication solution.}
s.description = %q{A clean, simple, and unobtrusive ruby authentication solution.}
- s.add_dependency 'activerecord', '>= 3.0.0'
- s.add_dependency 'activesupport', '>= 3.0.0'
+ s.add_dependency 'activerecord', '>= 3.2'
+ s.add_dependency 'activesupport', '>= 3.2'
s.add_development_dependency 'rake'
s.add_development_dependency 'bcrypt-ruby'
s.add_development_dependency 'scrypt'
diff --git a/lib/authlogic/acts_as_authentic/login.rb b/lib/authlogic/acts_as_authentic/login.rb
index <HASH>..<HASH> 100644
--- a/lib/authlogic/acts_as_authentic/login.rb
+++ b/lib/authlogic/acts_as_authentic/login.rb
@@ -90,21 +90,19 @@ module Authlogic
end
# This method allows you to find a record with the given login. If you notice, with Active Record you have the
- # validates_uniqueness_of validation function. They give you a :case_sensitive option. I handle this in the same
- # manner that they handle that. If you are using the login field and set false for the :case_sensitive option in
- # validates_uniqueness_of_login_field_options this method will modify the query to look something like:
+ # UniquenessValidator class. They give you a :case_sensitive option. I handle this in the same
+ # manner that they handle that. If you are using the login field, set false for the :case_sensitive option in
+ # validates_uniqueness_of_login_field_options and the column doesn't have a case-insensitive collation,
+ # this method will modify the query to look something like:
#
- # where("LOWER(#{quoted_table_name}.#{login_field}) = ?", login.downcase).first
+ # "LOWER(#{quoted_table_name}.#{login_field}) = LOWER(#{login})"
#
- # If you don't specify this it calls the good old find_by_* method:
+ # If you don't specify this it just uses a regular case-sensitive search (with the binary modifier if necessary):
#
- # find_by_login(login)
+ # "BINARY #{login_field} = #{login}"
#
# The above also applies for using email as your login, except that you need to set the :case_sensitive in
# validates_uniqueness_of_email_field_options to false.
- #
- # The only reason I need to do the above is for Postgres and SQLite since they perform case sensitive searches with the
- # find_by_* methods.
def find_by_smart_case_login_field(login)
if login_field
find_with_case(login_field, login, validates_uniqueness_of_login_field_options[:case_sensitive] != false)
@@ -115,11 +113,14 @@ module Authlogic
private
def find_with_case(field, value, sensitivity = true)
- if sensitivity
- send("find_by_#{field}", value)
+ relation = if not sensitivity
+ connection.case_insensitive_comparison(arel_table, field.to_s, columns_hash[field.to_s], value)
else
- where("LOWER(#{quoted_table_name}.#{field}) = ?", value.mb_chars.downcase).first
+ value = connection.case_sensitive_modifier(value) if value
+ relation = arel_table[field.to_s].eq(value)
end
+
+ where(relation).first
end
end
diff --git a/lib/authlogic/session/active_record_trickery.rb b/lib/authlogic/session/active_record_trickery.rb
index <HASH>..<HASH> 100644
--- a/lib/authlogic/session/active_record_trickery.rb
+++ b/lib/authlogic/session/active_record_trickery.rb
@@ -27,17 +27,7 @@ module Authlogic
def human_name(*args)
I18n.t("models.#{name.underscore}", {:count => 1, :default => name.humanize})
end
-
- # For rails < 2.3, mispelled
- def self_and_descendents_from_active_record
- [self]
- end
-
- # For rails >= 2.3, mispelling fixed
- def self_and_descendants_from_active_record
- [self]
- end
-
+
# For rails >= 3.0
def model_name
if defined?(::ActiveModel)
diff --git a/test/session_test/active_record_trickery_test.rb b/test/session_test/active_record_trickery_test.rb
index <HASH>..<HASH> 100644
--- a/test/session_test/active_record_trickery_test.rb
+++ b/test/session_test/active_record_trickery_test.rb
@@ -12,14 +12,6 @@ module SessionTest
assert_equal "Usersession", UserSession.human_name
end
- def test_self_and_descendents_from_active_record
- assert_equal [UserSession], UserSession.self_and_descendents_from_active_record
- end
-
- def test_self_and_descendants_from_active_record
- assert_equal [UserSession], UserSession.self_and_descendants_from_active_record
- end
-
def test_i18n_of_human_name
I18n.backend.store_translations 'en', :authlogic => {:models => {:user_session => "MySession" } }
assert_equal "MySession", UserSession.human_name | version to <I>, require rails <I>, improve find_by_smart_case_login_field | binarylogic_authlogic | train |
417b915e5422a9704c851493bca75c7a666cb0bc | diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -267,8 +267,8 @@ class PathFinder():
p = os.path.join(HADOOP_HOME, "c++", HADOOP_ARCH_STR, "lib")
if os.path.isdir(p):
self.__hdfs_link = [p]
- if not os.path.exists("/usr/lib/libhdfs.so"):
- self.__error("HDFS link paths", "HDFS_LINK")
+ elif not os.path.exists("/usr/lib/libhdfs.so"):
+ self.__error("HDFS link paths", "HDFS_LINK")
return self.__hdfs_link | fixed a bug in setup.py | crs4_pydoop | train |
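The one-word fix above (if to elif) is a control-flow change: the fallback existence check must run only when the primary directory was not found. A small sketch of the difference, with hypothetical paths:

```python
import os

def find_hdfs_link(primary_dir, fallback_so):
    """Sketch of the if -> elif fix; paths are hypothetical."""
    link_dirs = []
    if os.path.isdir(primary_dir):
        link_dirs = [primary_dir]
    elif not os.path.exists(fallback_so):
        # With a plain `if` here, this error fired even when the
        # primary directory above had already been found.
        raise RuntimeError('HDFS link paths not found')
    return link_dirs  # may be empty if only the fallback .so exists

try:
    print(find_hdfs_link('/opt/hadoop/c++/Linux-amd64-64/lib',
                         '/usr/lib/libhdfs.so'))
except RuntimeError as exc:
    print(exc)
```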
f15da2488558b7d63aa4708bb27863d6ad8a11d9 | diff --git a/lib/bundler/gem_version_tasks/version.rb b/lib/bundler/gem_version_tasks/version.rb
index <HASH>..<HASH> 100644
--- a/lib/bundler/gem_version_tasks/version.rb
+++ b/lib/bundler/gem_version_tasks/version.rb
@@ -1,5 +1,5 @@
module Bundler
class GemVersionTasks
-VERSION='0.2.0'
+VERSION='0.2.1'
end
end | Bumping to version <I> | yarmand_bundler-gem_version_tasks | train |
20f26c4d30160514bace8e394a9b87e41082341a | diff --git a/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfiguration.java b/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfiguration.java
index <HASH>..<HASH> 100644
--- a/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfiguration.java
+++ b/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfiguration.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2018 the original author or authors.
+ * Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/spring-boot-project/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfigurationTests.java b/spring-boot-project/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfigurationTests.java
index <HASH>..<HASH> 100644
--- a/spring-boot-project/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfigurationTests.java
+++ b/spring-boot-project/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/mongo/embedded/EmbeddedMongoAutoConfigurationTests.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2018 the original author or authors.
+ * Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import java.util.EnumSet;
import java.util.stream.Collectors;
import com.mongodb.MongoClient;
+import de.flapdoodle.embed.mongo.MongodExecutable;
import de.flapdoodle.embed.mongo.config.IMongodConfig;
import de.flapdoodle.embed.mongo.config.Storage;
import de.flapdoodle.embed.mongo.distribution.Feature;
@@ -174,6 +175,13 @@ public class EmbeddedMongoAutoConfigurationTests {
.isEqualTo("testing");
}
+ @Test
+ public void shutdownHookIsNotRegistered() {
+ load();
+ assertThat(this.context.getBean(MongodExecutable.class).isRegisteredJobKiller())
+ .isFalse();
+ }
+
private void assertVersionConfiguration(String configuredVersion,
String expectedVersion) {
this.context = new AnnotationConfigApplicationContext(); | Polish "Fix stopping of Embedded Mongo before context is closed"
See gh-<I> | spring-projects_spring-boot | train |
4f32cec30fb456576ec70dea24f6056b58e8725d | diff --git a/keys.go b/keys.go
index <HASH>..<HASH> 100644
--- a/keys.go
+++ b/keys.go
@@ -264,13 +264,11 @@ func (priv *PrivateKey) sign(rand io.Reader, hashed []byte) ([]byte, error) {
}
func (pub *PublicKey) verify(hashed, sig []byte) (nextPoint []byte, sigOk bool) {
- // TODO: errors?
- if len(sig) != 2*20 {
- // TODO: this is likely to be incorrect. We need only to see if it's _less_ than the expected signature length
+ if len(sig) < 2*20 {
return nil, false
}
r := new(big.Int).SetBytes(sig[:20])
- s := new(big.Int).SetBytes(sig[20:])
+ s := new(big.Int).SetBytes(sig[20:40])
ok := dsa.Verify(&pub.PublicKey, hashed, r, s)
return sig[20*2:], ok
} | Fix the signature verification to check only for a minimum length. | coyim_otr3 | train |
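The fix above replaces an exact-length check with a minimum-length check, then slices both 20-byte integers explicitly so trailing bytes survive. The same slicing discipline in a standalone Python sketch (the DSA verification itself is omitted):

```python
def split_dsa_signature(sig: bytes):
    """Require at least two 20-byte big-endian integers, slice them
    exactly, and return whatever follows.  Sketch of the fix above."""
    if len(sig) < 2 * 20:
        return None, None, False
    r = int.from_bytes(sig[:20], 'big')
    s = int.from_bytes(sig[20:40], 'big')  # not sig[20:], which would also
    rest = sig[40:]                        # swallow the trailing bytes
    return (r, s), rest, True

values, rest, ok = split_dsa_signature(b'\x01' * 40 + b'next-point')
print(ok, rest)  # True b'next-point'
```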
91de0d33b26067cad0fb633165f04a5d7fd7d85a | diff --git a/dedupe/clustering/hierarchical.py b/dedupe/clustering/hierarchical.py
index <HASH>..<HASH> 100644
--- a/dedupe/clustering/hierarchical.py
+++ b/dedupe/clustering/hierarchical.py
@@ -3,6 +3,9 @@
import numpy
import fastcluster
import hcluster
+import networkx
+from networkx.algorithms.components.connected import connected_components
+import itertools
def condensedDistance(dupes):
@@ -47,17 +50,40 @@ def cluster(dupes, threshold=.5):
number will increase precision, raising it will increase
recall
"""
- (i_to_id, condensed_distances) = condensedDistance(dupes)
- linkage = fastcluster.linkage(condensed_distances,
- method='centroid',
- preserve_input=False)
- partition = hcluster.fcluster(linkage, threshold)
+ score_dtype = [('pairs', 'i4', 2), ('score', 'f4', 1)]
+
+
+ dupe_graph = networkx.Graph()
+ dupe_graph.add_weighted_edges_from(((x[0], x[1], y) for x, y in dupes))
+ del dupes
+
+ dupe_sub_graphs = connected_components(dupe_graph)
clustering = {}
+ cluster_id = 0
+ for sub_graph in dupe_sub_graphs :
+ if len(sub_graph) > 2 :
+ pair_gen = ((x[0:2], x[2]['weight'])
+ for x
+ in dupe_graph.edges_iter(sub_graph, data=True))
+
+ pairs = numpy.fromiter(pair_gen, dtype=score_dtype)
+
+ (i_to_id, condensed_distances) = condensedDistance(pairs)
+ linkage = fastcluster.linkage(condensed_distances,
+ method='centroid',
+ preserve_input=False)
+
+ partition = hcluster.fcluster(linkage, threshold)
+
+ for (i, cluster_id) in enumerate(partition):
+ clustering.setdefault(cluster_id, []).append(i_to_id[i])
+ cluster_id += 1
- for (i, cluster_id) in enumerate(partition):
- clustering.setdefault(cluster_id, []).append(i_to_id[i])
+ else :
+ clustering[cluster_id] = sub_graph
+ cluster_id += 1
clusters = [set(l) for l in clustering.values() if len(l) > 1]
diff --git a/dedupe/dedupe.py b/dedupe/dedupe.py
index <HASH>..<HASH> 100644
--- a/dedupe/dedupe.py
+++ b/dedupe/dedupe.py
@@ -296,8 +296,6 @@ class Dedupe:
self.data_model,
pairwise_threshold)
- print self.dupes.nbytes
-
clusters = clustering.hierarchical.cluster(self.dupes, cluster_threshold)
return clusters
diff --git a/examples/sqlite_example/sqlite_clustering.py b/examples/sqlite_example/sqlite_clustering.py
index <HASH>..<HASH> 100644
--- a/examples/sqlite_example/sqlite_clustering.py
+++ b/examples/sqlite_example/sqlite_clustering.py
@@ -40,7 +40,7 @@ def candidates_gen() :
print time.time() - t0, "seconds"
block = itertools.combinations(((row['donor_id'], row) for row in con.execute('select * from donors inner join bm.blocking_map using (donor_id) where key = ? order by donor_id', (block_key,))), 2)
for candidate in block :
- cache.put(candidate, True)
+ yield candidate
diff --git a/requirements.txt b/requirements.txt
index <HASH>..<HASH> 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
fastcluster>=1.1.8
hcluster>=0.2.0
+networkx | only clustering connected components allows completion of deduplication of <I> rows | dedupeio_dedupe | train |
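The idea behind this commit, restated: hierarchical clustering over the full condensed distance matrix is quadratic in the number of records, but candidate duplicates form a sparse graph, so splitting it into connected components first lets each component be clustered independently and cheaply. A rough standalone sketch (networkx assumed available; a simple edge-weight cut stands in for the centroid-linkage step):

```python
import networkx as nx

def cluster_components(scored_pairs, threshold=0.5):
    """Build a graph of candidate duplicate pairs, then treat each
    connected component as its own small clustering problem."""
    graph = nx.Graph()
    graph.add_weighted_edges_from((a, b, score)
                                  for (a, b), score in scored_pairs)
    clusters = []
    for nodes in nx.connected_components(graph):
        component = graph.subgraph(nodes)
        # Stand-in for fastcluster/hcluster: keep confident edges only.
        strong = nx.Graph((a, b) for a, b, w in
                          component.edges(data='weight') if w >= threshold)
        clusters.extend(c for c in nx.connected_components(strong)
                        if len(c) > 1)
    return clusters

pairs = [(('a', 'b'), 0.9), (('b', 'c'), 0.2), (('x', 'y'), 0.8)]
print(cluster_components(pairs))  # e.g. [{'a', 'b'}, {'x', 'y'}]
```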
2d8e41016314582631e4bbc6311f8421dc278be5 | diff --git a/symphony/lib/toolkit/class.general.php b/symphony/lib/toolkit/class.general.php
index <HASH>..<HASH> 100644
--- a/symphony/lib/toolkit/class.general.php
+++ b/symphony/lib/toolkit/class.general.php
@@ -897,6 +897,8 @@ class General
* with the input content. This function will ignore errors in opening,
* writing, closing and changing the permissions of the resulting file.
* If opening or writing the file fail then this will return false.
+ * This method calls `clearstatcache()` in order to make sure we do not
+ * hit the cache when checking for permissions.
*
* @param string $file
* the path of the file to write.
@@ -916,6 +918,8 @@ class General
*/
public static function writeFile($file, $data, $perm = 0644, $mode = 'w', $trim = false)
{
+ clearstatcache();
+
if (
(!is_writable(dirname($file)) || !is_readable(dirname($file))) // Folder
|| (file_exists($file) && (!is_readable($file) || !is_writable($file))) // File | Call clearstatcache before checking permission
This will ensure that we do not hit the cache on is_writable and is_readable calls.
Closes #<I> | symphonycms_symphony-2 | train |
02a562a3a0fba04c94f48cd56b66af587481077a | diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index <HASH>..<HASH> 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -417,16 +417,9 @@ public final class StorageService implements IEndPointStateChangeSubscriber, Sto
Token token = getPartitioner().getTokenFactory().fromString(state.getValue());
if (logger_.isDebugEnabled())
logger_.debug(endpoint + " state normal, token " + token);
- if (isClientMode)
- {
- tokenMetadata_.update(token, endpoint);
- // do NOT update systemtable in client mode
- }
- else
- {
- tokenMetadata_.update(token, endpoint);
+ tokenMetadata_.update(token, endpoint);
+ if (!isClientMode)
SystemTable.updateToken(endpoint, token);
- }
replicationStrategy_.removeObsoletePendingRanges();
}
else if (STATE_LEAVING.equals(stateName)) | simplify client mode gossip handling. patch by Jaakko Laine; reviewed by jbellis
git-svn-id: <URL> | Stratio_stratio-cassandra | train |
4fc0ea9bf4a2d775f49d1003eac6af415740f61d | diff --git a/salt/fileclient.py b/salt/fileclient.py
index <HASH>..<HASH> 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -26,8 +26,7 @@ import salt.utils
import salt.utils.templates
import salt.utils.gzip_util
from salt._compat import (
- URLError, HTTPError, BaseHTTPServer, urlparse, urlunparse,
- url_passwd_mgr, url_auth_handler, url_build_opener, url_install_opener)
+ URLError, HTTPError, BaseHTTPServer, urlparse, urlunparse)
from salt.utils.openstack.swift import SaltSwift
log = logging.getLogger(__name__)
@@ -575,26 +574,22 @@ class Client(object):
except Exception:
raise MinionError('Could not fetch from {0}'.format(url))
+ get_kwargs = {}
if url_data.username is not None \
and url_data.scheme in ('http', 'https'):
_, netloc = url_data.netloc.split('@', 1)
fixed_url = urlunparse(
(url_data.scheme, netloc, url_data.path,
url_data.params, url_data.query, url_data.fragment))
- passwd_mgr = url_passwd_mgr()
- passwd_mgr.add_password(
- None, fixed_url, url_data.username, url_data.password)
- auth_handler = url_auth_handler(passwd_mgr)
- opener = url_build_opener(auth_handler)
- url_install_opener(opener)
+ get_kwargs['auth'] = (url_data.username, url_data.password)
else:
fixed_url = url
try:
if requests.__version__[0] == '0':
# 'stream' was called 'prefetch' before 1.0, with flipped meaning
- get_kwargs = {'prefetch': False}
+ get_kwargs['prefetch'] = False
else:
- get_kwargs = {'stream': True}
+ get_kwargs['stream'] = True
response = requests.get(fixed_url, **get_kwargs)
with salt.utils.fopen(dest, 'wb') as destfp:
for chunk in response.iter_content(chunk_size=32*1024): | Fix #<I>
The URL password gathering was not updated for the switch to requests | saltstack_salt | train |
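The replacement shown in the diff is ordinary requests usage: credentials go in an `auth` tuple on the request itself, and the body is streamed to disk in chunks. A minimal sketch of that pattern (URL and paths are placeholders):

```python
import requests

def fetch(url, dest, username=None, password=None):
    """Download url to dest, mirroring the kwargs handling in the diff."""
    get_kwargs = {'stream': True}            # stream, then write in chunks
    if username is not None:
        get_kwargs['auth'] = (username, password)  # HTTP basic auth
    response = requests.get(url, **get_kwargs)
    with open(dest, 'wb') as destfp:
        for chunk in response.iter_content(chunk_size=32 * 1024):
            destfp.write(chunk)

# fetch('https://example.com/file.tar.gz', '/tmp/file.tar.gz',
#       username='user', password='secret')
```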
f6a6bdcebcf7eefd42e9d58c37e3206361f147de | diff --git a/src/Mmanos/Metable/Stubs/MetableMigration.stub.php b/src/Mmanos/Metable/Stubs/MetableMigration.stub.php
index <HASH>..<HASH> 100644
--- a/src/Mmanos/Metable/Stubs/MetableMigration.stub.php
+++ b/src/Mmanos/Metable/Stubs/MetableMigration.stub.php
@@ -29,7 +29,7 @@ class {{class}} extends Migration
});
if ('mysql' == DB::connection()->getDriverName()) {
- DB::statement('CREATE INDEX meta_value ON user_metas (meta_id, value(255), xref_id);');
+ DB::statement('CREATE INDEX meta_value ON {{table}} (meta_id, value(255), xref_id);');
}
} | Fixed creating index for table in metas table migration stub | mmanos_laravel-metable | train |
6f13d3109007f56a620ec7ecf6345f0b63b6f38c | diff --git a/lib/poise/utils.rb b/lib/poise/utils.rb
index <HASH>..<HASH> 100644
--- a/lib/poise/utils.rb
+++ b/lib/poise/utils.rb
@@ -37,10 +37,19 @@ module Poise
# end
def find_cookbook_name(run_context, filename)
run_context.cookbook_collection.each do |name, ver|
- Chef::CookbookVersion::COOKBOOK_SEGMENTS.each do |seg|
- ver.segment_filenames(seg).each do |file|
- if file == filename
- return name
+ # This special method is added by Halite::Gem#as_cookbook_version.
+ if ver.respond_to?(:halite_root)
+ # The join is there because ../poise-ruby/lib starts with ../poise so
+ # we want a trailing /.
+ if filename.start_with?(File.join(ver.halite_root, ''))
+ return name
+ end
+ else
+ Chef::CookbookVersion::COOKBOOK_SEGMENTS.each do |seg|
+ ver.segment_filenames(seg).each do |file|
+ if file == filename
+ return name
+ end
end
end
end | Add explicit support for the hacky new #halite_root.
This is gross but it is the best solution I can think of for finding
which cookbook a lib file is defined in during spec tests, where the
lib files are outside of the "root" of the synthetic CookbookVersion
that Halite injects for us. | poise_poise | train |
36c8249a18311cd0b194eec206fe8b85eabfed6e | diff --git a/src/Local/Toolstack/Drupal.php b/src/Local/Toolstack/Drupal.php
index <HASH>..<HASH> 100644
--- a/src/Local/Toolstack/Drupal.php
+++ b/src/Local/Toolstack/Drupal.php
@@ -27,6 +27,10 @@ class Drupal extends ToolstackBase
*/
public static function isDrupal($directory, $depth = '< 2')
{
+ if (!is_dir($directory)) {
+ return false;
+ }
+
$finder = new Finder();
// Look for at least one Drush make file. | Prevent fatals when repository doesn't exist | platformsh_platformsh-cli | train |
b97a36a10f738d946cb8ff920c8e1177db40c74b | diff --git a/transport/src/test/java/io/netty/channel/ChannelInitializerTest.java b/transport/src/test/java/io/netty/channel/ChannelInitializerTest.java
index <HASH>..<HASH> 100644
--- a/transport/src/test/java/io/netty/channel/ChannelInitializerTest.java
+++ b/transport/src/test/java/io/netty/channel/ChannelInitializerTest.java
@@ -17,6 +17,7 @@ package io.netty.channel;
import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
+import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.local.LocalChannel;
import io.netty.channel.local.LocalServerChannel;
@@ -25,9 +26,11 @@ import org.junit.Before;
import org.junit.Test;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
public class ChannelInitializerTest {
private static final int TIMEOUT_MILLIS = 1000;
@@ -76,6 +79,38 @@ public class ChannelInitializerTest {
});
}
+ @Test
+ public void testAddFirstChannelInitializer() {
+ testAddChannelInitializer(true);
+ }
+
+ @Test
+ public void testAddLastChannelInitializer() {
+ testAddChannelInitializer(false);
+ }
+
+ private static void testAddChannelInitializer(final boolean first) {
+ final AtomicBoolean called = new AtomicBoolean();
+ EmbeddedChannel channel = new EmbeddedChannel(new ChannelInitializer<Channel>() {
+ @Override
+ protected void initChannel(Channel ch) throws Exception {
+ ChannelHandler handler = new ChannelInitializer<Channel>() {
+ @Override
+ protected void initChannel(Channel ch) throws Exception {
+ called.set(true);
+ }
+ };
+ if (first) {
+ ch.pipeline().addFirst(handler);
+ } else {
+ ch.pipeline().addLast(handler);
+ }
+ }
+ });
+ channel.finish();
+ assertTrue(called.get());
+ }
+
private void testChannelRegisteredEventPropagation(ChannelInitializer<LocalChannel> init) {
Channel clientChannel = null, serverChannel = null;
try { | Add test to verify that it's possible to add another ChannelInitializer in the initChannel(...) method.
Motivation:
I received a report that it's not possible to add another ChannelInitializer in the initChannel(...) method, so we should add a test case for it.
Modifications:
Added testcase.
Result:
Validate that all works as expected. | netty_netty | train |
6fc2776512457b56ab65c799b081da82b2845f59 | diff --git a/app/adapters/application.js b/app/adapters/application.js
index <HASH>..<HASH> 100644
--- a/app/adapters/application.js
+++ b/app/adapters/application.js
@@ -26,7 +26,7 @@ export default DS.PouchDBAdapter.extend(PouchAdapterUtils, {
try {
doctype = doc._id.substring(0, uidx);
if(doctype === type.typeKey) {
- if (query.containsValue) {
+ if (query.containsValue && query.containsValue.value) {
queryValue = query.containsValue.value.toLowerCase();
query.containsValue.keys.forEach(function(key) {
if (doc[key] && doc[key].toLowerCase().indexOf(queryValue) >= 0) { | Fixed js error when search query isn't present. | HospitalRun_hospitalrun-frontend | train |
91753409b93a7cfcecbe5008959bfe41d4d7edd8 | diff --git a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go b/builtin/providers/aws/cloudfront_distribution_configuration_structure.go
index <HASH>..<HASH> 100644
--- a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go
+++ b/builtin/providers/aws/cloudfront_distribution_configuration_structure.go
@@ -229,8 +229,14 @@ func defaultCacheBehaviorHash(v interface{}) int {
}
}
if d, ok := m["lambda_function_association"]; ok {
- s := d.(*schema.Set)
- for _, lfa := range s.List() {
+ var associations []interface{}
+ switch d.(type) {
+ case *schema.Set:
+ associations = d.(*schema.Set).List()
+ default:
+ associations = d.([]interface{})
+ }
+ for _, lfa := range associations {
buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{}))))
}
}
@@ -367,8 +373,14 @@ func cacheBehaviorHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", d))
}
if d, ok := m["lambda_function_association"]; ok {
- s := d.(*schema.Set)
- for _, lfa := range s.List() {
+ var associations []interface{}
+ switch d.(type) {
+ case *schema.Set:
+ associations = d.(*schema.Set).List()
+ default:
+ associations = d.([]interface{})
+ }
+ for _, lfa := range associations {
buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{}))))
}
} | fix issue where, depending on creation or flattening, the lambda functions may be a set or an []interface{} | hashicorp_terraform | train |
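The Go fix normalizes a value that may arrive as either a set wrapper or a plain slice before iterating it. The same defensive shape in Python (container types chosen purely for illustration):

```python
def iter_associations(value):
    """Accept either a set-like container or a plain list and hand
    back one predictable iterable, as the type switch above does."""
    if isinstance(value, (set, frozenset)):
        return sorted(value)     # sets are unordered, so fix an order
    return list(value)

print(iter_associations({'lambda-b', 'lambda-a'}))  # ['lambda-a', 'lambda-b']
print(iter_associations(['lambda-a', 'lambda-b']))  # ['lambda-a', 'lambda-b']
```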
3047101b4bcd55bbee0295b204e32e370b65b45c | diff --git a/test/conftest.py b/test/conftest.py
index <HASH>..<HASH> 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -204,7 +204,10 @@ def socket(request):
signal.alarm(5)
def end_socket():
- socket.close(request.node.rep_call.failed)
+ failed = False
+ if hasattr(request.node, 'rep_call'):
+ failed = request.node.rep_call.failed
+ socket.close(failed)
signal.alarm(0)
request.addfinalizer(end_socket)
diff --git a/test/test_no_error.py b/test/test_no_error.py
index <HASH>..<HASH> 100644
--- a/test/test_no_error.py
+++ b/test/test_no_error.py
@@ -2,12 +2,6 @@
from .conftest import use
-@use('error_ignored_in_script.py')
-def test_with_error_ignored(socket):
- # If it doesn't timeout this is good
- socket.join()
-
-
@use('error_not_ignored_in_script.py')
def test_with_error_not_ignored_because_of_full(socket):
socket.start() | Now we need a connection even though there's no interaction | Kozea_wdb | train |
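The fix guards a possibly missing attribute with hasattr before dereferencing it. An equivalent, slightly more compact Python idiom chains getattr with defaults (the stub classes below only make the sketch self-contained):

```python
class _Report:        # stub standing in for pytest's call report
    failed = True

class _Node:          # stub node with no rep_call attribute
    pass

def was_failed(node):
    # Equivalent to the hasattr guard in the diff above.
    report = getattr(node, 'rep_call', None)
    return getattr(report, 'failed', False)

node = _Node()
print(was_failed(node))       # False, instead of an AttributeError
node.rep_call = _Report()
print(was_failed(node))       # True
```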
b77dce65d5b28ca261002a64e477307193409132 | diff --git a/lib/mango/dependencies.rb b/lib/mango/dependencies.rb
index <HASH>..<HASH> 100644
--- a/lib/mango/dependencies.rb
+++ b/lib/mango/dependencies.rb
@@ -9,9 +9,9 @@ module Mango
# is not always a better version.
#
# `Mango::Dependencies` automatically enforces a strict parse-time check for the
- # `REQUIRED_RUBY_VERSION` on both application and development processes for the `Mango` library.
- # (i.e. `bin/mango`, `rake`, `spec`, etc) Because of this, I've ensured this file is
- # syntactically compatible with Ruby 1.8.6 or higher.
+ # `SUPPORTED_RUBY_VERSIONS` on both application and development processes for the `Mango`
+ # library. (i.e. `bin/mango`, `rake`, `spec`, `rackup`, etc) Because of this, I've ensured this
+ # file is syntactically compatible with Ruby 1.8.7 or higher.
#
# Currently, `Mango` does **not** enforce strict parse-time version checking on `DEVELOPMENT_GEMS`.
# In the future, I would like to experiment with using RubyGems and the `Kernel#gem` method to
@@ -36,8 +36,7 @@ module Mango
# @see Mango::Dependencies.create_warning_for
# @see Mango::Dependencies.warn_at_exit
class Dependencies
- # For now, starting with Ruby 1.9.1 but I would like to experiment with compatibility with Ruby >= 1.9.1 in the future.
- REQUIRED_RUBY_VERSIONS = ["1.9.1", "1.9.2"]
+ SUPPORTED_RUBY_VERSIONS = ["1.9.1", "1.9.2"]
# bluecloth is a hidden yard dependency for markdown support
DEVELOPMENT_GEMS = {
@@ -49,9 +48,9 @@ module Mango
}
FILE_NAME_TO_GEM_NAME = {
- :"rack/test" => :"rack-test",
- :"spec/rake/spectask" => :rspec,
- :"yard/sinatra" => :"yard-sinatra"
+ :"rack/test" => :"rack-test",
+ :"rspec/core/rake_task" => :rspec,
+ :"yard/sinatra" => :"yard-sinatra"
}
# Empties the warnings cache. This method is called when the class is required.
@@ -107,15 +106,15 @@ module Mango
private
# Checks that the version of the current Ruby process matches the one of the
- # `REQUIRED_RUBY_VERSIONS`. This method is automatically invoked at the first time this class
+ # `SUPPORTED_RUBY_VERSIONS`. This method is automatically invoked at the first time this class
# is required, ensuring the correct Ruby version at parse-time.
#
# @param [String] ruby_version Useful for automated specifications. Defaults to `RUBY_VERSION`.
# @raise [SystemExit] Raised, with a message, when the process is using an incorrect version of Ruby.
def self.check_ruby_version(ruby_version = RUBY_VERSION)
- unless REQUIRED_RUBY_VERSIONS.include?(ruby_version)
+ unless SUPPORTED_RUBY_VERSIONS.include?(ruby_version)
abort <<-ERROR
-This library requires Ruby #{REQUIRED_RUBY_VERSIONS.join(" or ")}, but you're using #{ruby_version}.
+This library requires Ruby #{SUPPORTED_RUBY_VERSIONS.join(" or ")}, but you're using #{ruby_version}.
Please visit http://www.ruby-lang.org/ or http://rvm.beginrescueend.com/ for installation instructions.
ERROR
end
diff --git a/spec/mango/dependencies_spec.rb b/spec/mango/dependencies_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/mango/dependencies_spec.rb
+++ b/spec/mango/dependencies_spec.rb
@@ -6,8 +6,8 @@ describe Mango::Dependencies do
#################################################################################################
describe "class constant and variable defaults" do
- it "required ruby version should be correct" do
- Mango::Dependencies::REQUIRED_RUBY_VERSIONS.should == ["1.9.1", "1.9.2"]
+ it "supports ruby 1.9.1 or 1.9.2" do
+ Mango::Dependencies::SUPPORTED_RUBY_VERSIONS.should == ["1.9.1", "1.9.2"]
end
it "development gem names and versions should be correct" do
@@ -24,9 +24,9 @@ describe Mango::Dependencies do
it "file name to gem name look-up table should be correct" do
expected = {
- :"rack/test" => :"rack-test",
- :"spec/rake/spectask" => :rspec,
- :"yard/sinatra" => :"yard-sinatra"
+ :"rack/test" => :"rack-test",
+ :"rspec/core/rake_task" => :rspec,
+ :"yard/sinatra" => :"yard-sinatra"
}
Mango::Dependencies::FILE_NAME_TO_GEM_NAME.should == expected
end
@@ -156,7 +156,7 @@ Please visit http://www.ruby-lang.org/ or http://rvm.beginrescueend.com/ for ins
end
it "displays a warning message to the user on the standard output channel" do
- Mango::Dependencies.create_warning_for(LoadError.new("no such file to load -- spec/rake/spectask"))
+ Mango::Dependencies.create_warning_for(LoadError.new("no such file to load -- rspec/core/rake_task"))
Mango::Dependencies.create_warning_for(LoadError.new("no such file to load -- yard"))
Mango::Dependencies.create_warning_for(LoadError.new("no such file to load -- bluecloth"))
Mango::Dependencies.render_warnings | lib/mango/dependencies.rb
* Refactored REQUIRED_RUBY_VERSIONS to SUPPORTED_RUBY_VERSIONS
* Forgot to refactor :"spec/rake/spectask" to :"rspec/core/rake_task"
spec/mango/dependencies_spec.rb
* Updated spec coverage to reflect these changes | ryansobol_mango | train |
1ac8ab2bf82800d80a8335841a5c83aa6aa3e5b6 | diff --git a/src/main/java/com/blackducksoftware/integration/hub/HubScanJobConfigBuilder.java b/src/main/java/com/blackducksoftware/integration/hub/HubScanJobConfigBuilder.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/blackducksoftware/integration/hub/HubScanJobConfigBuilder.java
+++ b/src/main/java/com/blackducksoftware/integration/hub/HubScanJobConfigBuilder.java
@@ -52,6 +52,34 @@ public class HubScanJobConfigBuilder {
public void assertValid(final IntLogger logger) throws HubIntegrationException, IOException {
boolean valid = true;
+ if (validateProjectAndVersion(logger)) {
+ valid = false;
+ }
+
+ if (validateScanMemory(logger, DEFAULT_MEMORY_IN_MEGABYTES)) {
+ valid = false;
+ }
+
+ if (validateShouldGenerateRiskReport(logger)) {
+ valid = false;
+ }
+
+ if (validateMaxWaitTimeForRiskReport(logger, DEFAULT_REPORT_WAIT_TIME_IN_MINUTES)) {
+ valid = false;
+ }
+
+ if (validateScanTargetPaths(logger, workingDirectory)) {
+ valid = false;
+ }
+
+ if (!valid) {
+ throw new HubIntegrationException("The configuration is not valid - please check the log for the specific issues.");
+ }
+ }
+
+ public boolean validateProjectAndVersion(final IntLogger logger) throws IOException {
+ boolean valid = true;
+
if (null == projectName && null == version) {
logger.warn("No Project name or Version were found. Any scans run will not be mapped to a Version.");
} else if (null == projectName) {
@@ -62,28 +90,56 @@ public class HubScanJobConfigBuilder {
logger.error("No Version was found.");
}
+ return valid;
+ }
+
+ public boolean validateScanMemory(final IntLogger logger) throws IOException {
+ return validateScanMemory(logger, null);
+ }
+
+ private boolean validateScanMemory(final IntLogger logger, final Integer defaultScanMemory) throws IOException {
+ boolean scanMemoryValid = true;
+
+ if (scanMemory < MINIMUM_MEMORY_IN_MEGABYTES && defaultScanMemory != null) {
+ scanMemory = defaultScanMemory;
+ }
+
if (scanMemory < MINIMUM_MEMORY_IN_MEGABYTES) {
- valid = false;
+ scanMemoryValid = false;
logger.error("The minimum amount of memory for the scan is " + MINIMUM_MEMORY_IN_MEGABYTES + " MB.");
}
+ return scanMemoryValid;
+ }
+
+ public boolean validateShouldGenerateRiskReport(final IntLogger logger) throws IOException {
+ boolean shouldGenerateReportValid = true;
+
if ((null == projectName || null == version) && shouldGenerateRiskReport) {
- valid = false;
+ shouldGenerateReportValid = false;
logger.error("To generate the Risk Report, you need to provide a Project name or version.");
}
- if (shouldGenerateRiskReport && maxWaitTimeForRiskReport <= 0) {
- valid = false;
- logger.error("The maximum wait time for the Risk Report must be greater than 0.");
- }
+ return shouldGenerateReportValid;
+ }
- if (!validateScanTargetPaths(logger, workingDirectory)) {
- valid = false;
- }
+ public boolean validateMaxWaitTimeForRiskReport(final IntLogger logger) throws IOException {
+ return validateMaxWaitTimeForRiskReport(logger, null);
+ }
- if (!valid) {
- throw new HubIntegrationException("The configuration is not valid - please check the log for the specific issues.");
+ private boolean validateMaxWaitTimeForRiskReport(final IntLogger logger, final Integer defaultMaxWaitTime) throws IOException {
+ boolean waitTimeValid = true;
+
+ if (shouldGenerateRiskReport && maxWaitTimeForRiskReport <= 0) {
+ if (defaultMaxWaitTime != null) {
+ maxWaitTimeForRiskReport = defaultMaxWaitTime;
+ } else {
+ waitTimeValid = false;
+ logger.error("The maximum wait time for the Risk Report must be greater than 0.");
+ }
}
+
+ return waitTimeValid;
}
/** | IHCIC-<I> Updating the validation | blackducksoftware_blackduck-common | train |
316b2665286d83a1e19b32bead5638bc6aaadd5a | diff --git a/Storage.py b/Storage.py
index <HASH>..<HASH> 100644
--- a/Storage.py
+++ b/Storage.py
@@ -890,7 +890,7 @@ def db_write_data(c, workspace_dir, parent_uuid, key, data, data_file_path, data
assert data is not None
data_directory = os.path.join(workspace_dir, "Nion Swift Data")
absolute_file_path = os.path.join(workspace_dir, "Nion Swift Data", data_file_path)
- logging.debug("WRITE data file %s for %s", absolute_file_path, key)
+ #logging.debug("WRITE data file %s for %s", absolute_file_path, key)
db_make_directory_if_needed(os.path.dirname(absolute_file_path))
pickle.dump(data, open(absolute_file_path, "wb"), pickle.HIGHEST_PROTOCOL)
# convert to utc time. this is temporary until datetime is cleaned up (again) and we can get utc directly from datetime.
@@ -913,7 +913,7 @@ def db_get_data(c, workspace_dir, parent_uuid, key, default_value=None):
if data_row:
data_file_path = data_row[0]
absolute_file_path = os.path.join(workspace_dir, "Nion Swift Data", data_file_path)
- logging.debug("READ data file %s for %s", absolute_file_path, key)
+ #logging.debug("READ data file %s for %s", absolute_file_path, key)
if os.path.isfile(absolute_file_path):
data = pickle.load(open(absolute_file_path, "rb"))
return data if data is not None else default_value
@@ -1097,7 +1097,7 @@ class DbDatastore(object):
data_file_path = row[0]
data_directory = os.path.join(self.workspace_dir, "Nion Swift Data")
absolute_file_path = os.path.join(self.workspace_dir, "Nion Swift Data", data_file_path)
- logging.debug("DELETE data file %s", absolute_file_path)
+ #logging.debug("DELETE data file %s", absolute_file_path)
if os.path.isfile(absolute_file_path):
os.remove(absolute_file_path)
self.execute(c, "DELETE FROM data WHERE uuid = ?", (str(uuid_), )) | Disable file read/write log messages.
svn r<I> | nion-software_nionswift | train |
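Commenting out logging.debug calls works, but the logging module can silence them by level instead, which keeps the call sites intact. A hedged sketch of that alternative (the logger name and paths are made up):

```python
import logging

log = logging.getLogger('swift.storage')   # hypothetical logger name

def write_data_file(path, key):
    # Cheap when DEBUG is disabled: no string formatting, no I/O.
    log.debug("WRITE data file %s for %s", path, key)

logging.basicConfig(level=logging.INFO)    # DEBUG messages suppressed
write_data_file('/tmp/data.bin', 'master_data')

log.setLevel(logging.DEBUG)                # flip them back on when needed
write_data_file('/tmp/data.bin', 'master_data')
```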
41f30a8fc46a564039923cacd4fdcd107c9498d4 | diff --git a/ui/src/directives/TouchPan.js b/ui/src/directives/TouchPan.js
index <HASH>..<HASH> 100644
--- a/ui/src/directives/TouchPan.js
+++ b/ui/src/directives/TouchPan.js
@@ -67,7 +67,7 @@ function getChanges (evt, ctx, isFinal) {
let synthetic = false
- if (dir === void 0 && isFinal !== true) {
+ if (dir === void 0 && isFinal === false) {
if (ctx.event.isFirst === true || ctx.event.lastDir === void 0) {
return {}
} | fix(TouchPan): Return event with payload when calling getChanges in end handler for synthetic isFirst event (#<I>) | quasarframework_quasar | train |
6d825df2b075a37cb26b8d226a84f92d5bbe0e09 | diff --git a/platform.js b/platform.js
index <HASH>..<HASH> 100644
--- a/platform.js
+++ b/platform.js
@@ -389,6 +389,7 @@
'Waterfox',
'WebPositive',
{ 'label': 'Yandex Browser', 'pattern': 'YaBrowser' },
+ { 'label': 'UC Browser', 'pattern': 'UCBrowser' },
'Opera Mini',
{ 'label': 'Opera Mini', 'pattern': 'OPiOS' },
'Opera',
@@ -673,6 +674,10 @@
description.unshift('accelerated');
}
}
+ // Detect UC Browser speed mode.
+ else if (name == 'UC Browser' && /\bUCWEB\b/.test(ua)) {
+ description.push('speed mode');
+ }
// Detect PaleMoon identifying as Firefox.
else if (name == 'PaleMoon' && (data = /\bFirefox\/([\d.]+)\b/.exec(ua))) {
description.push('identifying as Firefox ' + data[1]);
@@ -702,7 +707,7 @@
// Detect non-Opera (Presto-based) versions (order is important).
if (!version) {
version = getVersion([
- '(?:Cloud9|CriOS|CrMo|Edge|Edg|EdgA|EdgiOS|FxiOS|HeadlessChrome|IEMobile|Iron|Opera ?Mini|OPiOS|OPR|Raven|SamsungBrowser|Silk(?!/[\\d.]+$)|YaBrowser)',
+ '(?:Cloud9|CriOS|CrMo|Edge|Edg|EdgA|EdgiOS|FxiOS|HeadlessChrome|IEMobile|Iron|Opera ?Mini|OPiOS|OPR|Raven|SamsungBrowser|Silk(?!/[\\d.]+$)|UCBrowser|YaBrowser)',
'Version',
qualify(name),
'(?:Firefox|Minefield|NetFront)'
diff --git a/test/test.js b/test/test.js
index <HASH>..<HASH> 100644
--- a/test/test.js
+++ b/test/test.js
@@ -2612,6 +2612,43 @@
'version': '3.0.10#{alpha}'
},
+ 'UC Browser 11.3.0.950 (like Chrome 12.x) on Android 5.0':{
+ 'ua': 'Mozilla/5.0 (Linux; U; Android 5.0; en-US; SM-N900 Build/LRX21V) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 UCBrowser/11.3.0.950 U3/0.8.0 Mobile Safari/534.30',
+ 'layout': 'WebKit',
+ 'likeChrome': true,
+ 'name': 'UC Browser',
+ 'os': 'Android 5.0',
+ 'version': '11.3.0.950'
+ },
+
+ 'UC Browser 11.3.5.1203 (like Safari 7.x) on Apple iPhone (iOS 12.4.7)':{
+ 'ua': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_7 like Mac OS X; ru) AppleWebKit/537.51.1 (KHTML, like Gecko) Mobile/16G192 UCBrowser/11.3.5.1203 Mobile',
+ 'layout': 'WebKit',
+ 'manufacturer': 'Apple',
+ 'name': 'UC Browser',
+ 'os': 'iOS 12.4.7',
+ 'product': 'iPhone',
+ 'version': '11.3.5.1203'
+ },
+
+ 'UC Browser 13.2.0.1296 (like Chrome 57.0.2987.108) on Android 10':{
+ 'ua': 'Mozilla/5.0 (Linux; U; Android 10; ar-SA; SM-N975F Build/QP1A.190711.020) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/13.2.0.1296 Mobile Safari/537.36',
+ 'layout': 'Blink',
+ 'likeChrome': true,
+ 'name': 'UC Browser',
+ 'os': 'Android 10',
+ 'version': '13.2.0.1296'
+ },
+
+ 'UC Browser 13.2.0.1296 (speed mode; like Chrome 12.x) on Android 9':{
+ 'ua': 'Mozilla/5.0 (Linux; U; Android 9; en-US; Redmi Note 5 Pro Build/PKQ1.180904.001) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 UCBrowser/13.2.0.1296 (SpeedMode) U4/1.0 UCWEB/2.0 Mobile Safari/534.30',
+ 'layout': 'WebKit',
+ 'likeChrome': true,
+ 'name': 'UC Browser',
+ 'os': 'Android 9',
+ 'version': '13.2.0.1296'
+ },
+
'Vivaldi 3.1.4 (like Chrome 81.0.3298.14) on Linux 64-bit': {
'ua': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.3298.14 Safari/537.36 Vivaldi/3.1.4',
'layout': 'Blink', | Add support for UC Browser.
Closes #<I>. | bestiejs_platform.js | train |
5dad1fc09434b5ac99b39ca1fa231688b17fb459 | diff --git a/cpenv/resolver.py b/cpenv/resolver.py
index <HASH>..<HASH> 100644
--- a/cpenv/resolver.py
+++ b/cpenv/resolver.py
@@ -5,7 +5,7 @@ import os
# Local imports
from . import utils
-from .repos import Repo, LocalRepo
+from .repos import LocalRepo
from .module import Module, is_module, is_exact_match, best_match
@@ -104,14 +104,8 @@ class Copier(object):
def __init__(self, to_repo):
from .api import get_repo
-
self.to_repo = get_repo(to_repo)
- if not isinstance(self.to_repo, LocalRepo):
- raise ValueError(
- 'Copier expected LocalRepo got %s' % type(to_repo)
- )
-
def copy(self, module_specs, overwrite=False):
         '''Given ModuleSpecs, copy them to this copier's to_repo.'''
from .api import get_cache_path | fix: copier failed when to_repo was not a LocalRepo | cpenv_cpenv | train |
e060fbf9376ae609246f1feaeefbbe398defcdd2 | diff --git a/lib/elm-test.js b/lib/elm-test.js
index <HASH>..<HASH> 100644
--- a/lib/elm-test.js
+++ b/lib/elm-test.js
@@ -496,6 +496,11 @@ function generatePackageJson(filePathArgs) {
}
function ensurePackagesInstalled(dir) {
+ if (args["skip-install"]) {
+ infoLog("Warning: Skipping missing packages check.");
+ return true;
+ }
+
// We need to install missing packages here.
var cmd = [pathToElmPackage, "install", "--yes"].join(" ");
var processOpts = processOptsForReporter(report); | add --skip-install flag
I often code on the bus or in places without internet. Because of the missing-packages check, I can't run my tests in these situations.
This change makes it possible to opt out of the package check so I can run my tests again without wifi. | rtfeldman_node-test-runner | train |
96d12449bc0d52ea8e9ff7a7e21b3a927a1dd14b | diff --git a/syntax/parser_test.go b/syntax/parser_test.go
index <HASH>..<HASH> 100644
--- a/syntax/parser_test.go
+++ b/syntax/parser_test.go
@@ -64,6 +64,7 @@ var bashParamExp = regexp.MustCompile(`\${[^}]*[,^:]`)
func confirmParse(in string, posix, fail bool) func(*testing.T) {
return func(t *testing.T) {
+ t.Parallel()
var opts []string
if posix {
if strings.HasPrefix(in, "function") { | syntax: run bash confirmation tests in parallel
Calling bash is really slow, so parallelize it. Brings down `go test`
total time on my machine from ~2s to ~1s. | mvdan_sh | train |
7738c5009e09776c0a2f32c5797f6839a65b4834 | diff --git a/molgenis-jobs/src/main/java/org/molgenis/data/jobs/Job.java b/molgenis-jobs/src/main/java/org/molgenis/data/jobs/Job.java
index <HASH>..<HASH> 100644
--- a/molgenis-jobs/src/main/java/org/molgenis/data/jobs/Job.java
+++ b/molgenis-jobs/src/main/java/org/molgenis/data/jobs/Job.java
@@ -31,18 +31,22 @@ public abstract class Job<Result> implements Callable<Result>
@Override
public Result call()
{
+ if (transactionTemplate != null)
+ {
+ return transactionTemplate.execute((status) -> doCallInTransaction());
+ }
+ else
+ {
+ return doCallInTransaction();
+ }
+ }
+
+ private Result doCallInTransaction()
+ {
progress.start();
try
{
- Result result;
- if (transactionTemplate != null)
- {
- result = transactionTemplate.execute((status) -> tryRunWithAuthentication());
- }
- else
- {
- result = tryRunWithAuthentication();
- }
+ Result result = tryRunWithAuthentication();
progress.success();
return result;
} | Fix #<I> Permissions lost when doing something transactional in the afterCommit handler of a Job | molgenis_molgenis | train |
222aa9af8ecc81181dcb310164399027e7bc3fa9 | diff --git a/lib/plugin.js b/lib/plugin.js
index <HASH>..<HASH> 100644
--- a/lib/plugin.js
+++ b/lib/plugin.js
@@ -342,7 +342,7 @@ exports.register = function (server, opts, next) {
if(settings.relsTemplate) {
reply.view('rel', { rel: rel, relData: marked(data.toString()) });
} else {
- reply(marked(data.toString()))
+ reply(marked(data.toString()));
}
});
} else {
@@ -389,11 +389,13 @@ exports.register = function (server, opts, next) {
link = value;
});
- if (relativeTo && internals.isRelativePath(link.href)) {
+ if (relativeTo && (internals.isRelativePath(link.href) || settings.absolute )) {
link.href = new URI(link.href).absoluteTo(relativeTo + '/')
.toString();
}
+// if (relativeTo) link.href = new URI(link.href).absoluteTo(relativeTo + '/').toString();
+
return link;
};
@@ -856,7 +858,7 @@ exports.register = function (server, opts, next) {
request.response.source = rep.toJSON();
request.response.type(mediaType);
- reply.continue()
+ reply.continue();
});
} else {
reply.continue();
diff --git a/test/plugin-spec.js b/test/plugin-spec.js
index <HASH>..<HASH> 100644
--- a/test/plugin-spec.js
+++ b/test/plugin-spec.js
@@ -1475,6 +1475,48 @@ describe('Halacious Plugin', function () {
done();
});
});
+
+ it('should make configured links absolute', function(done) {
+ var server = new hapi.Server();
+ server.connection({ port: 9090 });
+
+ server.route({
+ method: 'post',
+ path: '/api/people',
+ config: {
+ handler: function (req, reply) {
+ reply({ firstName: 'Bob', lastName: 'Smith' });
+ },
+ plugins: {
+ hal: {
+ absolute: true,
+ prepare: function(rep, done) {
+ rep.link('mco:boss', '/api/people/101');
+ done();
+ }
+ }
+ }
+ }
+ });
+
+ server.register({
+ register: halacious,
+ options: { mediaTypes: ['application/json', 'application/hal+json'], absolute: true }
+
+ }, function (err) {
+ if (err) return done(err);
+ });
+
+ server.inject({
+ method: 'post',
+ url: 'http://localhost:9090/api/people',
+ headers: { Accept: 'application/hal+json' }
+ }, function (res) {
+ var result = JSON.parse(res.payload);
+ result.should.have.property('_links').that.has.property('mco:boss').that.has.property('href', 'http://localhost:9090/api/people/101')
+ done();
+ });
+ });
});
it('should support resolving embedded hrefs by ids', function (done) { | absolute: true ignored when _links were resolved | bleupen_halacious | train |
3d9f98d7ff8d02c433e1c7e726dd34e44b12b20e | diff --git a/bin/copy-assets.js b/bin/copy-assets.js
index <HASH>..<HASH> 100644
--- a/bin/copy-assets.js
+++ b/bin/copy-assets.js
@@ -90,8 +90,7 @@ function copySVGs({ projectPath, mcPath }) {
usedSvgs.push(...searchText(fs.readFileSync(file, "utf-8"), svgTest))
);
- const files = fs
- .readdirSync(projectImagesPath)
+ const files = fs.readdirSync(projectImagesPath)
.filter(file => file.match(/svg$/))
.filter(file => usedSvgs.includes(file));
@@ -109,6 +108,24 @@ function copySVGs({ projectPath, mcPath }) {
const mozBuildPath = path.join(mcPath, "devtools/client/debugger/new/images/moz.build");
fs.writeFileSync(mozBuildPath, mozBuildText, "utf-8");
+
+ const sourceFiles = fs.readdirSync(path.join(projectImagesPath, "sources"))
+ .filter(file => file.match(/svg$/))
+ .filter(file => usedSvgs.some(svg => svg.includes(file)));
+
+ sourceFiles.forEach(file =>
+ fsExtra.copySync(
+ path.join(path.join(projectImagesPath, "sources"), file),
+ path.join(path.join(mcImagesPath, "sources"), `${file}`)
+ )
+ );
+ const mozBuildSourceText = moz_build_tpl
+ .replace('__FILES__',sourceFiles.map(f => ` '${f}',`).join("\n"))
+
+ const mozBuildSourcePath = path.join(mcPath, "devtools/client/debugger/new/images/sources/moz.build");
+ fs.writeFileSync(mozBuildSourcePath, mozBuildSourceText, "utf-8");
+
+
console.log("[copy-assets] - Svg.js");
copyFile(
path.join(projectPath, "/images/Svg.js"),
diff --git a/src/components/shared/Accordion.css b/src/components/shared/Accordion.css
index <HASH>..<HASH> 100644
--- a/src/components/shared/Accordion.css
+++ b/src/components/shared/Accordion.css
@@ -4,10 +4,12 @@
:root {
--accordion-header-background: var(--theme-toolbar-background);
+ --disclosure-arrow: #b2b2b2;
}
:root.theme-dark {
--accordion-header-background: #222225;
+ --disclosure-arrow: #7f7f81;
}
.accordion {
@@ -23,31 +25,45 @@
border-bottom: 1px solid var(--theme-splitter-color);
display: flex;
font-size: 12px;
- padding: 4px 8px;
+ padding: 4px;
+ transition: all 0.25s ease;
width: 100%;
- /* 24px for content + 1px for the bottom border */
- height: 25px;
+ height: 24px;
align-items: center;
margin: 0px;
font-weight: normal;
+
+ -webkit-user-select: none;
-moz-user-select: none;
+ -ms-user-select: none;
+ -o-user-select: none;
user-select: none;
cursor: default;
}
-.accordion ._header:hover,
-.accordion ._header:focus {
+.accordion ._header:hover {
background-color: var(--theme-toolbar-background-hover);
}
-.accordion ._header .arrow {
- margin-inline-start: -3px;
- margin-inline-end: 5px;
+.accordion ._header button svg,
+.accordion ._header:hover button svg {
+ fill: currentColor;
+ height: 16px;
+}
+
+.accordion ._content {
+ border-bottom: 1px solid var(--theme-splitter-color);
+ font-size: 12px;
+}
+
+.accordion div:last-child ._content {
+ border-bottom: none;
}
.accordion ._header .header-buttons {
display: flex;
- margin-inline-start: auto;
+ margin-left: auto;
+ padding-right: 5px;
}
.accordion .header-buttons .add-button {
@@ -69,16 +85,6 @@
border: none;
}
-/* vertical-align:middle results in slightly wrong alignment */
-.accordion .header-buttons .img {
- display: block;
-}
-
-.accordion ._content {
- border-bottom: 1px solid var(--theme-splitter-color);
- font-size: 12px;
-}
-
-.accordion div:last-child ._content {
- border-bottom: none;
+.accordion .arrow svg {
+ fill: var(--disclosure-arrow);
}
diff --git a/test/mochitest/browser_dbg-breakpoints-cond.js b/test/mochitest/browser_dbg-breakpoints-cond.js
index <HASH>..<HASH> 100644
--- a/test/mochitest/browser_dbg-breakpoints-cond.js
+++ b/test/mochitest/browser_dbg-breakpoints-cond.js
@@ -165,6 +165,7 @@ add_task(async function() {
await setLogPoint(dbg, 5, "44");
await waitForBreakpointWithLog(dbg, "simple2", 5);
assertEditorBreakpoint(dbg, 5, { hasLog: true });
+
bp = findBreakpoint(dbg, "simple2", 5);
is(bp.options.logValue, "44", "breakpoint condition removed");
}); | Sync commits from release <I> (#<I>)
* [releases] copy source images
* sync changes relase <I> (2) | firefox-devtools_debugger | train |
719e75d554f9b7596e2097c6aa18ccef1c02495b | diff --git a/library/src/com/actionbarsherlock/internal/ActionBarSherlockCompat.java b/library/src/com/actionbarsherlock/internal/ActionBarSherlockCompat.java
index <HASH>..<HASH> 100644
--- a/library/src/com/actionbarsherlock/internal/ActionBarSherlockCompat.java
+++ b/library/src/com/actionbarsherlock/internal/ActionBarSherlockCompat.java
@@ -1025,6 +1025,7 @@ public class ActionBarSherlockCompat extends ActionBarSherlock implements MenuBu
layoutResource = R.layout.abs__screen_simple;
}
+ if (DEBUG) Log.d(TAG, "[generateLayout] using screen XML " + mActivity.getResources().getString(layoutResource));
View in = mActivity.getLayoutInflater().inflate(layoutResource, null);
mDecor.addView(in, new ViewGroup.LayoutParams(MATCH_PARENT, MATCH_PARENT)); | Log layout being used when debugging. | JakeWharton_ActionBarSherlock | train |
f632d8c33840a62d94ab402d99b49cfdf947cb86 | diff --git a/fs/btrfs.go b/fs/btrfs.go
index <HASH>..<HASH> 100644
--- a/fs/btrfs.go
+++ b/fs/btrfs.go
@@ -42,7 +42,7 @@ func (fs *BtrfsFilesystem) Create(bytes uint64) error {
fi
if file $IMAGE_PATH | grep -v BTRFS; then
- mkfs.btrfs $lo
+ mkfs.btrfs --nodiscard $lo
fi
mkdir -p $MOUNT_PATH | do not perform TRIM on mkfs | concourse_baggageclaim | train |
d01581c380111974cccf01d343fb61c05434c1f5 | diff --git a/api/ParserService.js b/api/ParserService.js
index <HASH>..<HASH> 100644
--- a/api/ParserService.js
+++ b/api/ParserService.js
@@ -28,6 +28,7 @@ var env = new ParserEnv( {
maxDepth: 40
} );
+
var parserPipelineFactory = new ParserPipelineFactory( env );
//var parser = parserPipelineFactory.makePipeline( 'text/x-mediawiki/full' );
@@ -126,6 +127,36 @@ app.post(/\/_wikitext\/(.*)/, function(req, res){
});
/**
+ * Perform char-based diff on a line-based diff. The char-based algorithm is
+ * practically unusable for inputs > 5k bytes, so we only perform it on the
+ * output of the more efficient line-based diff.
+ */
+var refineDiff = function( diff ) {
+ var added = null,
+ out = [];
+ for ( var i = 0, l = diff.length; i < l; i++ ) {
+ var d = diff[i];
+ if ( d.added ) {
+ added = d;
+ } else if ( d.removed ) {
+ if ( added ) {
+ var fineDiff = jsDiff.diffChars( d.value, added.value );
+ out.push.apply( out, fineDiff );
+ added = null;
+ }
+ } else if ( added ) {
+ out.push( added );
+ added = null;
+ out.push(d);
+ }
+ }
+ if ( added ) {
+ out.push(added);
+ }
+ return out;
+};
+
+/**
* Round-trip article testing
*/
app.get(/\/_roundtrip\/(.*)/, function(req, res){
@@ -151,7 +182,13 @@ app.get(/\/_roundtrip\/(.*)/, function(req, res){
var out = new WikitextSerializer({env: env}).serializeDOM( document.body );
res.write('<pre>' + htmlSpecialChars( out ) + '</pre><hr>');
res.write( '<h2>Diff between original Wikitext (green) and round-tripped wikitext (red)</h2><hr>' );
- var patch = jsDiff.convertChangesToXML( jsDiff.diffLines( out, src ) );
+ var patch;
+ if ( src.length < 4000 ) {
+ // Use word-based diff for small articles
+ patch = jsDiff.convertChangesToXML( jsDiff.diffChars( out, src ) );
+ } else {
+ patch = jsDiff.convertChangesToXML( refineDiff( jsDiff.diffLines( out, src ) ) );
+ }
res.end( '<pre>' + patch);
});
try { | Create a 'refinement diff' algorithm
The word- or char-based algorithm does not scale well beyond 5k chars or so. We
now perform a line-based diff and then continue to diff the line differences
using the char-based algorithm. This gives a char-based diff even for bigger
inputs.
Change-Id: Iec<I>ca<I>e4df<I>ba<I>c<I>e7ff5cfe<I> | wikimedia_parsoid | train |
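For illustration: a hedged Python sketch of the refinement-diff idea using difflib — a cheap line-based pass first, then the expensive character-based diff applied only inside changed regions. Names are illustrative; the commit's code uses the jsDiff library.

```python
import difflib

def refine_diff(old, new):
    a_lines = old.splitlines(True)
    b_lines = new.splitlines(True)
    out = []
    # Cheap line-based pass over the whole input.
    for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(a=a_lines, b=b_lines).get_opcodes():
        if tag == 'equal':
            out.append(('equal', ''.join(a_lines[i1:i2])))
            continue
        # Refine only the changed region char by char; these regions stay
        # small, so the quadratic algorithm never sees the full document.
        a, b = ''.join(a_lines[i1:i2]), ''.join(b_lines[j1:j2])
        for t, x1, x2, y1, y2 in difflib.SequenceMatcher(a=a, b=b).get_opcodes():
            if t == 'equal':
                out.append(('equal', a[x1:x2]))
            if t in ('delete', 'replace'):
                out.append(('removed', a[x1:x2]))
            if t in ('insert', 'replace'):
                out.append(('added', b[y1:y2]))
    return out

for kind, text in refine_diff('one two three\n', 'one 2 three\n'):
    print(kind, repr(text))
```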
990803267198df45e14f3bfbaaaaf938140e3dfe | diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go
index <HASH>..<HASH> 100644
--- a/go/vt/vtgate/vtgate.go
+++ b/go/vt/vtgate/vtgate.go
@@ -56,6 +56,7 @@ import (
var (
transactionMode = flag.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit")
normalizeQueries = flag.Bool("normalize_queries", true, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.")
+ terseErrors = flag.Bool("vtgate-config-terse-errors", false, "prevent bind vars from escaping in returned errors")
streamBufferSize = flag.Int("stream_buffer_size", 32*1024, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.")
queryPlanCacheSize = flag.Int64("gate_query_cache_size", 10000, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.")
legacyAutocommit = flag.Bool("legacy_autocommit", false, "DEPRECATED: set this flag to true to get the legacy behavior: all transactions will need an explicit begin, and DMLs outside transactions will return an error.")
@@ -1039,6 +1040,10 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats {
func truncateErrorStrings(data map[string]interface{}) map[string]interface{} {
ret := map[string]interface{}{}
+ if *terseErrors {
+ // request might have PII information. Return an empty map
+ return ret
+ }
for key, val := range data {
mapVal, ok := val.(map[string]interface{})
if ok { | Strip bindVars from response when terse errors is on | vitessio_vitess | train |
c75e0adcfa6ed3dfa99e8ecc36d8fe6c784aff1e | diff --git a/lib/sup/thread.rb b/lib/sup/thread.rb
index <HASH>..<HASH> 100644
--- a/lib/sup/thread.rb
+++ b/lib/sup/thread.rb
@@ -357,7 +357,7 @@ class ThreadSet
return if threads.size < 2
containers = threads.map do |t|
- c = @messages.member?(c) ? @messages[t.first.id] : nil
+ c = @messages.member?(t.first.id) ? @messages[t.first.id] : nil
raise "not in threadset: #{t.first.id}" unless c && c.message
c
end | fix a thread merging bug introduced by refactoring in <I>f8fc2 | sup-heliotrope_sup | train |
d37a4bb1c97694520450e11947fde00c8c778527 | diff --git a/src/Parsers/Login/OneEight.php b/src/Parsers/Login/OneEight.php
index <HASH>..<HASH> 100644
--- a/src/Parsers/Login/OneEight.php
+++ b/src/Parsers/Login/OneEight.php
@@ -13,6 +13,8 @@ class OneEight extends OneX
@list($name, $value) = explode("=", $line, 2);
}
+ $value = trim($value);
+
if ($name == 'Info') {
if ($value) {
// break it up on the 2 required parts | Space at the beginning of capability value can cause malformed URL. Fixes #<I> (#<I>) | troydavisson_PHRETS | train |
ff90a2f81ea53563f0be844a3babbb36699a501e | diff --git a/test/mocha/each.js b/test/mocha/each.js
index <HASH>..<HASH> 100644
--- a/test/mocha/each.js
+++ b/test/mocha/each.js
@@ -4,9 +4,6 @@ var assert = require("assert");
var adapter = require("../../js/debug/bluebird.js");
var Promise = adapter;
-var fulfilled = adapter.fulfilled;
-var rejected = adapter.rejected;
-var pending = adapter.pending;
function promised(val) {
return new Promise(function(f) {
@@ -29,75 +26,68 @@ function thenabled(val, arr) {
describe("Promise.prototype.each", function() {
-
- it("should return the array's values", function(done) {
+ it("should return the array's values", function() {
var a = [promised(1), promised(2), promised(3)];
var b = [];
- Promise.each(a, function(val) {
+ return Promise.each(a, function(val) {
b.push(3-val);
return val;
}).then(function(ret) {
assert.deepEqual(ret, [1,2,3]);
assert.deepEqual(b, [2, 1, 0]);
- done();
});
});
- it("takes value, index and length", function(done) {
+ it("takes value, index and length", function() {
var a = [promised(1), promised(2), promised(3)];
var b = [];
- Promise.each(a, function(value, index, length) {
+ return Promise.each(a, function(value, index, length) {
b.push(value, index, length);
}).then(function(ret) {
assert.deepEqual(b, [1, 0, 3, 2, 1, 3, 3, 2, 3]);
- done();
});
});
- it("waits for returned promise before proceeding next", function(done) {
+ it("waits for returned promise before proceeding next", function() {
var a = [promised(1), promised(2), promised(3)];
var b = [];
- Promise.each(a, function(value) {
+ return Promise.each(a, function(value) {
b.push(value);
return Promise.delay(10).then(function(){
b.push(value*2);
});
}).then(function(ret) {
assert.deepEqual(b, [1,2,2,4,3,6]);
- done();
});
});
- it("waits for returned thenable before proceeding next", function(done) {
+ it("waits for returned thenable before proceeding next", function() {
var b = [1, 2, 3];
var a = [thenabled(1), thenabled(2), thenabled(3)];
- Promise.each(a, function(val) {
+ return Promise.each(a, function(val) {
b.push(val * 50);
return thenabled(val * 500, b);
}).then(function(ret) {
assert.deepEqual(b, [1, 2, 3, 50, 500, 100, 1000, 150, 1500]);
- done();
});
});
- it("doesnt iterate with an empty array", function(done) {
- Promise.each([], function(val) {
+ it("doesnt iterate with an empty array", function() {
+ return Promise.each([], function(val) {
throw new Error();
}).then(function(ret) {
assert.deepEqual(ret, []);
- done();
});
});
- it("iterates with an array of single item", function(done) {
+ it("iterates with an array of single item", function() {
var b = [];
- Promise.each([promised(1)], function(val) {
+ return Promise.each([promised(1)], function(val) {
b.push(val);
return thenabled(val*2, b);
}).then(function(ret) {
assert.deepEqual(b, [1,2]);
- done();
});
});
}); | Convert to promise syntax, remove redundancy
These tests are much better than the older ones, Petka! | petkaantonov_bluebird | train
f6ba03eb4f575a4d238ab3b6ea94937a6a00c8f7 | diff --git a/ta4j/src/main/java/eu/verdelhan/ta4j/indicators/trackers/EMAIndicator.java b/ta4j/src/main/java/eu/verdelhan/ta4j/indicators/trackers/EMAIndicator.java
index <HASH>..<HASH> 100644
--- a/ta4j/src/main/java/eu/verdelhan/ta4j/indicators/trackers/EMAIndicator.java
+++ b/ta4j/src/main/java/eu/verdelhan/ta4j/indicators/trackers/EMAIndicator.java
@@ -36,16 +36,14 @@ public class EMAIndicator extends CachedIndicator<TADecimal> {
private final int timeFrame;
+ private final TADecimal multiplier;
+
public EMAIndicator(Indicator<? extends TADecimal> indicator, int timeFrame) {
this.indicator = indicator;
this.timeFrame = timeFrame;
+ multiplier = TADecimal.TWO.dividedBy(TADecimal.valueOf(timeFrame + 1));
}
- private TADecimal multiplier() {
- return TADecimal.TWO.dividedBy(TADecimal.valueOf(timeFrame + 1));
- }
-
-
@Override
protected TADecimal calculate(int index) {
if (index + 1 < timeFrame) {
@@ -55,7 +53,7 @@ public class EMAIndicator extends CachedIndicator<TADecimal> {
return indicator.getValue(0);
}
TADecimal emaPrev = getValue(index - 1);
- return indicator.getValue(index).minus(emaPrev).multipliedBy(multiplier()).plus(emaPrev);
+ return indicator.getValue(index).minus(emaPrev).multipliedBy(multiplier).plus(emaPrev);
}
@Override
diff --git a/ta4j/src/main/java/eu/verdelhan/ta4j/strategies/NotSoFastStrategy.java b/ta4j/src/main/java/eu/verdelhan/ta4j/strategies/NotSoFastStrategy.java
index <HASH>..<HASH> 100644
--- a/ta4j/src/main/java/eu/verdelhan/ta4j/strategies/NotSoFastStrategy.java
+++ b/ta4j/src/main/java/eu/verdelhan/ta4j/strategies/NotSoFastStrategy.java
@@ -60,7 +60,7 @@ public class NotSoFastStrategy extends AbstractStrategy {
@Override
public boolean shouldExit(int index) {
- return (strategy.shouldExit(index) && ((index - enterTickIndex) > numberOfTicks));
+ return ((index - enterTickIndex) > numberOfTicks) && strategy.shouldExit(index);
}
@Override | Improving performance of an indicator and a strategy | mdeverdelhan_ta4j-origins | train |
2a37afb648554576f862692d86aea61ee314edcd | diff --git a/GVRf/Framework/src/org/gearvrf/GVRTransform.java b/GVRf/Framework/src/org/gearvrf/GVRTransform.java
index <HASH>..<HASH> 100644
--- a/GVRf/Framework/src/org/gearvrf/GVRTransform.java
+++ b/GVRf/Framework/src/org/gearvrf/GVRTransform.java
@@ -396,6 +396,19 @@ public class GVRTransform extends GVRComponent {
NativeTransform.rotateByAxisWithPivot(getNative(), angle, axisX, axisY,
axisZ, pivotX, pivotY, pivotZ);
}
+
+
+ /**
+ * Reset the transform
+ *
+     * This will undo any translations, rotations, or scaling and reset the Transform back to default values. This is the equivalent of setting the Transform to an identity matrix.
+ */
+ public void reset() {
+ setPosition(0, 0, 0);
+ setRotation(1, 0, 0, 0);
+ setScale(1, 1, 1);
+ }
+
}
class NativeTransform { | Adding a reset() to GVRTransform.
Adding a reset() to GVRTransform. This will be the equivalent of
glLoadIdentity(). | Samsung_GearVRf | train |
1fad744611b04d5323de77be45d7bbf26dd5d25e | diff --git a/mpldatacursor/datacursor.py b/mpldatacursor/datacursor.py
index <HASH>..<HASH> 100644
--- a/mpldatacursor/datacursor.py
+++ b/mpldatacursor/datacursor.py
@@ -573,7 +573,7 @@ class DataCursor(object):
# Need to draw the annotation to get the correct extent
try:
anno.draw(fig.canvas.renderer)
- except AttributeError:
+ except (AttributeError, RuntimeError):
# Can't draw properly on OSX and NbAgg backends. Disable keep_inside
return
bbox = anno.get_window_extent() | Add workaround for issue #<I> | joferkington_mpldatacursor | train |
ec2c2802d1756b99521931bef61390dff3871608 | diff --git a/authority/permissions.py b/authority/permissions.py
index <HASH>..<HASH> 100644
--- a/authority/permissions.py
+++ b/authority/permissions.py
@@ -41,6 +41,7 @@ class BasePermission(object):
super(BasePermission, self).__init__(*args, **kwargs)
def has_user_perms(self, perm, obj, approved, check_groups=True):
+ print 'in has_user_perms'
if self.user:
if self.user.is_superuser:
return True
@@ -48,7 +49,7 @@ class BasePermission(object):
return False
# check if a Permission object exists for the given params
return Permission.objects.user_permissions(self.user, perm, obj,
- approved, check_groups).filter(object_id=obj.id)
+ approved, check_groups).filter(object_id=obj.pk)
return False
def has_group_perms(self, perm, obj, approved):
@@ -58,7 +59,7 @@ class BasePermission(object):
if self.group:
perms = Permission.objects.group_permissions(self.group, perm, obj,
approved)
- return perms.filter(object_id=obj.id)
+ return perms.filter(object_id=obj.pk)
return False
def has_perm(self, perm, obj, check_groups=True, approved=True): | Changing object references from object.id to object.pk | jazzband_django-authority | train |
75c18ddd859e8412879d4cdb79a0c348d9267ab3 | diff --git a/governator-core/src/main/java/com/netflix/governator/LifecycleModule.java b/governator-core/src/main/java/com/netflix/governator/LifecycleModule.java
index <HASH>..<HASH> 100644
--- a/governator-core/src/main/java/com/netflix/governator/LifecycleModule.java
+++ b/governator-core/src/main/java/com/netflix/governator/LifecycleModule.java
@@ -5,6 +5,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -82,7 +83,7 @@ public final class LifecycleModule extends SingletonModule {
@Singleton
static class LifecycleProvisionListener extends DefaultLifecycleListener implements ProvisionListener {
- private final ConcurrentLinkedQueue<Runnable> shutdownActions = new ConcurrentLinkedQueue<Runnable>();
+ private final ConcurrentLinkedDeque<Runnable> shutdownActions = new ConcurrentLinkedDeque<Runnable>();
private final ConcurrentMap<Class<?>, TypeLifecycleActions> cache = new ConcurrentHashMap<>();
private Set<LifecycleFeature> features;
private final AtomicBoolean isShutdown = new AtomicBoolean();
@@ -152,7 +153,7 @@ public final class LifecycleModule extends SingletonModule {
// Add any PreDestroy methods to the shutdown list of actions
if (!actions.preDestroyActions.isEmpty()) {
if (isShutdown.get() == false) {
- shutdownActions.add(new Runnable() {
+ shutdownActions.addFirst(new Runnable() {
@Override
public void run() {
for (LifecycleAction m : actions.preDestroyActions) {
diff --git a/governator-core/src/test/java/com/netflix/governator/LifecycleModuleTest.java b/governator-core/src/test/java/com/netflix/governator/LifecycleModuleTest.java
index <HASH>..<HASH> 100644
--- a/governator-core/src/test/java/com/netflix/governator/LifecycleModuleTest.java
+++ b/governator-core/src/test/java/com/netflix/governator/LifecycleModuleTest.java
@@ -1,6 +1,7 @@
package com.netflix.governator;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
@@ -32,7 +33,9 @@ public class LifecycleModuleTest {
@Singleton
private static class MySingleton {
private static AtomicInteger initCounter = new AtomicInteger(0);
+ private static AtomicLong initTime = new AtomicLong(0);
private static AtomicInteger shutdownCounter = new AtomicInteger(0);
+ private static AtomicLong shutdownTime = new AtomicLong(0);
@Configuration("foo")
String field1;
@@ -51,11 +54,13 @@ public class LifecycleModuleTest {
void init() {
System.out.println("*****Post constructing");
initCounter.incrementAndGet();
+ initTime.set(System.nanoTime());
}
@PreDestroy
void shutdown() {
shutdownCounter.incrementAndGet();
+ shutdownTime.set(System.nanoTime());
}
}
@@ -67,6 +72,32 @@ public class LifecycleModuleTest {
}
}
+ @Singleton
+ private static class SingletonWithDependency {
+ private static AtomicInteger initCounter = new AtomicInteger(0);
+ private static AtomicLong initTime = new AtomicLong(0);
+ private static AtomicInteger shutdownCounter = new AtomicInteger(0);
+ private static AtomicLong shutdownTime = new AtomicLong(0);
+
+ @Inject
+ public SingletonWithDependency(MySingleton mySingleton) {
+ System.out.println("*****Injecting SingletonWithDependency");
+ }
+
+ @PostConstruct
+ void init() {
+ System.out.println("*****Post constructing SingletonWithDependency");
+ initCounter.incrementAndGet();
+ initTime.set(System.nanoTime());
+ }
+
+ @PreDestroy
+ void shutdown() {
+ shutdownCounter.incrementAndGet();
+ shutdownTime.set(System.nanoTime());
+ }
+ }
+
@BeforeMethod
public void before() {
MySingleton.initCounter.set(0);
@@ -92,6 +123,24 @@ public class LifecycleModuleTest {
injector.shutdown();
Assert.assertEquals(1, singleton.shutdownCounter.get());
}
+
+ @Test
+ public void testOrderWithLifecycle() {
+ LifecycleInjector injector = Governator.createInjector(
+ Stage.DEVELOPMENT);
+ SingletonWithDependency singleton = injector.getInstance(SingletonWithDependency.class);
+ Assert.assertEquals(1, singleton.initCounter.get());
+ Assert.assertEquals(1, MySingleton.initCounter.get());
+ Assert.assertTrue("MySingleton was constructed before SingletonWithDependency",
+ MySingleton.initTime.get() < singleton.initTime.get());
+ Assert.assertEquals(0, singleton.shutdownCounter.get());
+ Assert.assertEquals(0, MySingleton.shutdownCounter.get());
+ injector.shutdown();
+ Assert.assertEquals(1, singleton.shutdownCounter.get());
+ Assert.assertEquals(1, MySingleton.shutdownCounter.get());
+ Assert.assertTrue("SingletonWithDependency was destroyed before MySingleton",
+ MySingleton.shutdownTime.get() > singleton.shutdownTime.get());
+ }
@Test
public void testWithExternalLifecycleManager() { | Invoke pre destroy methods in the reverse order of post construct
- new unit test to reproduce the behaviour and detect potential future regression | Netflix_governator | train |
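For illustration: a minimal Python sketch (hypothetical names) of why addFirst fixes the ordering — pushing each shutdown action onto the front of a deque makes teardown run in reverse construction order, so a dependent is destroyed before the dependency it was injected with.

```python
from collections import deque

shutdown_actions = deque()

def on_post_construct(name):
    print('init', name)
    # addFirst in the Java patch corresponds to appendleft here.
    shutdown_actions.appendleft(lambda: print('destroy', name))

on_post_construct('MySingleton')              # constructed first
on_post_construct('SingletonWithDependency')  # depends on MySingleton

for action in shutdown_actions:  # dependents first, dependencies last
    action()
# init MySingleton / init SingletonWithDependency /
# destroy SingletonWithDependency / destroy MySingleton
```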
867837ba1d95618df59f13332f392a3ca98e036a | diff --git a/src/pymlab/sensors/windgauge.py b/src/pymlab/sensors/windgauge.py
index <HASH>..<HASH> 100644
--- a/src/pymlab/sensors/windgauge.py
+++ b/src/pymlab/sensors/windgauge.py
@@ -239,6 +239,26 @@ class WINDGAUGE03A(Device):
return((gyro_x_raw / gyro_sens), (gyro_y_raw / gyro_sens), (gyro_z_raw / gyro_sens))
+ def get_mag_raw(self, *args):
+
+ mag_raw_data = self.read_icm20948_reg_data(self.ICM20948_EXT_SLV_SENS_DATA_00 + 9, 0, 9)
+
+ magX = (mag_raw_data[2] << 8) + mag_raw_data[1]
+ magY = (mag_raw_data[4] << 8) + mag_raw_data[3]
+ magZ = (mag_raw_data[6] << 8) + mag_raw_data[5]
+
+ if magX > 0x7fff:
+ magX -= 65536
+ if magY > 0x7fff:
+ magY -= 65536
+ if magZ > 0x7fff:
+ magZ -= 65536
+
+ mag_scf = 4912/32752.0
+
+ return(magX, magY, magZ)
+
+
def get_mag(self, *args):
offset_x, offset_y, offset_z = 0, 0, 0 | Add new method to read raw magnetometer data. | MLAB-project_pymlab | train |
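For illustration: a hedged Python sketch of the two's-complement decoding used in get_mag_raw() above — 16-bit little-endian axis values above 0x7fff are negative and must be sign-extended. The byte offsets mirror the driver, but the sample bytes are made up.

```python
def to_signed16(raw):
    # Values above 0x7fff represent negative numbers in two's complement.
    return raw - 65536 if raw > 0x7fff else raw

def decode_axes(data):
    # Per axis: low byte then high byte, starting at offset 1 as in the driver.
    return tuple(to_signed16((data[hi] << 8) | data[lo])
                 for lo, hi in ((1, 2), (3, 4), (5, 6)))

sample = bytes([0x00, 0x34, 0x12, 0xFF, 0xFF, 0x00, 0x80, 0x00, 0x00])
print(decode_axes(sample))  # -> (4660, -1, -32768)
```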
338cfbb8f19158184868aa15a270e6baa5623954 | diff --git a/lib/javascript.php b/lib/javascript.php
index <HASH>..<HASH> 100644
--- a/lib/javascript.php
+++ b/lib/javascript.php
@@ -9,7 +9,7 @@
echo '<script language="JavaScript" type="text/javascript" src="'.$CFG->wwwroot.'/lib/speller/spellChecker.js"></script>'."\n";
}
?>
-<style type="text/css">/*<![CDATA[*/ body{behavior:url(<?php echo $CFG->wwwroot ?>/lib/csshover.htc);} /*]]>*/</style>
+<!--<style type="text/css">/*<![CDATA[*/ body{behavior:url(<?php echo $CFG->wwwroot ?>/lib/csshover.htc);} /*]]>*/</style>-->
<script language="JavaScript" type="text/javascript" src="<?php echo $CFG->wwwroot ?>/lib/javascript-static.js"></script>
<script language="JavaScript" type="text/javascript" src="<?php echo $CFG->wwwroot ?>/lib/javascript-mod.php"></script> | Commented out hover hack ... see bug <I> | moodle_moodle | train |
efb882721550d3ff76d1eb912da0b0a8446aed75 | diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go
index <HASH>..<HASH> 100644
--- a/cmd/kube-controller-manager/app/autoscaling.go
+++ b/cmd/kube-controller-manager/app/autoscaling.go
@@ -67,13 +67,12 @@ func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
}
func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) {
- hpaClientGoClient := ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler")
hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
// we don't use cached discovery because DiscoveryScaleKindResolver does its own caching,
// so we want to re-fetch every time when we actually ask for it
- scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClientGoClient.Discovery())
+ scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClient.Discovery())
scaleClient, err := scale.NewForConfig(hpaClientConfig, ctx.RESTMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil {
return false, err
@@ -85,7 +84,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
)
go podautoscaler.NewHorizontalController(
- hpaClientGoClient.CoreV1(),
+ hpaClient.CoreV1(),
scaleClient,
hpaClient.AutoscalingV1(),
ctx.RESTMapper,
diff --git a/cmd/kube-controller-manager/app/bootstrap.go b/cmd/kube-controller-manager/app/bootstrap.go
index <HASH>..<HASH> 100644
--- a/cmd/kube-controller-manager/app/bootstrap.go
+++ b/cmd/kube-controller-manager/app/bootstrap.go
@@ -24,7 +24,7 @@ import (
func startBootstrapSignerController(ctx ControllerContext) (bool, error) {
bsc, err := bootstrap.NewBootstrapSigner(
- ctx.ClientBuilder.ClientGoClientOrDie("bootstrap-signer"),
+ ctx.ClientBuilder.ClientOrDie("bootstrap-signer"),
ctx.InformerFactory.Core().V1().Secrets(),
ctx.InformerFactory.Core().V1().ConfigMaps(),
bootstrap.DefaultBootstrapSignerOptions(),
@@ -38,7 +38,7 @@ func startBootstrapSignerController(ctx ControllerContext) (bool, error) {
func startTokenCleanerController(ctx ControllerContext) (bool, error) {
tcc, err := bootstrap.NewTokenCleaner(
- ctx.ClientBuilder.ClientGoClientOrDie("token-cleaner"),
+ ctx.ClientBuilder.ClientOrDie("token-cleaner"),
ctx.InformerFactory.Core().V1().Secrets(),
bootstrap.DefaultTokenCleanerOptions(),
)
diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go
index <HASH>..<HASH> 100644
--- a/pkg/controller/client_builder.go
+++ b/pkg/controller/client_builder.go
@@ -46,8 +46,6 @@ type ControllerClientBuilder interface {
ConfigOrDie(name string) *restclient.Config
Client(name string) (clientset.Interface, error)
ClientOrDie(name string) clientset.Interface
- ClientGoClient(name string) (clientset.Interface, error)
- ClientGoClientOrDie(name string) clientset.Interface
}
// SimpleControllerClientBuilder returns a fixed client with different user agents
@@ -85,22 +83,6 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf
return client
}
-func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
- clientConfig, err := b.Config(name)
- if err != nil {
- return nil, err
- }
- return clientset.NewForConfig(clientConfig)
-}
-
-func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
- client, err := b.ClientGoClient(name)
- if err != nil {
- glog.Fatal(err)
- }
- return client
-}
-
// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as
// service accounts
type SAControllerClientBuilder struct {
@@ -274,19 +256,3 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface
}
return client
}
-
-func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
- clientConfig, err := b.Config(name)
- if err != nil {
- return nil, err
- }
- return clientset.NewForConfig(clientConfig)
-}
-
-func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
- client, err := b.ClientGoClient(name)
- if err != nil {
- glog.Fatal(err)
- }
- return client
-} | pkg/controller: remove old clientbuilder methods
everything has moved to client-go now so these are the same as the
original Client* methods. | kubernetes_kubernetes | train |
48a4dcb1f8db60ae38851ee2faee31032849cf78 | diff --git a/multi_key_dict.py b/multi_key_dict.py
index <HASH>..<HASH> 100644
--- a/multi_key_dict.py
+++ b/multi_key_dict.py
@@ -27,6 +27,10 @@ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
DEALINGS IN THE SOFTWARE.
'''
+import sys
+_python3 = sys.version_info.major == 3
+del sys
+
class multi_key_dict(object):
    """ Purpose of this type is to provide a multi-key dictionary.
This kind of dictionary has a similar interface to the standard dictionary, and indeed if used
@@ -173,10 +177,14 @@ class multi_key_dict(object):
Otherwise (if not specified) ((keys,...), value)
i.e. (tuple of keys, values) pairs for all items in this dictionary will be generated.
            @param return_all_keys if set to True - tuple of keys is returned instead of a key of this type."""
+ used_keys = set()
if key_type is not None:
key = str(key_type)
if key in self.__dict__:
for key, keys in self.__dict__[key].items():
+ if keys in used_keys:
+ continue
+ used_keys.add(keys)
if return_all_keys:
yield keys, self.items_dict[keys]
else:
@@ -217,26 +225,12 @@ class multi_key_dict(object):
yield value
def items(self, key_type=None, return_all_keys=False):
- """ Return a copy of the dictionary's list of (key, value) pairs.
- @param key_type if specified, (key, value) pairs for keys of this type will be returned.
- Otherwise list of pairs: ((keys), value) for all items will be returned.
- @param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type."""
- all_items = []
- if key_type is not None:
- keys_used_so_far = set()
- direct_key = str(key_type)
- if direct_key in self.__dict__:
- for key, keys in self.__dict__[direct_key].items():
- if not keys in keys_used_so_far:
- keys_used_so_far.add(keys)
- if return_all_keys:
- all_items.append((keys, self.items_dict[keys]))
- else:
- all_items.append((key, self.items_dict[keys]))
- else:
- for keys, value in self.items_dict.items():
- all_items.append((keys, value))
- return all_items
+ result = self.iteritems(key_type, return_all_keys)
+ if not _python3:
+ result = list(result)
+ return result
+ items.__doc__ = iteritems.__doc__
+
def keys(self, key_type=None):
""" Returns a copy of the dictionary's keys.
@@ -464,7 +458,7 @@ def test_multi_key_dict():
for k in keys:
all_items.append( (tuple(k), m[k[0]]) )
- res = m.items()
+ res = list(m.items())
assert (all_items == res), 'items() (all items): expected {0},\n\t\t\t\tbut collected {1}'.format(all_items, res)
# now test deletion..
@@ -585,7 +579,7 @@ def test_multi_key_dict():
# test items..
exp_items = [((n,), 'now')]
- r = l.items()
+ r = list(l.items())
assert(r == exp_items), 'Expected for items(): tuple of keys: {0}, but got: {1}'.format(r, exp_items)
assert(exp_items[0][1] == 'now'), 'Expected for items(): value: {0}, but got: {1}'.format('now',
exp_items[0][1]) | Refactors 'items' to use 'iteritems'
Fixes 'iteritems' to yield each value just once - as
'items' used to do. | formiaczek_multi_key_dict | train |
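For illustration: a simplified Python sketch of the dedup logic above — several keys alias the same underlying entry, so a seen set ensures each value is yielded exactly once; items() can then return the generator directly on Python 3 and materialise a list on Python 2. Names are illustrative.

```python
def iter_unique_items(key_to_group, group_to_value):
    seen = set()
    for key, group in key_to_group.items():
        if group in seen:  # another key already yielded this entry
            continue
        seen.add(group)
        yield group, group_to_value[group]

key_to_group = {'a': ('a', 'b'), 'b': ('a', 'b'), 'c': ('c',)}
group_to_value = {('a', 'b'): 1, ('c',): 2}
print(sorted(iter_unique_items(key_to_group, group_to_value)))
# -> [(('a', 'b'), 1), (('c',), 2)] — each entry appears once
```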
d891600a89931cbb2a11bd63cc5a673663d6fc10 | diff --git a/providers/dns/vscale/vscale.go b/providers/dns/vscale/vscale.go
index <HASH>..<HASH> 100644
--- a/providers/dns/vscale/vscale.go
+++ b/providers/dns/vscale/vscale.go
@@ -128,6 +128,7 @@ func (d *DNSProvider) Present(domain, token, keyAuth string) error {
// CleanUp removes a TXT record used for DNS-01 challenge.
func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
fqdn, _ := dns01.GetRecord(domain, keyAuth)
+ recordName := dns01.UnFqdn(fqdn)
domainObj, err := d.client.GetDomainByName(domain)
if err != nil {
@@ -142,7 +143,7 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
// Delete records with specific FQDN
var lastErr error
for _, record := range records {
- if record.Name == fqdn {
+ if record.Name == recordName {
err = d.client.DeleteRecord(domainObj.ID, record.ID)
if err != nil {
lastErr = fmt.Errorf("vscale: %v", err) | fix: TXT records clean up for Vscale DNS provider (#<I>) | go-acme_lego | train |
7369575844e93873973fd49b86f302619c555f29 | diff --git a/montblanc/impl/biro/v4/cpu/SolverCPU.py b/montblanc/impl/biro/v4/cpu/SolverCPU.py
index <HASH>..<HASH> 100644
--- a/montblanc/impl/biro/v4/cpu/SolverCPU.py
+++ b/montblanc/impl/biro/v4/cpu/SolverCPU.py
@@ -164,7 +164,7 @@ class SolverCPU(object):
# e^(2*pi*sqrt(u*l+v*m+w*n)*frequency/C).
# Dim. ntime x na x nchan x nsrcs
- cplx_phase = ne.evaluate('exp(2*pi*1j*p*f/C)', {
+ cplx_phase = ne.evaluate('exp(-2*pi*1j*p*f/C)', {
'p': phase[:, :, :, np.newaxis],
'f': freq[np.newaxis, np.newaxis, np.newaxis, :],
'C': montblanc.constants.C,
diff --git a/montblanc/impl/biro/v4/gpu/RimeEKBSqrt.py b/montblanc/impl/biro/v4/gpu/RimeEKBSqrt.py
index <HASH>..<HASH> 100644
--- a/montblanc/impl/biro/v4/gpu/RimeEKBSqrt.py
+++ b/montblanc/impl/biro/v4/gpu/RimeEKBSqrt.py
@@ -149,7 +149,7 @@ void rime_jones_EKBSqrt_impl(
+ s_uvw[threadIdx.z][threadIdx.y].y*s_lm.y
+ s_uvw[threadIdx.z][threadIdx.y].x*s_lm.x;
- phase *= T(TWO_PI_OVER_C) * freq[threadIdx.x];
+ phase *= T(-TWO_PI_OVER_C) * freq[threadIdx.x];
typename Tr::ct cplx_phase;
Po::sincos(phase, &cplx_phase.y, &cplx_phase.x); | Correct phase calculation in v4.
In v4, the phase was previously calculated using 2*pi*1j.
It is now calculated with -2*pi*1j, which agrees with the RIME,
and with this commit, the v2 and v4 visibilities are the same.
However, v2 is still calculated with 2*pi*1j. | ska-sa_montblanc | train |
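For illustration: a hedged numpy sketch of the sign convention — the visibility phase term should be exp(-2*pi*i*(u*l + v*m + w*n)*freq/C), and flipping the exponent's sign simply conjugates the term, which is why v2 and v4 disagreed before this fix. The symbols below are schematic, not montblanc code.

```python
import numpy as np

C = 299792458.0  # speed of light, m/s

def phase_term(path_delay, freq, sign=-1):
    # sign=-1 matches the RIME convention this commit adopts for v4.
    return np.exp(sign * 2j * np.pi * path_delay * freq / C)

p = phase_term(1.5, 1.4e9)
# The old 2*pi*1j convention is exactly the complex conjugate:
print(np.isclose(p, np.conj(phase_term(1.5, 1.4e9, sign=+1))))  # True
```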
3225e604853b48a0a8147cfd934454379fc3ed76 | diff --git a/src/com/tapsterrock/mpx/MPXTimeFormat.java b/src/com/tapsterrock/mpx/MPXTimeFormat.java
index <HASH>..<HASH> 100644
--- a/src/com/tapsterrock/mpx/MPXTimeFormat.java
+++ b/src/com/tapsterrock/mpx/MPXTimeFormat.java
@@ -26,6 +26,7 @@ package com.tapsterrock.mpx;
import java.text.SimpleDateFormat;
import java.util.Date;
+import java.util.TimeZone;
import java.text.ParseException;
/**
@@ -39,6 +40,8 @@ class MPXTimeFormat
*/
public MPXTimeFormat()
{
+ m_format = new SimpleDateFormat ();
+ m_format.setTimeZone(TimeZone.getTimeZone("GMT"));
}
/**
@@ -107,5 +110,5 @@ class MPXTimeFormat
/**
* Internal SimpleDateFormat object used to carry out the formatting work.
*/
- private SimpleDateFormat m_format = new SimpleDateFormat ();
+ private SimpleDateFormat m_format;
}
\ No newline at end of file | Updated to force time zone to GMT. | joniles_mpxj | train |
16068e4d150572cbc1a87436b3b5698b8bcf8f89 | diff --git a/holoviews/plotting/widgets/__init__.py b/holoviews/plotting/widgets/__init__.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/widgets/__init__.py
+++ b/holoviews/plotting/widgets/__init__.py
@@ -125,7 +125,7 @@ class NdWidget(param.Parameterized):
self.renderer = renderer
# Create mock NdMapping to hold the common dimensions and keys
self.mock_obj = NdMapping([(k, None) for k in self.keys],
- kdims=self.dimensions)
+ kdims=self.dimensions, sort=False)
NdWidget.widgets[self.id] = self | Handle unsorted data in widgets | pyviz_holoviews | train |
2667336f6ebe9809963ebe04f5cfb9448109141c | diff --git a/src/main/java/org/efaps/ui/wicket/components/values/AbstractField.java b/src/main/java/org/efaps/ui/wicket/components/values/AbstractField.java
index <HASH>..<HASH> 100644
--- a/src/main/java/org/efaps/ui/wicket/components/values/AbstractField.java
+++ b/src/main/java/org/efaps/ui/wicket/components/values/AbstractField.java
@@ -32,6 +32,7 @@ import org.efaps.ui.wicket.behaviors.AjaxFieldUpdateBehavior;
import org.efaps.ui.wicket.models.field.AbstractUIField;
import org.efaps.ui.wicket.models.field.FieldConfiguration;
import org.efaps.ui.wicket.models.field.validators.StandartValidator;
+import org.efaps.ui.wicket.models.objects.UITable;
import org.efaps.util.EFapsException;
/**
@@ -87,7 +88,9 @@ public abstract class AbstractField<T extends Serializable>
}
add(new AjaxFieldUpdateBehavior(eventName, Model.of(this.uiField), false));
}
- if (getFieldConfig().hasProperty(UIFormFieldProperty.WIDTH)) {
+        // only set the width here if it was explicitly configured and the field is not part of a table
+ if (getFieldConfig().hasProperty(UIFormFieldProperty.WIDTH)
+ && !(_model.getObject().getParent() instanceof UITable)) {
add(new AttributeAppender("style", "width:" + getFieldConfig().getWidth(), ";"));
}
add(new AttributeAppender("style", "text-align:" + getFieldConfig().getAlign(), ";")); | - Issue #<I>: Update of Webapp to do not use UIFieldValue any more
field with m ust not be set for fields inside tables | eFaps_eFaps-WebApp | train |
18dff36bbcdd22bd3ba76846d68d860a432387a0 | diff --git a/shutit_main.py b/shutit_main.py
index <HASH>..<HASH> 100644
--- a/shutit_main.py
+++ b/shutit_main.py
@@ -153,35 +153,30 @@ def resolve_dependencies(config_dict, shutit_map, to_build, depender):
and config_dict[dependee_id]['build_ifneeded']):
to_build.append(dependee)
config_dict[dependee_id]['build'] = True
-def check_dependees_exist(config_dict, shutit_map, depender):
- for dependee_id in depender.depends_on:
- dependee = shutit_map.get(dependee_id)
- # If the module id isn't there, there's a problem.
- if dependee == None:
- return (dependee_id + ' module not found in paths: ' +
- str(config_dict['host']['shutit_module_paths']) +
- '\nCheck your --shutit_module_path setting and ensure that ' +
- 'all modules configured to be built are in that path setting, ' +
- 'eg "--shutit_module_path /path/to/other/module/:." See also help.')
-def check_dependees_build(config_dict, shutit_map, depender):
- for dependee_id in depender.depends_on:
- dependee = shutit_map.get(dependee_id)
- # If depender is installed or will be installed, so must the dependee
- if not (config_dict[dependee.module_id]['build'] or dependee.is_installed(config_dict)):
- return ('depender module id: [' + depender.module_id + '] ' +
- 'is configured: "build:yes" or is already built ' +
- 'but dependee module_id: [' + dependee_id + '] ' +
- 'is not configured: "build:yes"')
-def check_dependees_order(config_dict, shutit_map, depender):
- for dependee_id in depender.depends_on:
- dependee = shutit_map.get(dependee_id)
- # If it depends on a module id, then the module id should be higher up in the run order.
- if dependee.run_order > depender.run_order:
- return ('depender module id: ' + depender.module_id +
- ' (run order: ' + str(depender.run_order) + ') ' +
- 'depends on dependee module_id: ' + dependee_id +
- ' (run order: ' + str(dependee.run_order) + ') ' +
- 'but the latter is configured to run after the former')
+def check_dependee_exists(config_dict, depender, dependee, dependee_id):
+ # If the module id isn't there, there's a problem.
+ if dependee == None:
+ return (dependee_id + ' module not found in paths: ' +
+ str(config_dict['host']['shutit_module_paths']) +
+ ' but needed for ' + depender.module_id +
+ '\nCheck your --shutit_module_path setting and ensure that ' +
+ 'all modules configured to be built are in that path setting, ' +
+ 'eg "--shutit_module_path /path/to/other/module/:." See also help.')
+def check_dependee_build(config_dict, depender, dependee, dependee_id):
+ # If depender is installed or will be installed, so must the dependee
+ if not (config_dict[dependee.module_id]['build'] or dependee.is_installed(config_dict)):
+ return ('depender module id: [' + depender.module_id + '] ' +
+ 'is configured: "build:yes" or is already built ' +
+ 'but dependee module_id: [' + dependee_id + '] ' +
+ 'is not configured: "build:yes"')
+def check_dependee_order(config_dict, depender, dependee, dependee_id):
+ # If it depends on a module id, then the module id should be higher up in the run order.
+ if dependee.run_order > depender.run_order:
+ return ('depender module id: ' + depender.module_id +
+ ' (run order: ' + str(depender.run_order) + ') ' +
+ 'depends on dependee module_id: ' + dependee_id +
+ ' (run order: ' + str(dependee.run_order) + ') ' +
+ 'but the latter is configured to run after the former')
def make_dep_graph(config_dict, depender):
digraph = ''
for dependee_id in depender.depends_on:
@@ -201,15 +196,29 @@ def check_deps(config_dict, shutit_map, shutit_id_list):
# Add any deps we may need by extending to_build and altering config_dict
[resolve_dependencies(config_dict, shutit_map, to_build, module) for module in to_build]
+ triples = []
+ for depender in to_build:
+ for dependee_id in depender.depends_on:
+ triples.append((depender, shutit_map.get(dependee_id), dependee_id))
+
# Dep checking
def err_checker(generator):
for err in generator:
if not err: continue
util.log(util.red(util.print_modules(shutit_map,shutit_id_list,config_dict)))
util.fail(err)
- err_checker((check_dependees_exist(config_dict, shutit_map, module) for module in to_build))
- err_checker((check_dependees_build(config_dict, shutit_map, module) for module in to_build))
- err_checker((check_dependees_order(config_dict, shutit_map, module) for module in to_build))
+ err_checker((
+ check_dependee_exists(config_dict, depender, dependee, dependee_id)
+ for depender, dependee, dependee_id in triples
+ ))
+ err_checker((
+ check_dependee_build(config_dict, depender, dependee, dependee_id)
+ for depender, dependee, dependee_id in triples
+ ))
+ err_checker((
+ check_dependee_order(config_dict, depender, dependee, dependee_id)
+ for depender, dependee, dependee_id in triples
+ ))
# Show dependency graph
if config_dict['build']['show_depgraph_only']: | More complex but more flexible way of checking for errors | ianmiell_shutit | train |
010fb64910221b4c613c2d79816a5ce16083fc0f | diff --git a/lib/redmine.js b/lib/redmine.js
index <HASH>..<HASH> 100644
--- a/lib/redmine.js
+++ b/lib/redmine.js
@@ -295,8 +295,8 @@ Redmine.prototype.delete_user = function(id, callback) {
* Listing time entries
* http://www.redmine.org/projects/redmine/wiki/Rest_TimeEntries#Listing-time-entries
*/
-Redmine.prototype.time_entries = function(callback) {
- this.request('GET', '/time_entries.json', {}, callback);
+Redmine.prototype.time_entries = function(params callback) {
+ this.request('GET', '/time_entries.json', params, callback);
};
/**
@@ -337,8 +337,8 @@ Redmine.prototype.delete_time_entry = function(id, callback) {
* Returns a paginated list of the project memberships. :project_id can be either the project numerical id or the project identifier.
* http://www.redmine.org/projects/redmine/wiki/Rest_Memberships#GET
*/
-Redmine.prototype.membership_by_project_id = function(id, callback) {
- this.request('GET', '/projects/' + id + '/memberships.json', {}, callback);
+Redmine.prototype.membership_by_project_id = function(id,params, callback) {
+ this.request('GET', '/projects/' + id + '/memberships.json', params, callback);
};
/**
@@ -516,8 +516,8 @@ Redmine.prototype.delete_wiki = function(id, title, callback) {
* Returns the list of all custom queries visible by the user (public and private queries) for all projects.
* http://www.redmine.org/projects/redmine/wiki/Rest_Queries#GET
*/
-Redmine.prototype.queries = function(callback) {
- this.request('GET', '/queries.json', {}, callback);
+Redmine.prototype.queries = function(params, callback) {
+ this.request('GET', '/queries.json', params, callback);
}; | added params to query and membership | zanran_node-redmine | train |
5791c8bbcab0aa983a128d295261eb23fd103b55 | diff --git a/scanpy/tools/__init__.py b/scanpy/tools/__init__.py
index <HASH>..<HASH> 100644
--- a/scanpy/tools/__init__.py
+++ b/scanpy/tools/__init__.py
@@ -11,3 +11,4 @@ from ._leiden import leiden
from ._louvain import louvain
from ._sim import sim
from ._score_genes import score_genes, score_genes_cell_cycle
+from ._dendrogram import dendrogram | added dendrogram to list of tools | theislab_scanpy | train |
8ccb6bf3d6b28807a9e242872f9a16e33a804059 | diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go
index <HASH>..<HASH> 100644
--- a/nomad/state/state_store_test.go
+++ b/nomad/state/state_store_test.go
@@ -144,6 +144,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing
// This test checks that:
// 1) The job is denormalized
// 2) Allocations are denormalized and updated with the diff
+// 3) A stopped alloc's Job is left unmodified
func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
state := testStateStore(t)
alloc := mock.Alloc()
@@ -168,6 +169,12 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
require.NoError(state.UpsertAllocs(900, []*structs.Allocation{stoppedAlloc, preemptedAlloc}))
require.NoError(state.UpsertJob(999, job))
+ // modify job and ensure that stopped and preempted alloc point to original Job
+ mJob := job.Copy()
+ mJob.TaskGroups[0].Name = "other"
+
+ require.NoError(state.UpsertJob(1001, mJob))
+
eval := mock.Eval()
eval.JobID = job.ID
@@ -179,7 +186,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
AllocUpdateRequest: structs.AllocUpdateRequest{
AllocsUpdated: []*structs.Allocation{alloc},
AllocsStopped: []*structs.AllocationDiff{stoppedAllocDiff},
- Job: job,
+ Job: mJob,
},
EvalID: eval.ID,
AllocsPreempted: []*structs.AllocationDiff{preemptedAllocDiff},
@@ -194,6 +201,11 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
require.NoError(err)
assert.Equal(alloc, out)
+ outJob, err := state.JobByID(ws, job.Namespace, job.ID)
+ require.NoError(err)
+ require.Equal(mJob.TaskGroups, outJob.TaskGroups)
+ require.NotEmpty(job.TaskGroups, outJob.TaskGroups)
+
updatedStoppedAlloc, err := state.AllocByID(ws, stoppedAlloc.ID)
require.NoError(err)
assert.Equal(stoppedAllocDiff.DesiredDescription, updatedStoppedAlloc.DesiredDescription)
@@ -201,6 +213,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
assert.Equal(stoppedAllocDiff.ClientStatus, updatedStoppedAlloc.ClientStatus)
assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex)
assert.Equal(planModifyIndex, updatedStoppedAlloc.AllocModifyIndex)
+ assert.Equal(job.TaskGroups, updatedStoppedAlloc.Job.TaskGroups)
updatedPreemptedAlloc, err := state.AllocByID(ws, preemptedAlloc.ID)
require.NoError(err)
@@ -208,6 +221,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
assert.Equal(preemptedAllocDiff.PreemptedByAllocation, updatedPreemptedAlloc.PreemptedByAllocation)
assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex)
assert.Equal(planModifyIndex, updatedPreemptedAlloc.AllocModifyIndex)
+ assert.Equal(job.TaskGroups, updatedPreemptedAlloc.Job.TaskGroups)
index, err := state.Index("allocs")
require.NoError(err)
@@ -219,6 +233,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) {
require.NoError(err)
require.NotNil(evalOut)
assert.EqualValues(planModifyIndex, evalOut.ModifyIndex)
+
}
// This test checks that the deployment is created and allocations count towards | test that stopped alloc jobs aren't modified
When an alloc is stopped, test that we don't update the job found in
alloc with a new job that is no longer relevant for this alloc. | hashicorp_nomad | train
3cd0db37f0750b9dc6ca5d3fe854846c0274fb2f | diff --git a/src/styles/points/points.js b/src/styles/points/points.js
index <HASH>..<HASH> 100755
--- a/src/styles/points/points.js
+++ b/src/styles/points/points.js
@@ -104,7 +104,10 @@ Object.assign(Points, {
}
}
}
- style.sprite = sprite;
+ else if (sprite) {
+ log.warn(`Style: in style '${this.name}', sprite '${sprite}' was specified, but texture '${this.texture}' has no sprites`);
+ sprite = null;
+ }
// Sets texcoord scale if needed (e.g. for sprite sub-area)
let sprite_info; | points: warn when sprite is specified, but texture has no sprites defined | tangrams_tangram | train |
39549c0d5760e65f3956b3b69a0475b78824fe96 | diff --git a/base/isomorphism/src/main/java/org/openscience/cdk/isomorphism/matchers/QueryAtomContainer.java b/base/isomorphism/src/main/java/org/openscience/cdk/isomorphism/matchers/QueryAtomContainer.java
index <HASH>..<HASH> 100644
--- a/base/isomorphism/src/main/java/org/openscience/cdk/isomorphism/matchers/QueryAtomContainer.java
+++ b/base/isomorphism/src/main/java/org/openscience/cdk/isomorphism/matchers/QueryAtomContainer.java
@@ -1776,9 +1776,10 @@ public class QueryAtomContainer extends QueryChemObject implements IQueryAtomCon
if (optset.contains(TOTAL_DEGREE))
expr.and(new Expr(DEGREE,
atom.getBondCount() + atom.getImplicitHydrogenCount()));
- if (optset.contains(IS_IN_RING) ||
- optset.contains(IS_IN_CHAIN))
- expr.and(new Expr(atom.isInRing() ? IS_IN_RING : IS_IN_CHAIN));
+ if (optset.contains(IS_IN_RING) && atom.isInRing())
+ expr.and(new Expr(IS_IN_RING));
+ if (optset.contains(IS_IN_CHAIN) && !atom.isInRing())
+ expr.and(new Expr(IS_IN_CHAIN));
if (optset.contains(IMPL_H_COUNT))
expr.and(new Expr(IMPL_H_COUNT));
if (optset.contains(RING_BOND_COUNT)) { | Match the semantic of the bond expressions. Unless someone asks for IS_IN_CHAIN don't add it in, hexane (acyclic) is a subsgraph of cyclohexane (cyclic). | cdk_cdk | train |
c79cd1d1bb902f5874a6126d84432cc887b1b05d | diff --git a/simulation.py b/simulation.py
index <HASH>..<HASH> 100644
--- a/simulation.py
+++ b/simulation.py
@@ -906,11 +906,3 @@ def merge_tables(target, tables, columns=None):
nm[onto] = {}
return frames[target]
-
-
-def partial_update(update, outdf_name, outfname):
- if not len(update):
- return
- s = get_table(outdf_name).get_column(outfname)
- s.loc[update.index] = update
- add_column(outdf_name, outfname, s)
diff --git a/tests/test_simulation.py b/tests/test_simulation.py
index <HASH>..<HASH> 100644
--- a/tests/test_simulation.py
+++ b/tests/test_simulation.py
@@ -350,10 +350,3 @@ def test_table_source_local_cols(clear_sim, df):
sim.add_column('source', 'new', pd.Series(['a', 'b', 'c'], index=df.index))
assert sim.get_table('source').local_columns == ['a', 'b']
-
-
-def test_tables(df):
- sim.add_table('test_frame', df)
- s = pd.Series([1], index=['x'])
- sim.partial_update(pd.Series(), "test_frame", "a")
- sim.partial_update(s, "test_frame", 'a') | addressing most of matt's issues dicussed on github | UDST_orca | train |
785fd657d0c94553f52b77540e54660e8e2ab39c | diff --git a/lib/classes/mail.class.php b/lib/classes/mail.class.php
index <HASH>..<HASH> 100644
--- a/lib/classes/mail.class.php
+++ b/lib/classes/mail.class.php
@@ -206,6 +206,7 @@ class mail
$body = eregi_replace("[\]", '', $body);
+ $body .= "<br/>\n"._('Si le lien n\'est pas cliquable, copiez-collez le dans votre navigateur.')."<br/>\n";
$body .= "<br/><br/><br/><br/>\n\n\n\n";
$body .= '<div style="font-style:italic;">' . _('phraseanet::signature automatique des notifications par mail, infos a l\'url suivante') . "</div>\n";
$body .= '<div><a href="' . $registry->get('GV_ServerName') . '">' . $registry->get('GV_ServerName') . "</a></div>\n"; | Add notice "copy paste this link if you can not click it" in mails | alchemy-fr_Phraseanet | train |
34e672f0e4590b248472712b9738c4d6a8c3cf1a | diff --git a/auth.go b/auth.go
index <HASH>..<HASH> 100644
--- a/auth.go
+++ b/auth.go
@@ -1,4 +1,4 @@
-// Package goauth implements cookie/session based authentication. Intended for
+// Package httpauth implements cookie/session based authentication. Intended for
// use with the net/http or github.com/gorilla/mux packages, but may work with
// github.com/codegangsta/martini as well. Credentials are stored as a username
// + password hash, computed with bcrypt.
@@ -12,7 +12,7 @@
// cookie, and can be accessed with the Messages function.
//
// Example source can be found at
-// https://github.com/apexskier/goauth/blob/master/examples/server.go
+// https://github.com/apexskier/httpauth/blob/master/examples/server.go
package httpauth
import ( | Fix a couple of mentions of the old goauth package name. | apexskier_httpauth | train |
aed906af97cfa86801951a6a97251084fac6dc57 | diff --git a/railties/lib/rails/generators/app_base.rb b/railties/lib/rails/generators/app_base.rb
index <HASH>..<HASH> 100644
--- a/railties/lib/rails/generators/app_base.rb
+++ b/railties/lib/rails/generators/app_base.rb
@@ -246,8 +246,16 @@ module Rails
# is easier to silence stdout in the existing test suite this way. The
# end-user gets the bundler commands called anyway, so no big deal.
#
+ # We unset temporary bundler variables to load proper bundler and Gemfile.
+ #
# Thanks to James Tucker for the Gem tricks involved in this call.
+
+ bundle_gemfile, rubyopt = ENV['BUNDLE_GEMFILE'], ENV['RUBYOPT']
+ ENV['BUNDLE_GEMFILE'], ENV['RUBYOPT'] = "", ""
+
print `"#{Gem.ruby}" "#{Gem.bin_path('bundler', 'bundle')}" #{command}`
+
+ ENV['BUNDLE_GEMFILE'], ENV['RUBYOPT'] = bundle_gemfile, rubyopt
end
def run_bundle | prevent using already loaded Gemfile for 'bundle install', fix #<I> | rails_rails | train |
425c451b7a0f71cdfd9fcf49b5a9caff18bfd398 | diff --git a/app/lib/webpack/create-chain.js b/app/lib/webpack/create-chain.js
index <HASH>..<HASH> 100644
--- a/app/lib/webpack/create-chain.js
+++ b/app/lib/webpack/create-chain.js
@@ -361,7 +361,7 @@ module.exports = function (cfg, configName) {
}
chain.optimization
- .concatenateModules(true)
+ .concatenateModules(cfg.ctx.mode.ssr !== true)
if (cfg.ctx.debug) {
// reset default webpack 4 minimizer
diff --git a/app/lib/webpack/ssr/plugin.server-side.js b/app/lib/webpack/ssr/plugin.server-side.js
index <HASH>..<HASH> 100644
--- a/app/lib/webpack/ssr/plugin.server-side.js
+++ b/app/lib/webpack/ssr/plugin.server-side.js
@@ -56,7 +56,6 @@ function getServerManifest (compilation) {
// do not emit anything else for server
compilation.deleteAsset(asset.name)
- // delete compilation.assets[asset.name]
})
return serverManifest | fix(app): SSR mode - not providing CSS generated by SFC on server-side | quasarframework_quasar | train |
8b15b26729d5395c0b95218490e3e8f5c3b0a864 | diff --git a/eZ/Publish/Core/Persistence/Legacy/Handler.php b/eZ/Publish/Core/Persistence/Legacy/Handler.php
index <HASH>..<HASH> 100644
--- a/eZ/Publish/Core/Persistence/Legacy/Handler.php
+++ b/eZ/Publish/Core/Persistence/Legacy/Handler.php
@@ -274,7 +274,7 @@ class Handler implements HandlerInterface
* @param Content\Search\TransformationProcessor $transformationProcessor Search Text Transformation processor
* @param array $config List of optional configuration flags:
* The flag 'defer_type_update' defines if content types should be
- * published immediatly (false), when the
+ * published immediately (false), when the
* {@link \eZ\Publish\SPI\Persistence\Content\Type\Handler::publish()} method
* is called, or if a background process should be triggered (true), which
* is then executed by the old eZ Publish core.
@@ -609,7 +609,8 @@ class Handler implements HandlerInterface
$this->getLocationGateway(),
$this->getLocationMapper(),
$this->contentHandler(),
- $this->getContentMapper()
+ $this->getContentMapper(),
+ $this->urlAliasHandler()
);
}
return $this->locationHandler; | Added: URL alias handler into Location handler | ezsystems_ezpublish-kernel | train |
c54e2d3710c843c0f59947a2b5f07132323e8464 | diff --git a/combine/dev.py b/combine/dev.py
index <HASH>..<HASH> 100644
--- a/combine/dev.py
+++ b/combine/dev.py
@@ -1,3 +1,4 @@
+import logging
import os
import time
from http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler
@@ -9,6 +10,9 @@ from watchdog.events import FileSystemEventHandler, FileCreatedEvent, DirModifie
from .files.template import TemplateFile
+logger = logging.getLogger(__file__)
+
+
class Watcher:
def __init__(self, path, combine):
self.path = path
@@ -60,12 +64,20 @@ class EventHandler(FileSystemEventHandler):
def reload_combine(self):
click.secho('Reloading combine', fg='cyan')
- self.combine.reload()
+ try:
+ self.combine.reload()
+ except Exception as e:
+ logger.error('Error reloading', exc_info=e)
+ click.secho('There was an error! See output above.', fg='red')
def rebuild_site(self, only_paths=None):
click.secho(f'Rebuilding {only_paths or "site"}', fg='cyan')
- self.combine.build(only_paths)
- click.secho('Site built', fg='green')
+ try:
+ self.combine.build(only_paths)
+ click.secho('Site built', fg='green')
+ except Exception as e:
+ logger.error('Error building', exc_info=e)
+ click.secho('There was an error! See output above.', fg='red')
class Server: | log error and continue in dev watcher | dropseed_combine | train |
9e6e214385e901bd965e87828f4c1f37308ed571 | diff --git a/src/vizabi-gapminder.js b/src/vizabi-gapminder.js
index <HASH>..<HASH> 100644
--- a/src/vizabi-gapminder.js
+++ b/src/vizabi-gapminder.js
@@ -24,7 +24,7 @@ import DonutChart from 'tools/donutchart';
import {
waffle as WaffleReader
}
-from 'readers/_index';
+ from 'readers/_index';
var language = {
id: "en",
@@ -40,7 +40,9 @@ var locationArray = window.location.href.split("/");
var localUrl = locationArray.splice(0, locationArray.indexOf("preview")).join("/") + "/preview/";
globals.gapminder_paths = {
- wsUrl: 'http://waffle-server-dev.gapminderdev.org',
+ // Explanation what is going on in the code below:
+ // In order to use WS server other than specified in WS_SERVER you need to fill it in manually
+    wsUrl: typeof WS_SERVER === 'undefined' ? 'https://waffle-server-stage.gapminderdev.org' : WS_SERVER,
baseUrl: localUrl
};
@@ -478,8 +480,8 @@ BubbleChart.define('default_model', {
which: "geo.name"
},
size_label: {
- use: "constant"
- },
+ use: "constant"
+ },
axis_y: {
use: "indicator",
@@ -562,8 +564,8 @@ PopByAge.define('default_model', {
show: {
_defs_: {
"age": [
- [0, 95]
- ] //show 0 through 100
+ [0, 95]
+ ] //show 0 through 100
}
},
grouping: 5
@@ -614,38 +616,38 @@ PopByAge.define('default_model', {
DonutChart.define('default_model', {
state: {
- // available time would have the range of 1990-2012 years (%Y), with the deafult position at 2000
- time: {
- start: "1990",
- end: "2012",
- value: "2000"
- },
- //Entities include all ("*") geo's of category "regions" -- equivalent to 'geo: ["asi", "ame", "eur", "afr"]'
- entities: {
- dim: "geo",
- show: {
- _defs_: {
- "geo": ["*"],
- "geo.cat": ["region"]
- }
- }
- },
- //Markers correspond to visuals that we want to show. We have label, axis and color
- marker: {
- space: ["entities", "time"],
- label: {
- use: "property",
- which: "geo.name"
- },
- axis: {
- use: "indicator",
- which: "population"
- },
- color: {
- use: "property",
- which: "geo.name"
- }
+ // available time would have the range of 1990-2012 years (%Y), with the deafult position at 2000
+ time: {
+ start: "1990",
+ end: "2012",
+ value: "2000"
+ },
+ //Entities include all ("*") geo's of category "regions" -- equivalent to 'geo: ["asi", "ame", "eur", "afr"]'
+ entities: {
+ dim: "geo",
+ show: {
+ _defs_: {
+ "geo": ["*"],
+ "geo.cat": ["region"]
}
+ }
+ },
+ //Markers correspond to visuals that we want to show. We have label, axis and color
+ marker: {
+ space: ["entities", "time"],
+ label: {
+ use: "property",
+ which: "geo.name"
+ },
+ axis: {
+ use: "indicator",
+ which: "population"
+ },
+ color: {
+ use: "property",
+ which: "geo.name"
+ }
+ }
},
data: {
reader: "csv",
@@ -662,7 +664,7 @@ DonutChart.define('default_model', {
//Waffle Server Reader custom path
-WaffleReader.define('basepath', "http://52.18.235.31:8001/values/waffle");
+WaffleReader.define('basepath', globals.gapminder_paths.wsUrl + "/api/graphs/stats/vizabi-tools");
//preloading mountain chart precomputed shapes
MCComponent.define("preload", function(done) { | feat(routes): migrate routes and data from gapminder tools page to waffle server | vizabi_vizabi | train |
0e3766169a1061eb7e6ebe3c063085278bcd70ca | diff --git a/datascience/tables.py b/datascience/tables.py
index <HASH>..<HASH> 100644
--- a/datascience/tables.py
+++ b/datascience/tables.py
@@ -1256,7 +1256,7 @@ class Table(collections.abc.MutableMapping):
# original single column join
return self._join(column_label, other, other_label)
- def _join(self, column_label, other, other_label=None):
+ def _join(self, column_label, other, other_label=[]):
"""joins when COLUMN_LABEL is a string"""
if self.num_rows == 0 or other.num_rows == 0:
return None
@@ -1265,40 +1265,18 @@ class Table(collections.abc.MutableMapping):
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)
+ return self._join_helper([column_label], self_rows, other, [other_label], other_rows)
- # Gather joined rows from self_rows that have join values in other_rows
- joined_rows = []
- for v, rows in self_rows.items():
- if v in other_rows:
- joined_rows += [row + o for row in rows for o in other_rows[v]]
- if not joined_rows:
- return None
-
- # Build joined table
- self_labels = list(self.labels)
- other_labels = [self._unused_label(s) for s in other.labels]
- other_labels_map = dict(zip(other.labels, other_labels))
- joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
-
- # Copy formats from both tables
- joined._formats.update(self._formats)
- for label in other._formats:
- joined._formats[other_labels_map[label]] = other._formats[label]
-
- # Remove redundant column, but perhaps save its formatting
- del joined[other_labels_map[other_label]]
- if column_label not in self._formats and other_label in other._formats:
- joined._formats[column_label] = other._formats[other_label]
-
- return joined.move_to_start(column_label).sort(column_label)
-
- def _multiple_join(self, column_label, other, other_label=None):
+ def _multiple_join(self, column_label, other, other_label=[]):
"""joins when column_label is a non-string iterable"""
assert len(column_label) == len(other_label), 'unequal number of columns'
self_rows = self._multi_index(column_label)
other_rows = other._multi_index(other_label)
+ return self._join_helper(column_label, self_rows, other, other_label, other_rows)
+
+ def _join_helper(self, column_label, self_rows, other, other_label, other_rows):
# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items(): | refactored to general _join_helper function | data-8_datascience | train |
f7e5a36e5de1ec1f55b2f71d75f377c006e54de7 | diff --git a/src/main/java/org/fit/cssbox/layout/BlockBox.java b/src/main/java/org/fit/cssbox/layout/BlockBox.java
index <HASH>..<HASH> 100644
--- a/src/main/java/org/fit/cssbox/layout/BlockBox.java
+++ b/src/main/java/org/fit/cssbox/layout/BlockBox.java
@@ -1360,7 +1360,7 @@ public class BlockBox extends ElementBox
ab.x = ab.x - cb.x;
ab.y = ab.y - cb.y;
if (!absReference.isblock || ((BlockBox) absReference).getFloating() == FLOAT_NONE) //not-floating boxes: place below
- coords.top = ab.y + ab.height - 1 - cblock.emargin.top - cblock.border.top;
+ coords.top = ab.y + ab.height - cblock.emargin.top - cblock.border.top;
else //floating blocks: place top-aligned
coords.top = ab.y - cblock.emargin.top - cblock.border.top;
} | Static position fix (one pixel bug) | radkovo_CSSBox | train |
35a08ae0ed28b3ebb0a789b5db56221ba845f93a | diff --git a/language/setup.py b/language/setup.py
index <HASH>..<HASH> 100644
--- a/language/setup.py
+++ b/language/setup.py
@@ -55,7 +55,7 @@ REQUIREMENTS = [
setup(
name='google-cloud-language',
- version='0.23.0',
+ version='0.23.1',
description='Python Client for Google Cloud Natural Language',
long_description=README,
namespace_packages=[ | Bump language version. (#<I>) | googleapis_google-cloud-python | train |
bab7e469830b1abc9f98fee13d052de5b8585f97 | diff --git a/src/Psalm/Type/Atomic/TList.php b/src/Psalm/Type/Atomic/TList.php
index <HASH>..<HASH> 100644
--- a/src/Psalm/Type/Atomic/TList.php
+++ b/src/Psalm/Type/Atomic/TList.php
@@ -144,6 +144,12 @@ class TList extends \Psalm\Type\Atomic
} else {
$input_type_param = $input_type->getGenericValueType();
}
+ } elseif ($input_type instanceof Atomic\TList) {
+ if ($offset === 0) {
+ continue;
+ }
+
+ $input_type_param = clone $input_type->type_param;
}
$type_param->replaceTemplateTypesWithStandins(
diff --git a/tests/Template/ClassTemplateTest.php b/tests/Template/ClassTemplateTest.php
index <HASH>..<HASH> 100644
--- a/tests/Template/ClassTemplateTest.php
+++ b/tests/Template/ClassTemplateTest.php
@@ -1864,6 +1864,33 @@ class ClassTemplateTest extends TestCase
/** @param Foo<array> $_ */
function takesFooArray($_): void {}',
],
+ 'allowListAcceptance' => [
+ '<?php
+ /** @template T */
+ class Collection
+ {
+ /** @var list<T> */
+ public $values;
+
+ /** @param list<T> $values */
+ function __construct(array $values)
+ {
+ $this->values = $values;
+ }
+ }
+
+ /** @return Collection<string> */
+ function makeStringCollection()
+ {
+ return new Collection(getStringList()); // gets typed as Collection<mixed> for some reason
+ }
+
+ /** @return list<string> */
+ function getStringList(): array
+ {
+ return ["foo", "baz"];
+ }'
+ ],
];
} | Fix #<I> - allow lists to accept list types | vimeo_psalm | train |
2ed3bc497956d888b0091510f794f9e33f6476e3 | diff --git a/vertx-db2-client/src/test/java/io/vertx/db2client/junit/DB2Resource.java b/vertx-db2-client/src/test/java/io/vertx/db2client/junit/DB2Resource.java
index <HASH>..<HASH> 100644
--- a/vertx-db2-client/src/test/java/io/vertx/db2client/junit/DB2Resource.java
+++ b/vertx-db2-client/src/test/java/io/vertx/db2client/junit/DB2Resource.java
@@ -30,6 +30,7 @@ import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy;
import io.vertx.core.net.JksOptions;
import io.vertx.db2client.DB2ConnectOptions;
+import org.testcontainers.utility.DockerImageName;
public class DB2Resource extends ExternalResource {
@@ -45,7 +46,7 @@ public class DB2Resource extends ExternalResource {
private boolean started = false;
private boolean isDb2OnZ = false;
private DB2ConnectOptions options;
- private final Db2Container instance = new Db2Container()
+ private final Db2Container instance = new Db2Container(DockerImageName.parse("ibmcom/db2").withTag("11.5.6.0"))
.acceptLicense()
.withLogConsumer(out -> System.out.print("[DB2] " + out.getUtf8String()))
.withUsername("vertx") | Make work with Java <I> | reactiverse_reactive-pg-client | train |
c254c02938aeba1842c91226fd820dba2034e0f2 | diff --git a/locksmith/mongoauth/management/commands/apireport.py b/locksmith/mongoauth/management/commands/apireport.py
index <HASH>..<HASH> 100644
--- a/locksmith/mongoauth/management/commands/apireport.py
+++ b/locksmith/mongoauth/management/commands/apireport.py
@@ -34,4 +34,4 @@ class Command(BaseCommand):
apicall(endpoint, settings.LOCKSMITH_SIGNING_KEY,
api=settings.LOCKSMITH_API_NAME, date=date,
endpoint=item['method'], key=item['key'],
- calls=item['count'].split('.')[0])
+ calls=int(item['count'])) | count was a float, not a str | sunlightlabs_django-locksmith | train |
f114947a25a6807172920b4363058ec38bc9f06e | diff --git a/interfaces/Emitter.php b/interfaces/Emitter.php
index <HASH>..<HASH> 100644
--- a/interfaces/Emitter.php
+++ b/interfaces/Emitter.php
@@ -3,68 +3,46 @@
/**
* Event Emitter Interface
*
- * Event synchronization point. Registers/removes listeners for events and triggers events. Supports registering
- * listeners by priority.
- *
* @package Nyx\Events
* @version 0.1.0
* @author Michal Chojnacki <[email protected]>
- * @copyright 2012-2016 Nyx Dev Team
- * @link http://docs.muyo.io/nyx/events/index.html
+ * @copyright 2012-2017 Nyx Dev Team
+ * @link https://github.com/unyx/nyx
*/
interface Emitter
{
/**
- * Magic constant used by {@see self::emit()}. See the docblock for the $event argument therein.
- */
- const CREATE_EMPTY_EVENT = '__emitter_create_empty_event';
-
- /**
* Triggers the listeners of a given event.
*
- * You may pass any additional arguments you wish to this method. They will be passed along to the listeners,
- * with the propagated Event instance always being the first argument (the $name will not be passed directly
- * as it will be available through the Event). However, be aware of what is passed as reference and what is not,
- * if you wish to modify the data within listeners and have it available with those changes within further
- * listeners.
- *
- * @param string|Event $name The name of the event to emit. You may also pass an Event object that
- * has its trigger name set {@see Event::setName()}.
- * @param Event|mixed $event The event to pass to the event handlers/listeners. If not supplied or
- * not an instance of events\Event an empty Event instance will be
- * created and all arguments after $name will be passed to the
- * listeners. If left as the CREATE_EMPTY_EVENT magic constant, this
- * argument will be completely ignored (ie. not passed to the listeners)
- * but will still trigger the creation of a base Event.
- * @param mixed ...$arguments The data to pass to listeners along with the Event, in the order given.
- * @return Event The Event after it has run through all registered listeners.
- * @throws \LogicException When only an Event object is passed but it does not have a valid
- * trigger name.
+ * @param string|Event $event The name of the event to emit or an object implementing the Event
+ * interface, whose name will be used.
+ * @param mixed ...$payload The data to pass to listeners, in the order given. If $event is an
+ * instance of the Event interface, it will be prepended to the $payload.
+ * @throws \InvalidArgumentException When $event is neither a string nor an Event instance.
*/
- public function emit($name, $event = self::CREATE_EMPTY_EVENT, ...$arguments) : Event;
+ public function emit($event, ...$payload);
/**
* Adds an event listener that listens to the specified event.
*
- * @param string $name The event to listen for.
- * @param callable $listener The listener to add.
- * @param integer $priority The higher this value, the earlier an event listener will be triggered
- * in the chain.
+ * @param string $event The event to listen for.
+ * @param callable $listener The listener to add.
+ * @param integer $priority The higher this value, the earlier an event listener will be triggered.
* @return $this
*/
- public function on(string $name, callable $listener, int $priority = 0) : self;
+ public function on(string $event, callable $listener, int $priority = 0) : Emitter;
/**
* Adds an event listener that listens to the specified event but only gets fired once and then removed from
* the stack.
*
- * @param string $name The event to listen for.
+ * @param string $event The event to listen for.
* @param callable $listener The listener to add.
* @param integer $priority The higher this value, the earlier an event listener will be triggered
* in the chain.
* @return $this
*/
- public function once(string $name, callable $listener, int $priority = 0) : self;
+ public function once(string $event, callable $listener, int $priority = 0) : Emitter;
/**
* Removes the specified listener from the stack of a given event, OR
@@ -72,11 +50,11 @@ interface Emitter
* removes the given listener from all events it's listening to when only $name is null, OR
* removes all listeners registered in this Emitter if both $name and $listener are null,
*
- * @param string $name The event to remove a listener from.
- * @param callable $listener The listener to remove.
+ * @param string $event The event to remove a listener from.
+ * @param callable $listener The listener to remove.
* @return $this
*/
- public function off(string $name = null, callable $listener = null) : self;
+ public function off(string $event = null, callable $listener = null) : Emitter;
/**
* Registers an Event Subscriber with this Emitter. The Subscriber is queried for a list of events
@@ -85,7 +63,7 @@ interface Emitter
* @param Subscriber $subscriber
* @return $this
*/
- public function register(Subscriber $subscriber) : self;
+ public function register(Subscriber $subscriber) : Emitter;
/**
* Deregisters an Event Subscriber from this Emitter, removing all listeners it has registered
@@ -94,7 +72,7 @@ interface Emitter
* @param Subscriber $subscriber
* @return $this
*/
- public function deregister(Subscriber $subscriber) : self;
+ public function deregister(Subscriber $subscriber) : Emitter;
/**
* Returns an array containing the priority-sorted listeners for the given event or for all events if no trigger | [Events] Simplified interfaces\Emitter::emit() to get rid of some complexity in how it can be invoked and to allow for very basic emission scenarios with less overhead. Fixed method signatures while at it. | unyx_events | train |
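The interface above describes a priority-ordered emitter with one-shot listeners. A compact Python sketch of the same contract (on, once, off, emit, with higher priority firing first); this illustrates the interface, it is not a port of the PHP implementation:

import itertools

class Emitter:
    """Sketch of the contract: priority-ordered listeners with one-shot support."""

    def __init__(self):
        self._listeners = {}           # event -> [(priority, seq, callback, once)]
        self._seq = itertools.count()  # stable tie-breaker for equal priorities

    def on(self, event, listener, priority=0, once=False):
        self._listeners.setdefault(event, []).append(
            (priority, next(self._seq), listener, once))
        return self

    def once(self, event, listener, priority=0):
        return self.on(event, listener, priority, once=True)

    def off(self, event=None, listener=None):
        if event is None and listener is None:
            self._listeners.clear()
        elif listener is None:
            self._listeners.pop(event, None)
        else:
            for name in ([event] if event else list(self._listeners)):
                self._listeners[name] = [
                    e for e in self._listeners.get(name, []) if e[2] is not listener]
        return self

    def emit(self, event, *payload):
        entries = sorted(self._listeners.get(event, []),
                         key=lambda e: (-e[0], e[1]))  # higher priority first
        for _prio, _seq, listener, _once in entries:
            listener(*payload)
        self._listeners[event] = [e for e in self._listeners.get(event, []) if not e[3]]

em = Emitter()
em.once("ready", lambda msg: print("first:", msg), priority=10)
em.on("ready", lambda msg: print("second:", msg))
em.emit("ready", "go")     # first: go, then second: go
em.emit("ready", "again")  # only the persistent listener fires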
212d2cafba86f44353fdd5205d752ec09516eda9 | diff --git a/satpy/readers/fci_l1c_fdhsi.py b/satpy/readers/fci_l1c_fdhsi.py
index <HASH>..<HASH> 100644
--- a/satpy/readers/fci_l1c_fdhsi.py
+++ b/satpy/readers/fci_l1c_fdhsi.py
@@ -164,6 +164,29 @@ class FCIFDHSIFileHandler(NetCDF4FileHandler):
return group, root_group
+
+ # scale factors and add_offset on data/<channel>/x were wrong in test data
+ # release from September 2019, add correction. Remove correction when no
+ # longer needed.
+ _corrected_scale_factors = {
+ "x": {
+ 500: -np.deg2rad(8.005244940725e-04),
+ 1000: -np.deg2rad(1.601048987833e-03),
+ 2000: -np.deg2rad(3.202097973165e-03)},
+ "y": {
+ 500: np.deg2rad(8.005244940725e-04),
+ 1000: np.deg2rad(1.601048987833e-03),
+ 2000: np.deg2rad(3.202097973165e-03)}}
+ _corrected_add_offset = {
+ "x": {
+ 500: np.deg2rad(8.9142405037),
+ 1000: np.deg2rad(8.9138402398),
+ 2000: np.deg2rad(8.9130397083)},
+ "y": {
+ 500: -np.deg2rad(8.9142405037),
+ 1000: -np.deg2rad(8.9138402398),
+ 2000: -np.deg2rad(8.9130397083)}}
+
def calc_area_extent(self, key):
"""Calculate area extent for a dataset.
"""
@@ -188,7 +211,12 @@ class FCIFDHSIFileHandler(NetCDF4FileHandler):
ext = {}
for c in "xy":
c_radian = self["data/{:s}/measured/{:s}".format(key.name, c)]
- c_radian_num = c_radian[:] * c_radian.scale_factor + c_radian.add_offset
+ # TEMPORARY CORRECTION for erroneous 2019-09 test data —
+ # REMOVE WHEN NO LONGER NEEDED
+ scalefac = self._corrected_scale_factors[c][key.resolution]
+ addoffset = self._corrected_add_offset[c][key.resolution]
+ #c_radian_num = c_radian[:] * c_radian.scale_factor + c_radian.add_offset
+ c_radian_num = ((c_radian[:]-1) * scalefac + addoffset)
# FCI defines pixels by centroids (Example Products for Pytroll
# Workshop, §B.4.2)
#
@@ -199,8 +227,10 @@ class FCIFDHSIFileHandler(NetCDF4FileHandler):
# the .item() call is needed with the h5netcdf backend, see
# https://github.com/pytroll/satpy/issues/972#issuecomment-558191583
# but we need to compute it first if this is dask
- min_c_radian = c_radian_num[0] - c_radian.scale_factor.item()/2
- max_c_radian = c_radian_num[-1] + c_radian.scale_factor.item()/2
+ #min_c_radian = c_radian_num[0] - c_radian.scale_factor.item()/2
+ #max_c_radian = c_radian_num[-1] + c_radian.scale_factor.item()/2
+ min_c_radian = c_radian_num[0] - scalefac/2
+ max_c_radian = c_radian_num[-1] + scalefac/2
min_c = min_c_radian * h # arc length in m
max_c = max_c_radian * h
try:
@@ -213,6 +243,9 @@ class FCIFDHSIFileHandler(NetCDF4FileHandler):
area_extent = (ext["x"][1], ext["y"][1], ext["x"][0], ext["y"][0])
return (area_extent, nlines, ncols)
+
+ # hardcoded semi-minor-axis correction, to be removed later
+ _semi_minor_axis = 6356752.314
def get_area_def(self, key, info=None):
"""Calculate on-fly area definition for 0 degree geos-projection for a dataset."""
@@ -222,7 +255,8 @@ class FCIFDHSIFileHandler(NetCDF4FileHandler):
return self._cache[key.resolution]
a = float(self["data/mtg_geos_projection/attr/semi_major_axis"])
- b = float(self["data/mtg_geos_projection/attr/semi_minor_axis"])
+ # b = float(self["data/mtg_geos_projection/attr/semi_minor_axis"])
+ b = self._semi_minor_axis
h = float(self["data/mtg_geos_projection/attr/perspective_point_height"])
if_ = float(self["data/mtg_geos_projection/attr/inverse_flattening"])
lon_0 = float(self["data/mtg_geos_projection/attr/longitude_of_projection_origin"]) | Add corrected scale factors for FCI projection
The scale factors and add offsets for the MTG FCI test data as published
in <I>-<I> are factually incorrect. This probably causes the
differences between the pytroll and FCI geolocations. Until this is
fixed from EUMETSAT's side, hardcode corrected add_offset and
scale_factor values.
Similarly, there was a small error in the semi-minor axis.
Also correct for an off-by-one error in the scale factors. | pytroll_satpy | train |
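A stripped-down sketch of the decode and extent arithmetic from the patched calc_area_extent, using the corrected 2 km constants quoted in the diff. The perspective height used below is a typical geostationary figure assumed for illustration, not read from any file:

import numpy as np

def decode_coords(raw, scale_factor, add_offset):
    """CF-style linear transform, including the off-by-one shift from the fix."""
    return (np.asarray(raw) - 1) * scale_factor + add_offset

def area_extent_1d(coords_radian, scale_factor, height_m):
    """Pixels are centroids, so pad each end by half a pixel before scaling by h."""
    low = (coords_radian[0] - scale_factor / 2) * height_m
    high = (coords_radian[-1] + scale_factor / 2) * height_m
    return low, high

scale = -np.deg2rad(3.202097973165e-03)   # corrected 2 km x scale factor
offset = np.deg2rad(8.9130397083)         # corrected 2 km x add offset
h = 35786400.0                            # assumed perspective height, metres
x = decode_coords(np.arange(1, 5569), scale, offset)
print(area_extent_1d(x, scale, h))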
1b6f2854cd74dc9d29fc6b01171a3289f17dda86 | diff --git a/java/src/playn/java/JavaAssets.java b/java/src/playn/java/JavaAssets.java
index <HASH>..<HASH> 100644
--- a/java/src/playn/java/JavaAssets.java
+++ b/java/src/playn/java/JavaAssets.java
@@ -146,7 +146,7 @@ public class JavaAssets extends AbstractAssets<BufferedImage> {
return recv.loadFailed(error != null ? error : new FileNotFoundException(fullPath));
}
- InputStream getAssetStream(String path) throws IOException {
+ protected InputStream getAssetStream(String path) throws IOException {
InputStream in = getClass().getClassLoader().getResourceAsStream(pathPrefix + path);
if (in == null) {
throw new FileNotFoundException(path);
@@ -154,7 +154,7 @@ public class JavaAssets extends AbstractAssets<BufferedImage> {
return in;
}
- private Sound getSound(String path, boolean music) {
+ protected Sound getSound(String path, boolean music) {
Exception err = null;
for (String suff : SUFFIXES) {
final String soundPath = path + suff;
@@ -168,7 +168,7 @@ public class JavaAssets extends AbstractAssets<BufferedImage> {
return new Sound.Error(err);
}
- private URL requireResource(String path) throws FileNotFoundException {
+ protected URL requireResource(String path) throws FileNotFoundException {
URL url = getClass().getClassLoader().getResource(path);
if (url == null) {
throw new FileNotFoundException(path);
@@ -176,7 +176,7 @@ public class JavaAssets extends AbstractAssets<BufferedImage> {
return url;
}
- private BufferedImage scaleImage(BufferedImage image, float viewImageRatio) {
+ protected BufferedImage scaleImage(BufferedImage image, float viewImageRatio) {
int swidth = MathUtil.iceil(viewImageRatio * image.getWidth());
int sheight = MathUtil.iceil(viewImageRatio * image.getHeight());
BufferedImage scaled = new BufferedImage(swidth, sheight, BufferedImage.TYPE_INT_ARGB);
@@ -186,7 +186,7 @@ public class JavaAssets extends AbstractAssets<BufferedImage> {
return scaled;
}
- private Scale assetScale() {
+ protected Scale assetScale() {
return (assetScale != null) ? assetScale : platform.graphics().ctx().scale;
} | Make access levels consistently protected on JavaAssets internals
This is in order to allow JavaAssets to be sanely extended. | threerings_playn | train |
15b3ac35f3154b9d6e160d97b707cc57e463c4b4 | diff --git a/src/node.js b/src/node.js
index <HASH>..<HASH> 100644
--- a/src/node.js
+++ b/src/node.js
@@ -82,6 +82,11 @@ var httpinvoke = function(uri, method, options, cb) {
outputHeaders = res.headers;
if('content-encoding' in outputHeaders) {
contentEncoding = outputHeaders['content-encoding'];
+ if(['identity', 'gzip', 'deflate'].indexOf(contentEncoding) < 0) {
+ cb(new Error('unsupported Content-Encoding ' + contentEncoding));
+ cb = null;
+ return ignorantlyConsume(res);
+ }
delete outputHeaders['content-encoding'];
} else {
contentEncoding = 'identity';
@@ -99,30 +104,46 @@ var httpinvoke = function(uri, method, options, cb) {
return ignorantlyConsume(res);
}
- if('content-length' in outputHeaders) {
+ if(contentEncoding === 'identity' && 'content-length' in outputHeaders) {
outputLength = Number(outputHeaders['content-length']);
}
- downloadProgressCb(0, outputLength);
+ downloadProgressCb(0, outputLength, outputBinary ? [] : '');
if(!cb) {
return ignorantlyConsume(res);
}
if(method === 'HEAD') {
ignorantlyConsume(res);
- downloadProgressCb(0, 0);
+ downloadProgressCb(0, 0, outputBinary ? [] : '');
return cb && cb(null, _undefined, status, outputHeaders);
}
+ var inputStream;
+ if(contentEncoding === 'identity') {
+ inputStream = res;
+ } else {
+ inputStream = zlib['create' + (contentEncoding === 'gzip' ? 'Gunzip' : 'InflateRaw')]();
+ res.pipe(inputStream);
+ }
+
var output = [], downloaded = 0;
- res.on('data', function(chunk) {
+ inputStream.on('data', function(chunk) {
if(!cb) {
return;
}
downloaded += chunk.length;
output.push(chunk);
- downloadProgressCb(downloaded, outputLength);
+ var partial;
+ if(options.partial) {
+ partial = Buffer.concat(output, downloaded);
+ if(!outputBinary) {
+ partial = partial.toString('utf8');
+ }
+ }
+ downloadProgressCb(downloaded, outputLength, partial);
});
- res.on('end', function() {
+ inputStream.on('error', cb);
+ inputStream.on('end', function() {
if(!cb) {
return;
}
@@ -135,7 +156,12 @@ var httpinvoke = function(uri, method, options, cb) {
return cb(new Error('network error'));
}
- downloadProgressCb(outputLength, outputLength);
+ output = Buffer.concat(output, downloaded);
+ if(!outputBinary) {
+ output = output.toString('utf8');
+ }
+
+ downloadProgressCb(outputLength, outputLength, output);
if(!cb) {
return;
}
@@ -144,22 +170,11 @@ var httpinvoke = function(uri, method, options, cb) {
return cb(null, _undefined, status, outputHeaders);
}
- decompress(Buffer.concat(output, downloaded), contentEncoding, function(err, output) {
- if(!cb) {
- return;
- }
- if(err) {
- return cb(new Error('network error'));
- }
- if(!outputBinary) {
- output = output.toString('utf8');
- }
- try {
- cb(null, outputConverter(output), status, outputHeaders);
- } catch(err) {
- cb(err);
- }
- });
+ try {
+ cb(null, outputConverter(output), status, outputHeaders);
+ } catch(err) {
+ cb(err);
+ }
});
}); | node.js: pass the tests | jakutis_httpinvoke | train |
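The Node change pipes the response through a gunzip/inflate stream and reports progress per chunk instead of decompressing once at the end. The same streaming idea in Python with zlib's incremental decompressor; the gzip/deflate wbits mirror createGunzip/createInflateRaw, and the chunk source is simulated:

import zlib

def stream_decompress(chunks, encoding="gzip", on_progress=None):
    """Incrementally decompress gzip/deflate chunks, reporting progress per chunk."""
    if encoding == "identity":
        decomp = None
    elif encoding == "gzip":
        decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)   # gzip wrapper
    elif encoding == "deflate":
        decomp = zlib.decompressobj(-zlib.MAX_WBITS)       # raw deflate, like InflateRaw
    else:
        raise ValueError("unsupported Content-Encoding " + encoding)

    output, downloaded = b"", 0
    for chunk in chunks:
        downloaded += len(chunk)
        output += decomp.decompress(chunk) if decomp else chunk
        if on_progress:
            on_progress(downloaded, output)                # partial body, like the JS
    return output + (decomp.flush() if decomp else b"")

gz = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
data = gz.compress(b"hello world") + gz.flush()
print(stream_decompress([data[:8], data[8:]], "gzip",
                        lambda n, _partial: print("got", n, "bytes")))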
d90bb8560b550442fc8b47182e67ca76a6df4a68 | diff --git a/scripts/jest/config.src.js b/scripts/jest/config.src.js
index <HASH>..<HASH> 100644
--- a/scripts/jest/config.src.js
+++ b/scripts/jest/config.src.js
@@ -1,4 +1,17 @@
-"use strict";
-
+// @flow
const baseConfig = require("./config.base");
-module.exports = Object.assign({}, baseConfig);
+const listPackages = require("./../utils/listPackages");
+
+// Create a module map to point React packages to the build output
+const moduleNameMapper = {};
+listPackages().forEach(name => {
+ // Root entry point - it's good, no need to change anything here.
+ // moduleNameMapper[`^${name}$`] = `...`;
+
+ // Named entry points
+ moduleNameMapper[`^${name}/(.*)$`] = `<rootDir>packages/${name}/src/$1`;
+});
+
+module.exports = Object.assign({}, baseConfig, {
+ moduleNameMapper
+}); | test(configs): folders must be properly loaded while running tests. | Webiny_webiny-js | train |
363af9ed3a87c025a6de7db32d54cfec4a44b5fb | diff --git a/src/ORM/AssociationCollection.php b/src/ORM/AssociationCollection.php
index <HASH>..<HASH> 100644
--- a/src/ORM/AssociationCollection.php
+++ b/src/ORM/AssociationCollection.php
@@ -14,11 +14,13 @@
*/
namespace Cake\ORM;
+use ArrayIterator;
use Cake\ORM\Association;
use Cake\ORM\AssociationsNormalizerTrait;
use Cake\ORM\Entity;
use Cake\ORM\Table;
use InvalidArgumentException;
+use IteratorAggregate;
/**
* A container/collection for association classes.
@@ -26,7 +28,7 @@ use InvalidArgumentException;
* Contains methods for managing associations, and
* ordering operations around saving and deleting.
*/
-class AssociationCollection
+class AssociationCollection implements IteratorAggregate
{
use AssociationsNormalizerTrait;
@@ -292,4 +294,14 @@ class AssociationCollection
return $this->_normalizeAssociations($keys);
}
+
+ /**
+ * Allow looping through the associations
+ *
+ * @return \Cake\ORM\Association
+ */
+ public function getIterator()
+ {
+ return new ArrayIterator($this->_items);
+ }
} | Allow associations to be iterated
Added an iterator to the class so that associations can be looped through | cakephp_cakephp | train |
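The PHP change is the IteratorAggregate pattern: expose the internal item map through a standard iterator so callers can foreach over the collection. Python's protocol equivalent is a one-method __iter__; a minimal sketch with illustrative names:

class AssociationCollection:
    def __init__(self):
        self._items = {}

    def add(self, name, association):
        self._items[name.lower()] = association

    def __iter__(self):
        """Allow looping through the associations, mirroring getIterator()."""
        return iter(self._items.values())

coll = AssociationCollection()
coll.add("Comments", "belongsTo stub")
for assoc in coll:
    print(assoc)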
d925847a16c1549095892c04cf92291f85935f30 | diff --git a/spec/generators/jobs/webapp_job_generator_spec.rb b/spec/generators/jobs/webapp_job_generator_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/generators/jobs/webapp_job_generator_spec.rb
+++ b/spec/generators/jobs/webapp_job_generator_spec.rb
@@ -29,7 +29,7 @@ class WebappGeneratorSpec < MiniTest::Spec
job_template_exists "mywebapp", "helpers/ctl_setup.sh", "helpers/ctl_setup.sh"
job_template_exists "mywebapp", "helpers/ctl_utils.sh", "helpers/ctl_utils.sh"
- example = File.join("examples", "mywebapp_simple", "default.yml")
+ example = File.join("examples", "mywebapp.yml")
File.exist?(example).must_equal(true, "#{example} not created")
end
end | Fix spec for change to job generator | cloudfoundry-community_bosh-gen | train |
dd125a2a74ad38127c71591c74f03ac06c8d973b | diff --git a/CHANGELOG.md b/CHANGELOG.md
index <HASH>..<HASH> 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,9 @@
2.0.2
-----
+### Added
+- Added back the ability to define custom paths in `paths.php` and reference them via `{key}`
+
### Changed
- Better way to operate around symlinks
diff --git a/src/Rocketeer/Services/Pathfinder.php b/src/Rocketeer/Services/Pathfinder.php
index <HASH>..<HASH> 100644
--- a/src/Rocketeer/Services/Pathfinder.php
+++ b/src/Rocketeer/Services/Pathfinder.php
@@ -217,6 +217,11 @@ class Pathfinder
return str_replace($base, null, $this->unifySlashes($path));
}
+ // Replace paths from configuration
+ if ($custom = $this->getPath($folder)) {
+ return $custom;
+ }
+
return false;
}, $path);
}
diff --git a/tests/Services/PathfinderTest.php b/tests/Services/PathfinderTest.php
index <HASH>..<HASH> 100644
--- a/tests/Services/PathfinderTest.php
+++ b/tests/Services/PathfinderTest.php
@@ -136,4 +136,12 @@ class PathfinderTest extends RocketeerTestCase
$storage = $this->paths->getStoragePath();
$this->assertEquals('local/folder', $storage);
}
+
+ public function testCanReplacePatternsWithPathsFile()
+ {
+ $this->swapConfig(['rocketeer::paths.foo' => 'bar']);
+ $replaced = $this->paths->replacePatterns('{foo}');
+
+ $this->assertEquals('bar', $replaced);
+ }
} | Close #<I> - Add back ability to define custom paths | rocketeers_rocketeer | train |
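The restored feature resolves {key} placeholders against user-defined paths from configuration. A regex sketch of that substitution; unknown keys pass through unchanged, which approximates the PHP code's false fallback:

import re

def replace_patterns(path, config_paths):
    """Expand {key} placeholders from a paths mapping; unknown keys pass through."""
    def substitute(match):
        return str(config_paths.get(match.group(1), match.group(0)))
    return re.sub(r"\{([\w.]+)\}", substitute, path)

print(replace_patterns("{foo}/releases", {"foo": "bar"}))  # bar/releases
print(replace_patterns("{unset}/x", {"foo": "bar"}))       # {unset}/x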
8edf077655b5d106eb520be1ab17550932590fb7 | diff --git a/Actions/Action/DeleteAction.php b/Actions/Action/DeleteAction.php
index <HASH>..<HASH> 100644
--- a/Actions/Action/DeleteAction.php
+++ b/Actions/Action/DeleteAction.php
@@ -50,6 +50,10 @@ class DeleteAction extends Action
try {
$dataProvider->remove($data->getId());
+
+ $response = [
+ 'success' => true,
+ ];
} catch (\Exception $e) {
$message = null;
if (is_object($data) && method_exists($data, '__toString')) {
@@ -58,6 +62,11 @@ class DeleteAction extends Action
$message = 'Error while deleting element with id "' . $id . '"';
}
+ $response = [
+ 'success' => false,
+ 'error' => $message,
+ ];
+
$flashBag = $request->getSession()->getFlashBag();
$flashBag->add('error', $message);
}
@@ -70,11 +79,8 @@ class DeleteAction extends Action
return $controller->redirect($responseHandler->getRefererUrl($controller, $request));
} else {
- return $responseHandler->getResponse($this->options['response_type'], '', '', [
- 'data' => $data,
- ]);
+ return $responseHandler->getResponse($this->options['response_type'], '', '', $response);
}
-
}
/**
diff --git a/Actions/Action/ListAction.php b/Actions/Action/ListAction.php
index <HASH>..<HASH> 100644
--- a/Actions/Action/ListAction.php
+++ b/Actions/Action/ListAction.php
@@ -56,9 +56,7 @@ class ListAction extends Action
$results = $listView->getData($listDataEvent, true);
$results = $this->parseResults($results->toArray(), $columns);
- $params = [
- 'results' => $results,
- ];
+ $params = $results;
}
$paramsEvent = new ResponseEvent($params); | Update rest response for List and Remove actions | vardius_crud-bundle | train |
5d00c8dc1d8eacf0192e76758cbe8dd920671669 | diff --git a/lib/netsuite/records/vendor_bill.rb b/lib/netsuite/records/vendor_bill.rb
index <HASH>..<HASH> 100644
--- a/lib/netsuite/records/vendor_bill.rb
+++ b/lib/netsuite/records/vendor_bill.rb
@@ -40,6 +40,14 @@ module NetSuite
rec
end
+ def self.search_class_name
+ "Transaction"
+ end
+
+ def self.search_class_namespace
+ 'tranSales'
+ end
+
end
end
end | Define search namespaces to allow searching on VendorBills
NetSuite doesn't define a VendorBillSearch. It expects you to use a
TransactionSearch instead:
<URL>,
whereas VendorBills use the `purchases.transactions` namespace.
This is an issue because the `purchases.transactions` namespace is
required for some actions, just not search.
This commit overrides the namespaces used for the search action on
VendorBills. | NetSweet_netsuite | train |
d4287d7d0d1b61d5eeaee534c330cf3198702ae7 | diff --git a/cwltool/job.py b/cwltool/job.py
index <HASH>..<HASH> 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -72,11 +72,6 @@ class CommandLineJob(object):
runtime = [] # type: List[unicode]
- # spec currently says "HOME must be set to the designated output
- # directory." but spec might change to designated temp directory.
- # env = {"TMPDIR": self.tmpdir, "HOME": self.tmpdir} # type: Mapping[str,str]
- env = {"TMPDIR": self.tmpdir, "HOME": self.outdir} # type: Mapping[str,str]
-
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
for knownfile in self.pathmapper.files():
@@ -139,12 +134,13 @@ class CommandLineJob(object):
env = self.environment
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
- env["TMPDIR"] = self.tmpdir
vars_to_preserve = kwargs.get("preserve_environment")
if vars_to_preserve is not None:
for key, value in os.environ.items():
if key in vars_to_preserve and key not in env:
env[key] = value
+ env["HOME"] = self.outdir
+ env["TMPDIR"] = self.tmpdir
stageFiles(self.pathmapper, os.symlink) | Set $HOME and $TMPDIR correctly for non-Docker command line tools. (#<I>) | common-workflow-language_cwltool | train |
22f90c45be6f0c62e52300e8171fbbb745039741 | diff --git a/tldap/backend/transaction.py b/tldap/backend/transaction.py
index <HASH>..<HASH> 100644
--- a/tldap/backend/transaction.py
+++ b/tldap/backend/transaction.py
@@ -490,13 +490,14 @@ class LDAPwrapper(object):
self._cache_rename_dn(dn, newdn)
cache = self._cache_get_for_dn(newdn)[1]
old_key,old_value,_ = split_dn[0][0]
- cache[old_key] = [ x for x in cache[old_key] if x.lower() != old_value.lower()]
+ if old_value in cache[old_key]:
+ cache[old_key].remove(old_value)
if len(cache[old_key]) == 0:
del cache[old_key]
new_key,new_value,_ = split_newrdn[0][0]
if new_key not in cache:
cache[new_key] = [ ]
- if new_value.lower() not in [ x.lower() for x in cache[new_key] ]:
+ if new_value not in cache[new_key]:
cache[new_key].append(new_value)
return self._process(oncommit, onrollback, onfailure)
diff --git a/tldap/base.py b/tldap/base.py
index <HASH>..<HASH> 100644
--- a/tldap/base.py
+++ b/tldap/base.py
@@ -429,8 +429,9 @@ class LDAPobject(object):
if v is None:
pass
elif isinstance(v, list):
- v = [ x for x in v if x.lower() != old_value.lower()]
- elif old_value.lower() == v.lower():
+ if old_value in v:
+ v.remove(old_value)
+ elif old_value == v:
v = None
if v == None:
del self._db_values[using][old_key]
@@ -445,9 +446,9 @@ class LDAPobject(object):
if v is None:
v = new_value
elif isinstance(v, list):
- if new_value.lower() not in [ x.lower() for x in v ]:
+ if new_value not in v:
v.append(new_value)
- elif v.lower() != new_value.lower():
+ elif v != new_value:
# we can't add a value to a string
assert False
self._db_values[using][new_key] = field.to_db(v) | Case sensitive matches should be ok. | Karaage-Cluster_python-tldap | train |
ea5c9ad8ffd3898e1fe136cc3cf371b3d15e3f97 | diff --git a/src/WorkerPool.js b/src/WorkerPool.js
index <HASH>..<HASH> 100644
--- a/src/WorkerPool.js
+++ b/src/WorkerPool.js
@@ -188,6 +188,29 @@ class PoolWorker {
});
break;
}
+ case 'loadModule': {
+ const { request, questionId } = message;
+ const { data } = this.jobs[id];
+ data.loadModule(request, (error, source, sourceMap, module) => {
+ this.writeJson({
+ type: 'result',
+ id: questionId,
+ error: error ? {
+ message: error.message,
+ details: error.details,
+ missing: error.missing,
+ } : null,
+ result: [
+ source,
+ sourceMap,
+ // TODO: Serialize module?
+ // module,
+ ],
+ });
+ });
+ finalCallback();
+ break;
+ }
case 'resolve': {
const { context, request, questionId } = message;
const { data } = this.jobs[id];
diff --git a/src/index.js b/src/index.js
index <HASH>..<HASH> 100644
--- a/src/index.js
+++ b/src/index.js
@@ -20,6 +20,7 @@ function pitch() {
sourceMap: this.sourceMap,
emitError: this.emitError,
emitWarning: this.emitWarning,
+ loadModule: this.loadModule,
resolve: this.resolve,
target: this.target,
minimize: this.minimize,
diff --git a/src/worker.js b/src/worker.js
index <HASH>..<HASH> 100644
--- a/src/worker.js
+++ b/src/worker.js
@@ -107,6 +107,17 @@ const queue = asyncQueue(({ id, data }, taskCallback) => {
readResource: fs.readFile.bind(fs),
context: {
version: 2,
+ fs,
+ loadModule: (request, callback) => {
+ callbackMap[nextQuestionId] = (error, result) => callback(error, ...result);
+ writeJson({
+ type: 'loadModule',
+ id,
+ questionId: nextQuestionId,
+ request,
+ });
+ nextQuestionId += 1;
+ },
resolve: (context, request, callback) => {
callbackMap[nextQuestionId] = callback;
writeJson({ | fix: `loadModule` and `fs` are now available in a loader context (#<I>) | webpack-contrib_thread-loader | train |
a71a21c21d498b17d84fe01e7b458ce9832b49f1 | diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
import os
+import sys
import tempfile
import warnings
from distutils import ccompiler
@@ -18,6 +19,16 @@ except ImportError:
import numpy as np
+def get_include_dirs():
+ """Get all the include directories which may contain headers that we need to
+ compile the cython extensions."""
+ return (
+ np.get_include(),
+ os.path.join(sys.prefix, "include"),
+ os.path.join(sys.prefix, "Library", "include"),
+ )
+
+
def has_c_library(library, extension=".c"):
"""Check whether a C/C++ library is available on the system to the compiler.
@@ -44,6 +55,9 @@ def has_c_library(library, extension=".c"):
# Get a compiler instance
compiler = ccompiler.new_compiler()
+ # Add conda and numpy library include dirs
+ for inc_dir in get_include_dirs():
+ compiler.add_include_dir(inc_dir)
assert isinstance(compiler, ccompiler.CCompiler)
try:
@@ -110,10 +124,9 @@ class CythonBuildExt(build_ext):
extension.extra_compile_args.extend(compile_flags)
extension.extra_link_args.extend(link_flags)
- # We typically use numpy includes in our Cython files, so we"ll add the
- # appropriate headers here
+ # Add numpy and system include directories
for extension in self.extensions:
- extension.include_dirs.append(np.get_include())
+ extension.include_dirs.extend(get_include_dirs())
super().build_extensions()
@@ -186,6 +199,7 @@ setup(
],
packages=setuptools.find_packages(include=["openTSNE", "openTSNE.*"]),
+ python_requires=">=3.6",
install_requires=[
"numpy>1.14",
"numba>=0.38.1", | Add proper conda/pip include dirs to C compilers | pavlin-policar_openTSNE | train |
1806ea0271f8c9826044ebe9c2773657aebd1acd | diff --git a/abydos/stemmer.py b/abydos/stemmer.py
index <HASH>..<HASH> 100644
--- a/abydos/stemmer.py
+++ b/abydos/stemmer.py
@@ -840,5 +840,18 @@ def dutch(word):
if len(word[r1_start:]) >= 1 and word[-2] not in _not_s_endings:
word = word[:-1]
+ # Step 2
+ if word[-1:] == 'e':
+ if len(word[r1_start:]) >= 1 and word[-2] not in _vowels:
+ word = _undouble(word[:-1])
+
+ # Step 3a
+ if word[-4:] == 'heid':
+ if len(word[r2_start:]) >= 1 and word[-2] != 'c':
+ word = word[:-4]
+ if word[-2:] == 'en':
+ if (len(word[r1_start:]) >= 2 and
+ (word[-3] not in _vowels and word[-5:-2] != 'gem')):
+ word = _undouble(word[:-2])
return word | added Steps 2 & 3a | chrislit_abydos | train |
4707f35e7f3003a601479403bf327915edc9c1f4 | diff --git a/odl/phantom/transmission.py b/odl/phantom/transmission.py
index <HASH>..<HASH> 100644
--- a/odl/phantom/transmission.py
+++ b/odl/phantom/transmission.py
@@ -96,7 +96,7 @@ def shepp_logan_ellipsoids(ndim, modified=False):
References
----------
- .. _Shepp-Logan phantom: en.wikipedia.org/wiki/Shepp–Logan_phantom
+ .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
"""
if ndim == 2:
ellipsoids = _shepp_logan_ellipse_2d()
@@ -148,7 +148,7 @@ def shepp_logan(space, modified=False, min_pt=None, max_pt=None):
References
----------
- .. _Shepp-Logan phantom: en.wikipedia.org/wiki/Shepp–Logan_phantom
+ .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
"""
ellipsoids = shepp_logan_ellipsoids(space.ndim, modified)
return ellipsoid_phantom(space, ellipsoids, min_pt, max_pt) | MAINT: replace non-ASCII en-dash with usual dash | odlgroup_odl | train |
ec8797de91d3c4978cbf58452f5b126692bc53da | diff --git a/src/func.js b/src/func.js
index <HASH>..<HASH> 100644
--- a/src/func.js
+++ b/src/func.js
@@ -10,10 +10,12 @@
import {
- drop,
+ findDuplicates,
head,
- isContinuous,
+ last,
+ range,
} from "./array"
+import { isArray } from "./type"
@@ -300,23 +302,52 @@ export const pipe = (...args) => (...fs) => flow(...fs)(...args)
export const rearg = (f) => (...indices) => {
if (indices.length === 0) return f
+ if (findDuplicates(indices).length > 0) throw RangeError(
+ "func.rearg: duplicate indices are forbidden"
+ )
+
+ // index mapping "new" -> "old"
let indexPairs = indices
.map((n, o) => [n, o])
.sort(([n1], [n2]) => n1 - n2)
- if (
- !isContinuous(indexPairs, ([n1], [n2]) => n2 - n1 === 1) ||
- compose(head, head)(indexPairs) !== 0
- ) {
- throw new RangeError("Not all of the required arguments are covered.")
- }
-
return curryN(
indices.length,
- (...args) => f(...[
- ...indexPairs.map(([_, o]) => args[o]),
- ...drop(indexPairs.length)(args),
- ])
+ (...args) => {
+ let
+ // source arguments: [argument, usageCount]
+ sargs = args.map((a) => [a, 0]),
+
+ // destination arguments: [argument, usageCount]
+ dargs = range(Math.max(
+ head(last(indexPairs)) + 1,
+ args.length)
+ ).map(() => [null, 0]),
+
+ // not used source arguments
+ rest = null
+
+ // fill destination arguments with source arguments
+ // (through index mapping) and mark valid destination arguments
+ // and used source arguments
+ indexPairs.forEach(([n, o]) => {
+ dargs[n][0] = head(sargs[o])
+ dargs[n][1] += 1
+ sargs[o][1] += 1
+ })
+
+ // filter-out all used source arguments and leave only unused ones
+ rest = sargs.filter((a) => last(a) === 0).reverse()
+
+ // return function `f` invocation with valid destination arguments
+ // and not-valid ones replaced with the unused source arguments
+ return f(...dargs.map((a) => {
+ if (last(a) !== 0) return head(a)
+ let rel = rest.pop()
+ if (isArray(rel)) return head(rel)
+ return rel
+ }))
+ }
)
} | func: rearg() accepts non-continuous index sequences. | drmats_js-toolbox | train |
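The new rearg builds a new-to-old index map, fills the mapped slots, then back-fills the remaining positions with the unused arguments in order. The same algorithm in Python, minus the currying wrapper the JS adds with curryN:

def rearg(f, *indices):
    """Reorder positional arguments: old argument o moves to new position indices[o]."""
    if len(set(indices)) != len(indices):
        raise ValueError("rearg: duplicate indices are forbidden")
    if not indices:
        return f

    def wrapper(*args):
        width = max(max(indices) + 1, len(args))
        dest, mapped = [None] * width, [False] * width
        for old, new in enumerate(indices):
            dest[new], mapped[new] = args[old], True
        unused = (a for i, a in enumerate(args) if i >= len(indices))
        return f(*(d if m else next(unused, None) for d, m in zip(dest, mapped)))
    return wrapper

swap = rearg(lambda a, b, c: (a, b, c), 1, 0)
print(swap("x", "y", "z"))  # ('y', 'x', 'z'): extra args ride along in order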
972b900129b91751c78fec7f952b655a4ea53330 | diff --git a/src/Eluceo/iCal/Property/ArrayValue.php b/src/Eluceo/iCal/Property/ArrayValue.php
index <HASH>..<HASH> 100644
--- a/src/Eluceo/iCal/Property/ArrayValue.php
+++ b/src/Eluceo/iCal/Property/ArrayValue.php
@@ -11,12 +11,12 @@ class ArrayValue implements ValueInterface
*/
protected $values;
- public function __construct(array $values)
+ public function __construct($values)
{
$this->values = $values;
}
- public function setValues(array $values)
+ public function setValues($values)
{
$this->values = $values; | Make implementation compatible with PHP <I> | markuspoerschke_iCal | train |
a0c7f09159ecc0024f52fd2d23d4c63d8d3e5eea | diff --git a/lib/nodes/interactive-program.js b/lib/nodes/interactive-program.js
index <HASH>..<HASH> 100644
--- a/lib/nodes/interactive-program.js
+++ b/lib/nodes/interactive-program.js
@@ -7,16 +7,23 @@ module.exports = function (node) {
this.cases = cases || [];
};
- // node.InteractiveProgram.prototype.interpret = function (context) {
- // var value = this.expression.eval(context);
- // for (var i = 0; i < this.cases.length; i++) {
- // if (this.cases[i].case.eval(context) === value) {
- // node.interpretBlock(this.cases[i].body, context);
- // break;
- // }
- // }
- // return context;
- // };
+ node.InteractiveProgram.prototype.interpret = function (context, onError) {
+ return {
+ onKey: function (key) {
+ for (var i = 0; i < this.cases.length; i++) {
+                if (this.cases[i].case.eval(context) === key) { // TODO: not sure about this
+ try {
+ node.interpretBlock(this.cases[i].body, context);
+ break;
+ } catch (err) {
+ err.context = context;
+ onError(err);
+ }
+ }
+ }
+ }
+ };
+ };
node.InteractiveProgram.prototype.interpret = function (context) {
try { | Interpreting an interactive program will return an object with callbacks | gobstones_gs-weblang-core | train |
00089c3699f6c6d8ef0055addf137d9006495ddd | diff --git a/cellpy/readers/cellreader.py b/cellpy/readers/cellreader.py
index <HASH>..<HASH> 100644
--- a/cellpy/readers/cellreader.py
+++ b/cellpy/readers/cellreader.py
@@ -3135,10 +3135,9 @@ class CellpyData(object):
self.logger.info("If you really want to do it, use save with force=True")
return
- if not os.path.splitext(filename)[-1]:
- outfile_all = filename + "." + extension
- else:
- outfile_all = filename
+ outfile_all = Path(filename)
+ if not filename.suffix:
+ filename.rename(filename.with_suffix(f'.{extension}'))
if os.path.isfile(outfile_all):
self.logger.debug("Outfile exists")
diff --git a/cellpy/readers/instruments/arbin_sql_csv.py b/cellpy/readers/instruments/arbin_sql_csv.py
index <HASH>..<HASH> 100644
--- a/cellpy/readers/instruments/arbin_sql_csv.py
+++ b/cellpy/readers/instruments/arbin_sql_csv.py
@@ -259,9 +259,12 @@ def test_loader_from_outside():
print(t)
print("/nvoltage")
print(v)
- plt.plot(t, v)
- plt.plot(t, steps)
- plt.show()
+ # plt.plot(t, v)
+ # plt.plot(t, steps)
+ # plt.show()
+
+ outfile = datadir / "test_out"
+ c.save(outfile)
if __name__ == "__main__": | allow c.save to take pathlib.Path objects | jepegit_cellpy | train |
fbe549c088925806754ea6d0bd93ecb23850c46a | diff --git a/core/src/main/java/com/orientechnologies/orient/core/db/ODatabasePojoAbstract.java b/core/src/main/java/com/orientechnologies/orient/core/db/ODatabasePojoAbstract.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/com/orientechnologies/orient/core/db/ODatabasePojoAbstract.java
+++ b/core/src/main/java/com/orientechnologies/orient/core/db/ODatabasePojoAbstract.java
@@ -51,8 +51,7 @@ import com.orientechnologies.orient.core.tx.OTransaction;
import com.orientechnologies.orient.core.tx.OTransaction.TXTYPE;
@SuppressWarnings("unchecked")
-public abstract class ODatabasePojoAbstract<T extends Object> extends ODatabaseWrapperAbstract<ODatabaseDocumentTx> implements
- ODatabaseSchemaAware<T> {
+public abstract class ODatabasePojoAbstract<T extends Object> extends ODatabaseWrapperAbstract<ODatabaseDocumentTx> implements ODatabaseSchemaAware<T> {
protected HashMap<Integer, ODocument> objects2Records = new HashMap<Integer, ODocument>();
protected IdentityHashMap<ODocument, T> records2Objects = new IdentityHashMap<ODocument, T>();
protected HashMap<ORID, ODocument> rid2Records = new HashMap<ORID, ODocument>();
@@ -218,14 +217,19 @@ public abstract class ODatabasePojoAbstract<T extends Object> extends ODatabaseW
final List<Object> resultPojo = new ArrayList<Object>();
Object obj;
- for (ODocument doc : result) {
- // GET THE ASSOCIATED DOCUMENT
- if (doc.getClassName() == null)
- obj = doc;
- else
- obj = getUserObjectByRecord(doc, iCommand.getFetchPlan(), true);
-
- resultPojo.add(obj);
+ for (OIdentifiable doc : result) {
+ if (doc instanceof ODocument) {
+ // GET THE ASSOCIATED DOCUMENT
+ if (((ODocument) doc).getClassName() == null)
+ obj = doc;
+ else
+ obj = getUserObjectByRecord(((ODocument) doc), iCommand.getFetchPlan(), true);
+
+ resultPojo.add(obj);
+ } else {
+ resultPojo.add(doc);
+ }
+
}
return (RET) resultPojo; | Fix by Luca Molino on issue <I> | orientechnologies_orientdb | train |
3fc9fa5228f576a6f03180908da8b8ded67ca733 | diff --git a/OpenPNM/Network/tools.py b/OpenPNM/Network/tools.py
index <HASH>..<HASH> 100644
--- a/OpenPNM/Network/tools.py
+++ b/OpenPNM/Network/tools.py
@@ -7,6 +7,7 @@ Network.tools.topology: Assorted topological manipulation methods
"""
import scipy as _sp
import numpy as _np
+import scipy.ndimage as _spim
from OpenPNM.Base import logging as _logging
from OpenPNM.Base import Workspace as _workspace
logger = _logging.getLogger(__name__)
@@ -973,7 +974,7 @@ def _scale_3d_axes(ax, X, Y, Z):
ax.set_zlim(mid_z - max_range, mid_z + max_range)
-def generate_base_points(num_points, domain_size, surface='reflected'):
+def generate_base_points(num_points, domain_size, prob=None):
r"""
Generates a set of base points for passing into the DelaunayVoronoiDual
class. The points can be distributed in spherical, cylindrical, or
@@ -1012,10 +1013,16 @@ def generate_base_points(num_points, domain_size, surface='reflected'):
"""
if len(domain_size) == 1: # Spherical
domain_size = _sp.array(domain_size)
- spherical_coords = _sp.rand(num_points, 3)
- r = (spherical_coords[:, 0]**0.333)*domain_size
- theta = spherical_coords[:, 1]*(2*_sp.pi)
- phi = spherical_coords[:, 2]*(2*_sp.pi)
+ if prob is None:
+ prob = _sp.ones([41, 41, 41])
+ prob[20, 20, 20] = 0
+ prob = _spim.distance_transform_bf(prob) <= 20
+ base_pts = _try_points(num_points, prob)
+ # Convert to spherica coordinates
+ [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0.5]).T # Center at origin
+ r = 2*_sp.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]
+ theta = 2*_sp.arctan(Y/X)
+ phi = 2*_sp.arctan(_sp.sqrt(X**2 + Y**2)/Z)
# Reflect base points across perimeter
new_r = 2*domain_size - r
r = _sp.hstack([r, new_r])
@@ -1028,10 +1035,16 @@ def generate_base_points(num_points, domain_size, surface='reflected'):
base_pts = _sp.vstack([X, Y, Z]).T
elif len(domain_size) == 2: # Cylindrical
domain_size = _sp.array(domain_size)
- cylindrical_coords = _sp.rand(num_points, 3)
- r = (cylindrical_coords[:, 0]**0.5)*domain_size[0]
- theta = cylindrical_coords[:, 1]*(2*_sp.pi)
- z = cylindrical_coords[:, 2]
+ if prob is None:
+ prob = _sp.ones([41, 41, 41])
+ prob[20, 20, :] = 0
+ prob = _spim.distance_transform_bf(prob) <= 20
+ base_pts = _try_points(num_points, prob)
+ # Convert to cylindrical coordinates
+ [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0]).T # Center on z-axis
+ r = 2*_sp.sqrt(X**2 + Y**2)*domain_size[0]
+ theta = 2*_sp.arctan(Y/X)
+ z = Z*domain_size[1]
# Reflect base points about faces and perimeter
new_r = 2*domain_size[0] - r
r = _sp.hstack([r, new_r])
@@ -1043,12 +1056,14 @@ def generate_base_points(num_points, domain_size, surface='reflected'):
# Convert to Cartesean coordinates
X = r*_sp.cos(theta)
Y = r*_sp.sin(theta)
- Z = z*domain_size[1]
+ Z = z
base_pts = _sp.vstack([X, Y, Z]).T
elif len(domain_size) == 3: # Rectilinear
domain_size = _sp.array(domain_size)
Nx, Ny, Nz = domain_size
- base_pts = _sp.rand(num_points, 3)
+ if prob is None:
+ prob = _sp.ones([10, 10, 10], dtype=float)
+ base_pts = _try_points(num_points, prob)
base_pts = base_pts*domain_size
# Reflect base points about all 6 faces
orig_pts = base_pts
@@ -1062,3 +1077,17 @@ def generate_base_points(num_points, domain_size, surface='reflected'):
base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts))
base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts))
return base_pts
+
+
+def _try_points(num_points, prob):
+ base_pts = []
+ N = 0
+ while N < num_points:
+ pt = _sp.random.rand(3) # Generate a point
+ # Test whether to keep it or not
+ [indx, indy, indz] = _sp.floor(pt*_sp.shape(prob)).astype(int)
+ if _sp.random.rand(1) <= prob[indx][indy][indz]:
+ base_pts.append(pt)
+ N += 1
+ base_pts = _sp.array(base_pts)
+ return base_pts | Enhanced the generate_base_points function to accept 3D images containing probabilities to create porosity distributions in the domain. | PMEAL_OpenPNM | train |
3023b0cb6ec48de6e5348feecca4460d52c06e0d | diff --git a/jquery.scrollable.js b/jquery.scrollable.js
index <HASH>..<HASH> 100644
--- a/jquery.scrollable.js
+++ b/jquery.scrollable.js
@@ -838,6 +838,9 @@
if ($vScrollbar && options.handle === 'content') {
$vScrollbar.css('pointer-events', 'none');
}
+ if ($wrapper.css('overflow') !== 'hidden' && $wrapper.css('overflow') !== 'visible') {
+ $wrapper.css('overflow', 'hidden');
+ }
function scrollToPreNormalized(x, y, duration, callback) {
refresh(); | Set overflow hidden if container was auto/scroll | misonou_jquery-scrollable | train |