hash: stringlengths 40 to 40
diff: stringlengths 131 to 114k
message: stringlengths 7 to 980
project: stringlengths 5 to 67
split: stringclasses, 1 value
74351e4791603d9e0de297777da91e470dfbf2a6
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -21,6 +21,7 @@ try: readme = f.read() except: print "Failed to load README file" + readme = "" try: import pypandoc
don't fail install if README missing
openvax_pepdata
train
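The fix above assigns a fallback so a missing README no longer aborts installation. A minimal standalone sketch of the same defensive pattern (the filename and function name are ours, not from the project):

```python
# Defensive long-description loading: fall back to an empty string when
# the README cannot be read, so setup never aborts on a missing file.
import io

def load_readme(path="README.md"):
    try:
        with io.open(path, encoding="utf-8") as f:
            return f.read()
    except IOError:
        print("Failed to load README file")
        return ""  # install proceeds with an empty long description

readme = load_readme()
```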
e3e17a70dc0c6074d86858333194cbae9709ca25
diff --git a/builder/builder.go b/builder/builder.go index <HASH>..<HASH> 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -153,7 +153,7 @@ func BuildMapFromString(m Map, mml *mml.MML, style string) error { for _, l := range mml.Layers { rules := carto.MSS().LayerRules(l.ID, l.Classes...) - if len(rules) > 0 && l.Active { + if len(rules) > 0 { m.AddLayer(l, rules) } } diff --git a/builder/builder_test.go b/builder/builder_test.go index <HASH>..<HASH> 100644 --- a/builder/builder_test.go +++ b/builder/builder_test.go @@ -114,8 +114,7 @@ func TestBuilMapFromString(t *testing.T) { if err := BuildMapFromString(&m, mml, string(mss)); err != nil { t.Fatal(err) } - // inactive layer is not included - if len(m.layers) != 1 { + if len(m.layers) != 2 { t.Fatal(m.layers) } }
add inactive layers in BuildMapFromString; layers are still inactive in the output style
omniscale_magnacarto
train
5876c0b3aae941558c592971e2bc8006566e6415
diff --git a/src/models/BrowserStyleSheet.js b/src/models/BrowserStyleSheet.js index <HASH>..<HASH> 100644 --- a/src/models/BrowserStyleSheet.js +++ b/src/models/BrowserStyleSheet.js @@ -32,6 +32,23 @@ const USE_SPEEDY = IS_BROWSER && !IS_DEV export const COMPONENTS_PER_TAG = 40 +// Source: https://github.com/threepointone/glamor/blob/master/src/sheet.js#L32-L43 +const sheetForTag = (tag: HTMLStyleElement): ?CSSStyleSheet => { + if (tag.sheet) { + // $FlowFixMe + return tag.sheet + } + + for (let i = 0; i < document.styleSheets.length; i += 1) { + if (document.styleSheets[i].ownerNode === tag) { + // $FlowFixMe + return document.styleSheets[i] + } + } + + return undefined +} + class BrowserTag implements Tag { isLocal: boolean components: { [string]: Object } @@ -77,18 +94,22 @@ class BrowserTag implements Tag { } speedyInsert(el: HTMLStyleElement, cssRules: Array<string>) { - const sheet = el.sheet - if (sheet === null || sheet === undefined) { + const sheet = sheetForTag(el) + if (sheet === undefined) { return } for (let i = 0; i < cssRules.length; i += 1) { const rule = cssRules[i] - if (rule !== undefined && rule.length) { - /* eslint-disable */ - // $FlowFixMe Flow's `StyleSheet` breakdown here https://github.com/facebook/flow/issues/2696 - sheet.insertRule(rule, sheet.cssRules.length) - /* eslint-enable */ + if (rule !== undefined && rule.length > 0) { + try { + // $FlowFixMe Flow's `StyleSheet` breakdown here https://github.com/facebook/flow/issues/2696 + sheet.insertRule(rule, sheet.cssRules.length) + } catch (e) { + if (process.env.NODE_ENV === 'development') { + console.error('Tried to insert illegal rule:', rule) + } + } } } }
Add sheetForTag and try-catch around insertRule - Fix known Firefox quirk - Add try-catch around error-prone insertRule
styled-components_styled-components
train
69a0e9680475fbbca0302b0a4b1f23420a429607
diff --git a/addons/storyshots/storyshots-core/src/frameworks/rn/loader.js b/addons/storyshots/storyshots-core/src/frameworks/rn/loader.js index <HASH>..<HASH> 100644 --- a/addons/storyshots/storyshots-core/src/frameworks/rn/loader.js +++ b/addons/storyshots/storyshots-core/src/frameworks/rn/loader.js @@ -10,7 +10,7 @@ function test(options) { } function configure(options, storybook) { - const { configPath = 'storybook', config } = options; + const { configPath = '.storybook', config } = options; if (config && typeof config === 'function') { config(storybook);
storyshots should also relate to the .storybook dir
storybooks_storybook
train
ad947727cdeb6cffd34ec1cd981a793230004635
diff --git a/astrobase/lcproc.py b/astrobase/lcproc.py index <HASH>..<HASH> 100644 --- a/astrobase/lcproc.py +++ b/astrobase/lcproc.py @@ -604,8 +604,8 @@ def stetson_threshold(featuresdir, for magcol in magcols[1:]: allobjects['overallthreshold'] = ( - np.intersect(allobjects['overallthreshold'], - allobjects[magcol]['thresholdobjects']) + np.intersect1d(allobjects['overallthreshold'], + allobjects[magcol]['thresholdobjects']) ) LOGINFO('objects above stetson threshold across all magcols: %s' %
lcproc: also get objects above stetson threshold across all magcols
waqasbhatti_astrobase
train
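The one-word fix above replaces the nonexistent `np.intersect` with `np.intersect1d`. A small self-contained example of the intended reduction, intersecting per-magcol object lists (the IDs and column names are made up):

```python
import numpy as np

# Objects above the Stetson threshold in each magnitude column (made up).
threshold_objects = {
    "aep_000": np.array(["obj1", "obj2", "obj3"]),
    "atf_000": np.array(["obj2", "obj3", "obj4"]),
    "rep_000": np.array(["obj2", "obj5"]),
}

magcols = list(threshold_objects)
overall = threshold_objects[magcols[0]]
for magcol in magcols[1:]:
    # np.intersect1d returns the sorted unique elements common to both
    # arrays; np.intersect does not exist, which is what the commit fixes.
    overall = np.intersect1d(overall, threshold_objects[magcol])

print(overall)  # ['obj2']: above threshold across all magcols
```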
18c8a9128edbbe6bc8ec36060addaad01c46e960
diff --git a/src/TwoFactorAuth/TwoFactorAuthHelper.php b/src/TwoFactorAuth/TwoFactorAuthHelper.php index <HASH>..<HASH> 100644 --- a/src/TwoFactorAuth/TwoFactorAuthHelper.php +++ b/src/TwoFactorAuth/TwoFactorAuthHelper.php @@ -35,6 +35,7 @@ use Lasallecms\Helpers\TwoFactorAuth\SendMessagesViaTwilio; // Laravel facades use Illuminate\Support\Facades\Config; +use Illuminate\Support\Facades\Cookie; use Illuminate\Support\Facades\DB; // Laravel classes @@ -43,6 +44,10 @@ use Illuminate\Http\Request; // Third party classes use Carbon\Carbon; +/** + * Class TwoFactorAuthHelper + * @package Lasallecms\Helpers\TwoFactorAuth + */ class TwoFactorAuthHelper { /** @@ -392,6 +397,65 @@ class TwoFactorAuthHelper /*********************************************************************************/ + /* 2FA COOKIES */ + /*********************************************************************************/ + + /** + * Set (make) the 2FA cookie + * + * @param Illuminate\Http\Response $response + * @return Illuminate\Http\Response + */ + public function setCookie($response) { + + if (!config('auth.auth_2fa_cookie_enable')) { + return false; + } + + + if (!Cookie::has('successful_login')) { + + // what is the cookie's lifetime, in minutes + + $numberOfDays = config('auth.auth_2fa_cookie_lifetime_days'); + + // the max number of days is 30, and the min is 1 + if ($numberOfDays > 30) { + $numberOfDays = 30; + } + if ($numberOfDays < 1) { + $numberOfDays = 1; + } + + // convert days to minutes -- there are 1,440 minutes in a day + $numberOfMinutes = $numberOfDays * 1440; + + return $response->withCookie(cookie('successful_login', Carbon::now(), $numberOfMinutes)); + } + + return $response; + } + + /** + * Does the 2FA cookie exist? + * + * @return bool + */ + public function isCookieExists() { + + //if (Cookie::has('successful_login')) { + if (Cookie::has('successful_login')) { + + return true; + } + + return false; + } + + + + + /*********************************************************************************/ /* OTHER 2FA HELPER METHODS */ /*********************************************************************************/
Add helper methods for 2FA cookie feature. #<I>
lasallecms_lasallecms-l5-helpers-pkg
train
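The `setCookie` helper above clamps the configured lifetime to between 1 and 30 days and converts it to minutes at 1,440 minutes per day. A compact restatement of that arithmetic (the function name is ours):

```python
def cookie_lifetime_minutes(number_of_days):
    # Clamp to the 1..30 day range the helper enforces, then convert
    # days to minutes: there are 1,440 minutes in a day.
    number_of_days = min(max(number_of_days, 1), 30)
    return number_of_days * 1440

assert cookie_lifetime_minutes(45) == 30 * 1440  # clamped down to 30 days
assert cookie_lifetime_minutes(0) == 1440        # clamped up to 1 day
```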
f9f2ba49294ef98bb0f41398ebc806fef5f93abb
diff --git a/optlang/cplex_interface.py b/optlang/cplex_interface.py index <HASH>..<HASH> 100644 --- a/optlang/cplex_interface.py +++ b/optlang/cplex_interface.py @@ -403,6 +403,10 @@ class Model(interface.Model): sense = 'G' rhs = float(constraint.lb) range_value = 0. + elif constraint.lb == constraint.ub: + sense = 'E' + rhs = float(constraint.lb) + range_value = 0. else: sense = 'R' rhs = float(constraint.lb)
cplex_interface understands equality constraints now
biosustain_optlang
train
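The patch adds an 'E' (equality) sense for constraints whose lower and upper bounds coincide. A sketch of the full bound-to-sense mapping implied by the surrounding branches; the `Constraint` tuple here is a stand-in, not optlang's class:

```python
from collections import namedtuple

# Stand-in constraint type: lb/ub may be None for one-sided constraints.
Constraint = namedtuple("Constraint", ["lb", "ub"])

def cplex_sense(c):
    # CPLEX senses: L = <=, G = >=, E = ==, R = ranged.
    if c.lb is None:
        return "L", float(c.ub), 0.0             # expr <= ub
    if c.ub is None:
        return "G", float(c.lb), 0.0             # lb <= expr
    if c.lb == c.ub:
        return "E", float(c.lb), 0.0             # the new equality branch
    return "R", float(c.lb), float(c.ub - c.lb)  # lb <= expr <= ub

print(cplex_sense(Constraint(lb=5, ub=5)))  # ('E', 5.0, 0.0)
```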
8c88e5af00310a7a62b15530502002fdd67b98ce
diff --git a/src/Wrapper/FixerWrapper/MethodWrapper.php b/src/Wrapper/FixerWrapper/MethodWrapper.php index <HASH>..<HASH> 100644 --- a/src/Wrapper/FixerWrapper/MethodWrapper.php +++ b/src/Wrapper/FixerWrapper/MethodWrapper.php @@ -109,7 +109,7 @@ final class MethodWrapper if ($returnTypeAnalysis) { $returnTypeInString = $returnTypeAnalysis->getName(); - // for docblocks print: "?Type" => "null|Type" + // for docblocks render: "?Type" => "null|Type" return str_replace('?', 'null|', $returnTypeInString); }
[PackageBuilder] merge PrivatesGetter and PrivatesSetter to PrivatesAccessor
Symplify_TokenRunner
train
0bff591bb0bc5a11ec22eb4f0b6104a79dea0819
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go index <HASH>..<HASH> 100644 --- a/distribution/pull_v2.go +++ b/distribution/pull_v2.go @@ -334,18 +334,18 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat manifest distribution.Manifest tagOrDigest string // Used for logging/progress only ) - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = tagged.Tag() - } else if digested, isDigested := ref.(reference.Canonical); isDigested { + if digested, isDigested := ref.(reference.Canonical); isDigested { manifest, err = manSvc.Get(ctx, digested.Digest()) if err != nil { return false, err } tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) }
Prefer digest over tag on pull If a reference passed to the pull code contains both a tag and a digest, currently the tag is used instead of the digest in the request to the registry. This is the wrong behavior. Change it to favor the digest.
moby_moby
train
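The moby change reorders the type checks so that a canonical (digested) reference takes precedence over a tag when a reference carries both. A minimal Python restatement of that precedence rule; the `Reference` class is illustrative:

```python
class Reference:
    def __init__(self, name, tag=None, digest=None):
        self.name, self.tag, self.digest = name, tag, digest

def manifest_selector(ref):
    # Check the digest first: if a reference has both, the digest wins,
    # which is exactly the reordering the patch above performs.
    if ref.digest is not None:
        return ("by-digest", ref.digest)
    if ref.tag is not None:
        return ("by-tag", ref.tag)
    raise ValueError("reference has neither a tag nor a digest: " + ref.name)

ref = Reference("acme/app", tag="latest", digest="sha256:abc123")
print(manifest_selector(ref))  # ('by-digest', 'sha256:abc123')
```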
834c0d1cf7c13cedeebddbb1a71b41a17f7c297b
diff --git a/src/senaite/core/z3cform/widgets/address.py b/src/senaite/core/z3cform/widgets/address.py index <HASH>..<HASH> 100644 --- a/src/senaite/core/z3cform/widgets/address.py +++ b/src/senaite/core/z3cform/widgets/address.py @@ -22,9 +22,11 @@ import json import six from bika.lims import api from bika.lims import senaiteMessageFactory as _ +from bika.lims.decorators import returns_json from Products.CMFPlone.utils import safe_unicode from Products.Five.browser import BrowserView from senaite.core.api import geo +from senaite.core.decorators import readonly_transaction from senaite.core.interfaces import ISenaiteFormLayer from senaite.core.schema.addressfield import BILLING_ADDRESS from senaite.core.schema.addressfield import BUSINESS_ADDRESS @@ -277,6 +279,8 @@ class AjaxSubdivisions(BrowserView): """Endpoint for the retrieval of geographic subdivisions """ + @readonly_transaction + @returns_json def __call__(self): """Returns a json with the list of geographic subdivisions that are immediately below the country or subdivision passed in with `parent` @@ -297,8 +301,7 @@ class AjaxSubdivisions(BrowserView): "code": subdivision.code, "type": self.context.translate(_(subdivision.type)), } - items = map(to_dict, items) - return json.dumps(items) + return [to_dict(item) for item in items] def get_parent(self): """Returns the parent passed through the request
Add readonly_transaction decorator to geo_subdivisions endpoint (#<I>) * Add readonly_transaction decorator to geo_subdivisions endpoint * Simplify * Use comprehension instead of map
senaite_senaite.core
train
38fe9baef9865bf5bc7a226aa2c04b636c5336dd
diff --git a/src/accounts/models.py b/src/accounts/models.py index <HASH>..<HASH> 100644 --- a/src/accounts/models.py +++ b/src/accounts/models.py @@ -21,6 +21,22 @@ class User(AbstractUser): verification_hash = models.CharField(max_length=32, null=True, blank=True) modified = models.DateTimeField(auto_now=True) + def check_password(self, raw_password): + """ + Returns a boolean of whether the raw_password was correct. + """ + if not raw_password or not self.has_usable_password(): + return False + return self.password == raw_password + + def has_usable_password(self): + if not self.password: + return False + return True + + def set_password(self, raw_password): + self.password = unicode(raw_password) + def get_absolute_url(self): return reverse('user_profile', kwargs={'username': self.username}) diff --git a/src/accounts/views.py b/src/accounts/views.py index <HASH>..<HASH> 100644 --- a/src/accounts/views.py +++ b/src/accounts/views.py @@ -200,7 +200,7 @@ class ChangeXMPPPasswordView(UpdateView): changed = xmpp.change_password( self.object.jid, self.old_password, - form.cleaned_data['password2'] + form.cleaned_data['password1'] ) if not changed: @@ -211,6 +211,8 @@ class ChangeXMPPPasswordView(UpdateView): transaction.rollback() return response else: + self.request.user.set_password(form.cleaned_data['password1']) + self.request.user.save() transaction.commit() messages.success(
Adding missing functionality from #<I>
colab_colab
train
e8b0ea5db8ddd2e0a97ef2edb8e1263c9042109a
diff --git a/art/art_param.py b/art/art_param.py index <HASH>..<HASH> 100644 --- a/art/art_param.py +++ b/art/art_param.py @@ -1354,7 +1354,7 @@ FONT_MAP = {"block": [block_dic, True], "banner": [banner_dic, False], # pragma "fancy93": [fancy93_dic, False], "fancy94": [fancy94_dic, False], "fancy95": [fancy95_dic, False], - "smooth1": [smooth_dic, False], + "smooth1": [smooth1_dic, False], "fancy96": [fancy96_dic, False], "fancy97": [fancy97_dic, False], "fancy98": [fancy98_dic, False],
fix: typo fixed (smooth_dic --> smooth1_dic)
sepandhaghighi_art
train
76d0bc91f4f5c52af98763a706a2219ff75cccbe
diff --git a/public/js/editors/libraries.js b/public/js/editors/libraries.js index <HASH>..<HASH> 100644 --- a/public/js/editors/libraries.js +++ b/public/js/editors/libraries.js @@ -227,40 +227,22 @@ var libraries = [ }, { 'url': [ - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.common.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.default.min.css', - 'https://code.jquery.com/jquery-1.9.1.min.js', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/js/kendo.ui.core.min.js' - ], - 'label': 'Kendo UI Core 2014.Q2', - 'group': 'Kendo UI' - }, - { - 'url': [ - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.common.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.rtl.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.default.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.dataviz.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.dataviz.default.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/styles/kendo.mobile.all.min.css', - 'https://code.jquery.com/jquery-1.9.1.min.js', - 'https://da7xgjtj801h2.cloudfront.net/2014.2.716/js/kendo.all.min.js' + 'https://da7xgjtj801h2.cloudfront.net/2015.2.624/styles/kendo.common.min.css', + 'https://da7xgjtj801h2.cloudfront.net/2015.2.624/styles/kendo.silver.min.css', + 'https://code.jquery.com/jquery-2.1.4.min.js', + 'https://da7xgjtj801h2.cloudfront.net/2015.2.624/js/kendo.ui.core.min.js' ], - 'label': 'Kendo UI 2014.Q2', + 'label': 'Kendo UI Core 2015.2.624', 'group': 'Kendo UI' }, { 'url': [ - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.common.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.rtl.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.default.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.dataviz.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.dataviz.default.min.css', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/styles/kendo.mobile.all.min.css', + 'https://da7xgjtj801h2.cloudfront.net/2014.3.1411/styles/kendo.common.min.css', + 'https://da7xgjtj801h2.cloudfront.net/2014.3.1411/styles/kendo.default.min.css', 'https://code.jquery.com/jquery-1.9.1.min.js', - 'https://da7xgjtj801h2.cloudfront.net/2014.1.318/js/kendo.all.min.js' + 'https://da7xgjtj801h2.cloudfront.net/2014.3.1411/js/kendo.ui.core.min.js' ], - 'label': 'Kendo UI 2014.Q1', + 'label': 'Kendo UI Core 2014.3.1411', 'group': 'Kendo UI' }, {
Clean up and Update Kendo UI
jsbin_jsbin
train
930e9185edde0d46f91884fd791a0c2f7b528c83
diff --git a/spec/integration/support/application.rb b/spec/integration/support/application.rb index <HASH>..<HASH> 100644 --- a/spec/integration/support/application.rb +++ b/spec/integration/support/application.rb @@ -1,3 +1,4 @@ +require 'rack' require "rack/builder" class IntegrationServer
Unable to execute tests without requiring 'rack'
savonrb_httpi
train
dfaf93fec87de1accb25879b2ad2635cde1f4a34
diff --git a/rb/lib/selenium/webdriver/remote/http/default.rb b/rb/lib/selenium/webdriver/remote/http/default.rb index <HASH>..<HASH> 100644 --- a/rb/lib/selenium/webdriver/remote/http/default.rb +++ b/rb/lib/selenium/webdriver/remote/http/default.rb @@ -33,18 +33,26 @@ module Selenium ) end + MAX_RETRIES = 3 + def request(verb, url, headers, payload, redirects = 0) request = new_request_for(verb, url, headers, payload) - retried = false + retries = 0 begin response = response_for(request) - rescue Errno::ECONNABORTED, Errno::ECONNRESET - # this happens sometimes on windows?! - raise if retried + rescue Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EADDRINUSE + # a retry is sometimes needed on Windows XP where we may quickly + # run out of ephemeral ports + # + # A more robust solution is bumping the MaxUserPort setting + # as described here: + # + # http://msdn.microsoft.com/en-us/library/aa560610%28v=bts.20%29.aspx + raise if retries >= MAX_RETRIES request = new_request_for(verb, url, headers, payload) - retried = true + retries += 1 retry end
JariBakken: Rescue and retry EADDRINUSE to work around connect(2) errors on Windows. This is related to ephemeral port usage and can also be fixed by tweaking the MaxUserPort setting as described here: <URL>
SeleniumHQ_selenium
train
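The Selenium patch widens a single-retry rescue into a bounded loop and adds EADDRINUSE to the rescued errors. A generic sketch of the same bounded-retry pattern, with a deliberately flaky stub standing in for the real request:

```python
import errno

MAX_RETRIES = 3

def request_with_retries(make_request):
    # Retry transient connect errors (e.g. ephemeral-port exhaustion
    # surfacing as EADDRINUSE) up to MAX_RETRIES times, then re-raise.
    retries = 0
    while True:
        try:
            return make_request()
        except OSError as exc:
            if exc.errno not in (errno.ECONNABORTED, errno.ECONNRESET,
                                 errno.EADDRINUSE):
                raise
            if retries >= MAX_RETRIES:
                raise
            retries += 1

calls = {"n": 0}
def flaky():
    calls["n"] += 1
    if calls["n"] < 3:
        raise OSError(errno.EADDRINUSE, "address already in use")
    return "ok"

print(request_with_retries(flaky))  # fails twice, then returns 'ok'
```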
bfa9ffff3cf3681c3fd6920faa236ca6e5418ddb
diff --git a/superset/views/core.py b/superset/views/core.py index <HASH>..<HASH> 100755 --- a/superset/views/core.py +++ b/superset/views/core.py @@ -2499,7 +2499,7 @@ class Superset(BaseSupersetView): @log_this def sql_json(self): """Runs arbitrary sql and returns and json""" - async = request.form.get('runAsync') == 'true' + async_ = request.form.get('runAsync') == 'true' sql = request.form.get('sql') database_id = request.form.get('database_id') schema = request.form.get('schema') or None @@ -2535,7 +2535,7 @@ class Superset(BaseSupersetView): select_as_cta=request.form.get('select_as_cta') == 'true', start_time=utils.now_as_float(), tab_name=request.form.get('tab'), - status=QueryStatus.PENDING if async else QueryStatus.RUNNING, + status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING, sql_editor_id=request.form.get('sql_editor_id'), tmp_table_name=tmp_table_name, user_id=int(g.user.get_id()), @@ -2560,7 +2560,7 @@ class Superset(BaseSupersetView): 'Template rendering failed: {}'.format(utils.error_msg_from_exception(e))) # Async request. - if async: + if async_: logging.info('Running query on a Celery worker') # Ignore the celery future object and the request may time out. try:
Update core.py (#<I>) With Python <I>, 'async' and 'await' become reserved words, which causes an invalid syntax error when running core.py. Renamed the 'async' var to 'async_'
apache_incubator-superset
train
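Since Python 3.7, `async` is a full keyword, so even assigning to it is a SyntaxError; the trailing-underscore rename follows PEP 8's convention for names that clash with keywords. A two-step demonstration:

```python
# Compiling (not running) the offending statement is enough to show the
# failure on Python 3.7+, where 'async' is a reserved word.
try:
    compile("async = True", "<demo>", "exec")
except SyntaxError as exc:
    print("invalid syntax:", exc)

async_ = True  # PEP 8: trailing underscore avoids the keyword clash
print(async_)
```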
f3f34c8ce45a56302c82ae564a0f915d9bdc9592
diff --git a/routes/api.js b/routes/api.js index <HASH>..<HASH> 100644 --- a/routes/api.js +++ b/routes/api.js @@ -54,8 +54,6 @@ router.post('/api/:conn/:db/:coll/:page', function (req, res, next){ } } - console.log(skip, limit); - mongo_db.collection(req.params.coll).find(query_obj, {skip: skip, limit: limit}).toArray(function (err, result){ if(err){ console.error(err); diff --git a/routes/config.js b/routes/config.js index <HASH>..<HASH> 100644 --- a/routes/config.js +++ b/routes/config.js @@ -1,6 +1,5 @@ var express = require('express'); var router = express.Router(); -var _ = require('lodash'); var common = require('./common'); // runs on all routes and checks password if one is setup @@ -15,8 +14,6 @@ router.post('/config/add_config', function (req, res, next){ var connPool = require('../connections'); var connection_list = req.nconf.connections.get('connections'); - - // check if name already exists if(connection_list !== undefined){ if(connection_list[req.body[0]] !== undefined){ @@ -27,12 +24,12 @@ router.post('/config/add_config', function (req, res, next){ // try parse uri string. If pass, add, else throw an error try{ - uri = MongoURI.parse(req.body[1]); + MongoURI.parse(req.body[1]); var options = {}; try{ options = JSON.parse(req.body[2]); }catch(err){ - res.status(400).json({'msg': req.i18n.__('Error in connection options') + ': ' + er}); + res.status(400).json({'msg': req.i18n.__('Error in connection options') + ': ' + err}); return; } @@ -70,7 +67,7 @@ router.post('/config/update_config', function (req, res, next){ // try parse uri string. If pass, add, else throw an error try{ - uri = MongoURI.parse(req.body.conn_string); + MongoURI.parse(req.body.conn_string); // var get current options var current_options = nconf.store.connections[req.body.curr_config].connection_options; @@ -99,7 +96,7 @@ router.post('/config/update_config', function (req, res, next){ } }); }catch(err){ - console.error('Config error2: ' + err); + console.error('Config error: ' + err); res.status(400).json({'msg': req.i18n.__('Config error') + ': ' + err}); } }); diff --git a/routes/index.js b/routes/index.js index <HASH>..<HASH> 100644 --- a/routes/index.js +++ b/routes/index.js @@ -1,7 +1,6 @@ var express = require('express'); var router = express.Router(); var _ = require('lodash'); -var path = require('path'); var common = require('./common'); // runs on all routes and checks password if one is setup @@ -92,8 +91,6 @@ router.get('/app/connection_list', function (req, res, next){ // Show server monitoring router.get('/app/monitoring/:conn/', function (req, res, next){ - var connection_list = req.app.locals.dbConnections; - var monitoringMessage = ''; var monitoringRequired = true; if(req.nconf.app.get('app:monitoring') === false){ diff --git a/routes/users.js b/routes/users.js index <HASH>..<HASH> 100644 --- a/routes/users.js +++ b/routes/users.js @@ -1,6 +1,5 @@ var express = require('express'); var router = express.Router(); -var _ = require('lodash'); var common = require('./common'); // runs on all routes and checks password if one is setup diff --git a/tests/tests.js b/tests/tests.js index <HASH>..<HASH> 100644 --- a/tests/tests.js +++ b/tests/tests.js @@ -1,15 +1,16 @@ +/* global it, describe, toEJSON */ + var request = require('supertest'); var assert = require('chai').assert; var jsdom = require('mocha-jsdom'); var fs = require('fs'); -var colors = require('colors'); var conn_name = 'TestConnection'; var db_name = 'NewTestDB'; var coll_name = 'NewTestCollection'; var user_name = 'TestNewUser'; 
-app = require('../app'); +const app = require('../app'); var agent = request.agent(app); describe('Add connection, database and collection', function(){ @@ -66,16 +67,12 @@ describe('User tests', function(){ if(process.version.substring(1, 2) >= 4){ describe('Document tests', function(){ var oid_doc_id = ''; - var $; + var doc_id; jsdom({ src: fs.readFileSync('./public/js/toEJSON.js', 'utf-8') }); - before(function (){ - $ = require('jquery'); - }); - it('Add a document', function(done){ var dte = new Date(); var json = '{"NewTestDocument":"Test Data","NewTestDateToday": ISODate("' + dte.toISOString() + '"),"NewTestDate5Days": ISODate("' + new Date(dte.setDate(dte.getDate() - 5)).toISOString() + '")}';
Some eslint-ing and cleaning up code.
mrvautin_adminMongo
train
c4a327a47bd2eb8c4f8b51abdb1717080ec9a142
diff --git a/src/logger.js b/src/logger.js index <HASH>..<HASH> 100644 --- a/src/logger.js +++ b/src/logger.js @@ -1,5 +1,5 @@ /*! - * js-logger 0.9.0 - http://github.com/jonnyreeves/js-logger + * js-logger - http://github.com/jonnyreeves/js-logger * Jonny Reeves, http://jonnyreeves.co.uk/ * js-logger may be freely distributed under the MIT license. */
Version number got out of sync in the header; removed it as it's redundant.
jonnyreeves_js-logger
train
46aadc83a7fdb3abad3ab4b6a75c7b8e6bb9678e
diff --git a/lib/arbetsformedlingen/api/results/soklista_result.rb b/lib/arbetsformedlingen/api/results/soklista_result.rb index <HASH>..<HASH> 100644 --- a/lib/arbetsformedlingen/api/results/soklista_result.rb +++ b/lib/arbetsformedlingen/api/results/soklista_result.rb @@ -3,15 +3,15 @@ require 'arbetsformedlingen/api/values/soklista_values' module Arbetsformedlingen module API module SoklistaResult - def self.build(response_data) - data = response_data.fetch('soklista') + def self.build(response_data, list_name: nil) + data = response_data.fetch('soklista', {}) Values::SoklistaPage.new( - list_name: data.fetch('listnamn'), - total_ads: data.fetch('totalt_antal_platsannonser'), - total_vacancies: data.fetch('totalt_antal_ledigajobb'), + list_name: data.fetch('listnamn', list_name), + total_ads: data.fetch('totalt_antal_platsannonser', 0), + total_vacancies: data.fetch('totalt_antal_ledigajobb', 0), raw_data: response_data, - data: data.fetch('sokdata').map do |result| + data: data.fetch('sokdata', []).map do |result| Values::SoklistaResult.new( id: result.fetch('id'), name: result.fetch('namn'), diff --git a/spec/arbetsformedlingen/api/client_spec.rb b/spec/arbetsformedlingen/api/client_spec.rb index <HASH>..<HASH> 100644 --- a/spec/arbetsformedlingen/api/client_spec.rb +++ b/spec/arbetsformedlingen/api/client_spec.rb @@ -312,6 +312,21 @@ RSpec.describe Arbetsformedlingen::API::Client do end end + describe '#occupation', vcr: true do + it 'returns no occupation if unknown' do + client = described_class.new + + occupations = client.occupation(name: '{{}}}}') + + expect(occupations.list_name).to be_nil + expect(occupations.raw_data).to be_a(Hash) + expect(occupations.total_ads).to be_zero + expect(occupations.total_vacancies).to be_zero + expect(occupations.data).to be_a(Array) + expect(occupations.data).to be_empty + end + end + describe '#occupations', vcr: true do it 'returns a list of occupations' do client = described_class.new
Don't crash when no results are returned from API
buren_arbetsformedlingen
train
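The Ruby fix above swaps hard `fetch` calls for fetches with defaults, so an empty API response builds an empty result rather than raising. The equivalent guard in Python is `dict.get` with a default; the keys mirror the Swedish field names in the patch:

```python
def build_soklista(response_data, list_name=None):
    # Every lookup supplies a default, so a missing or empty 'soklista'
    # section yields an empty result object instead of a KeyError.
    data = response_data.get("soklista", {})
    return {
        "list_name": data.get("listnamn", list_name),
        "total_ads": data.get("totalt_antal_platsannonser", 0),
        "total_vacancies": data.get("totalt_antal_ledigajobb", 0),
        "data": data.get("sokdata", []),
        "raw_data": response_data,
    }

print(build_soklista({}))  # no results, but no crash either
```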
e2be5cd4a94957fc723b90648658f806a046d29b
diff --git a/tests/tests_event.py b/tests/tests_event.py index <HASH>..<HASH> 100755 --- a/tests/tests_event.py +++ b/tests/tests_event.py @@ -7,14 +7,23 @@ # # Python DB-API 2.0 module for Firebird. ############################################################################## -import os,sys +import os +import sys +import thread +import time sys.path.append('./../') from firebirdsql import * +if sys.platform in ('win32', 'darwin'): + fbase = os.path.abspath('.') + '/test' +else: + import tempfile + fbase = tempfile.mktemp() TEST_HOST = 'localhost' TEST_PORT = 3050 -TEST_DATABASE = '/tmp/test_event.fdb' +TEST_DATABASE = fbase + '.fdb' TEST_DSN = TEST_HOST + '/' + str(TEST_PORT) + ':' + TEST_DATABASE +print('dsn=', TEST_DSN) TEST_USER = 'sysdba' TEST_PASS = 'masterkey' @@ -48,14 +57,10 @@ def producer(): conn.commit() if __name__ == '__main__': - if len(sys.argv) < 2: - print(sys.argv[0] + ' create|handler|producer') - sys.exit() - if sys.argv[1] == 'create': - create() - elif sys.argv[1] == 'handler': + create() + pid = os.fork() + if pid == 0: handler() - elif sys.argv[1] == 'producer': - producer() else: - print('Bad argument') + time.sleep(1) + producer()
rewrite test for event support.
nakagami_pyfirebirdsql
train
1d76fb4536833c6b66166116f9e3b9fa021eb66f
diff --git a/lib/model/core.rb b/lib/model/core.rb index <HASH>..<HASH> 100644 --- a/lib/model/core.rb +++ b/lib/model/core.rb @@ -52,7 +52,7 @@ module Model # get a hash that we can modify !! opts[:localize] ? hash = self.to_hash : hash = self.as_document - self.class.normalize(Marshal.load(Marshal.dump(hash)), opts) + self.class.normalize(Hash[hash], opts) end def generate_md5 diff --git a/lib/patches/tire/results/item.rb b/lib/patches/tire/results/item.rb index <HASH>..<HASH> 100644 --- a/lib/patches/tire/results/item.rb +++ b/lib/patches/tire/results/item.rb @@ -23,7 +23,7 @@ module Tire end end - self.class.normalize(Marshal.load(Marshal.dump(hash)), opts) + self.class.normalize(Hash[hash], opts) end def lookup(object)
Remove Marshal/unmarshal for cloning hashes
ladder_ladder
train
79fdd8b341437287549ace1f413bd495e6db8d3e
diff --git a/cake/tests/cases/libs/model/datasources/dbo/dbo_mysql.test.php b/cake/tests/cases/libs/model/datasources/dbo/dbo_mysql.test.php index <HASH>..<HASH> 100644 --- a/cake/tests/cases/libs/model/datasources/dbo/dbo_mysql.test.php +++ b/cake/tests/cases/libs/model/datasources/dbo/dbo_mysql.test.php @@ -153,6 +153,7 @@ class MysqlTestModel extends Model { * @subpackage cake.tests.cases.libs.model.datasources.dbo */ class DboMysqlTest extends CakeTestCase { + var $fixtures = array('core.binary_test'); /** * The Dbo instance to be tested * @@ -176,7 +177,6 @@ class DboMysqlTest extends CakeTestCase { */ function setUp() { $db = ConnectionManager::getDataSource('test_suite'); - $this->db = new DboMysqlTestDb($db->config); $this->model = new MysqlTestModel(); } /** @@ -185,7 +185,8 @@ class DboMysqlTest extends CakeTestCase { * @access public */ function tearDown() { - unset($this->db); + unset($this->model); + ClassRegistry::flush(); } /** * startCase @@ -269,7 +270,7 @@ class DboMysqlTest extends CakeTestCase { * @return void */ function testTinyintCasting() { - $this->db->cacheSources = $this->db->testing = false; + $this->db->cacheSources = false; $this->db->query('CREATE TABLE ' . $this->db->fullTableName('tinyint') . ' (id int(11) AUTO_INCREMENT, bool tinyint(1), small_int tinyint(2), primary key(id));'); $this->model = new CakeTestModel(array( @@ -307,7 +308,7 @@ class DboMysqlTest extends CakeTestCase { * @access public */ function testIndexDetection() { - $this->db->cacheSources = $this->db->testing = false; + $this->db->cacheSources = false; $name = $this->db->fullTableName('simple'); $this->db->query('CREATE TABLE ' . $name . ' (id int(11) AUTO_INCREMENT, bool tinyint(1), small_int tinyint(2), primary key(id));'); @@ -510,7 +511,7 @@ class DboMysqlTest extends CakeTestCase { */ function testAlterSchemaIndexes() { App::import('Core', 'Schema'); - $this->db->cacheSources = $this->db->testing = false; + $this->db->cacheSources = false; $schema1 =& new CakeSchema(array( 'name' => 'AlterTest1', @@ -574,5 +575,23 @@ class DboMysqlTest extends CakeTestCase { $this->db->query($this->db->dropSchema($schema1)); } +/** + * test saving and retrieval of blobs + * + * @return void + **/ + function testBlobSaving() { + $this->db->cacheSources = false; + $data = "GIF87ab
Adding test to ensure blob integrity. Disproves #<I>
cakephp_cakephp
train
f6185cfdd5c8095c00976b6a158c0472d9f6a8ff
diff --git a/datadog_checks_base/datadog_checks/base/checks/base.py b/datadog_checks_base/datadog_checks/base/checks/base.py index <HASH>..<HASH> 100644 --- a/datadog_checks_base/datadog_checks/base/checks/base.py +++ b/datadog_checks_base/datadog_checks/base/checks/base.py @@ -375,10 +375,11 @@ class AgentCheck(object): self._log_deprecation('in_developer_mode') return False - def load_configuration_models(self): - # 'datadog_checks.<PACKAGE>.<MODULE>...' - module_parts = self.__module__.split('.') - package_path = '{}.config_models'.format('.'.join(module_parts[:2])) + def load_configuration_models(self, package_path=None): + if package_path is None: + # 'datadog_checks.<PACKAGE>.<MODULE>...' + module_parts = self.__module__.split('.') + package_path = '{}.config_models'.format('.'.join(module_parts[:2])) if self._config_model_shared is None: raw_shared_config = self._get_config_model_initialization_data()
Allow the loading of arbitrary configuration models (#<I>)
DataDog_integrations-core
train
189a6f8b8f21919353e6f650eea2e8fc59e623a2
diff --git a/lib/puppet/pops.rb b/lib/puppet/pops.rb index <HASH>..<HASH> 100644 --- a/lib/puppet/pops.rb +++ b/lib/puppet/pops.rb @@ -10,7 +10,7 @@ module Puppet # # @api public module Pops - EMPTY_HASH = {}.freeze + EMPTY_HASH = {}.freeze EMPTY_ARRAY = [].freeze EMPTY_STRING = ''.freeze
(maint) Fix indentation
puppetlabs_puppet
train
b0b4f45d86fa596dcefe21c02ad1219ab395e7ae
diff --git a/bibliopixel/util/udp.py b/bibliopixel/util/udp.py index <HASH>..<HASH> 100644 --- a/bibliopixel/util/udp.py +++ b/bibliopixel/util/udp.py @@ -7,31 +7,17 @@ import queue, socket from .. util.threads import runnable -class Sender: +class Sender(runnable.Runnable): def __init__(self, address): """ - :param str address: a pair (port, ip_address) to pass to socket.connect + :param str address: a pair (ip_address, port) to pass to socket.connect """ super().__init__() self.address = address - self.running = True - self._socket = None - self.reuse_socket = True - - @property - def socket(self): - if not (self.reuse_socket and self._socket): - self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - return self._socket - - def start(self): - pass - - def stop(self): - pass def send(self, msg): with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: + sock.settimeout(self.timeout) sock.connect(self.address) sock.send(msg) # sock.sendto(msg, self.address) @@ -48,9 +34,7 @@ class QueuedSender(runnable.QueueHandler): def __init__(self, address, **kwds): """ - :param str address: a pair (port, ip_address) to pass to socket.connect - :param bool use_queue: if True, run the connection in a different thread - with a queue + :param str address: a pair (ip_address, port) to pass to socket.connect """ super().__init__(**kwds) self.sender = Sender(address) @@ -61,7 +45,9 @@ class QueuedSender(runnable.QueueHandler): def sender(address, use_queue=True, **kwds): """ - :param tuple address: + :param str address: a pair (ip_address, port) to pass to socket.connect + :param bool use_queue: if True, run the connection in a different thread + with a queue """ return QueuedSender(address, **kwds) if use_queue else Sender(address) @@ -80,11 +66,15 @@ class Receiver(runnable.Loop): def run(self): self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.socket.bind(self.address) + self.socket.settimeout(self.timeout) super().run() def loop(self): - data, addr = self.socket.recvfrom(self.bufsize) + try: + data, addr = self.socket.recvfrom(self.bufsize) + except socket.timeout: + return if data: self.receive(data)
Add a timeout to UDP sockets. * Clean up unused code
ManiacalLabs_BiblioPixel
train
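The BiblioPixel change sets a timeout on the UDP sockets and treats a receive timeout as a normal empty iteration rather than an error. A self-contained sketch of that receive loop; the address, timeout, and iteration count are made up:

```python
import socket

def receive_some(address=("127.0.0.1", 9999), timeout=0.1, iterations=3):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(address)
    sock.settimeout(timeout)  # the key addition from the patch above
    try:
        for _ in range(iterations):
            try:
                data, addr = sock.recvfrom(4096)
            except socket.timeout:
                continue  # nothing arrived in time; just loop again
            print("got %d bytes from %s" % (len(data), addr))
    finally:
        sock.close()

receive_some()
```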
8314df742e6c8a09eb6344c488a0358ce2284ff8
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ from setuptools import setup -__version__ = "0.0.4.2" +__version__ = "0.0.5-dev1" requirements = [pkg.strip() for pkg in open('requirements.txt').readlines()] @@ -34,6 +34,9 @@ setup( 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.4', ], )
Updating python supported versions on setup.py
marcwebbie_passpie
train
10b280f8d209855767061d4b2a3c156dd590a068
diff --git a/README.md b/README.md index <HASH>..<HASH> 100644 --- a/README.md +++ b/README.md @@ -31,3 +31,29 @@ s := "AnyKind of_string" ```bash go get -u github.com/iancoleman/strcase ``` + +## Custom Acronyms for ToCamel && ToLowerCamel + +Often times text can contain specific acronyms which you need to be handled a certain way. +Out of the box `strcase` treats the string "ID" as "Id" or "id" but there is no way to cater +for every case in the wild. + +To configure your custom acronym globally you can use the following before running any conversion + +```go +import ( + "github.com/iancoleman/strcase" +) + +func init() { + // results in "Api" using ToCamel("API") + // results in "api" using ToLowerCamel("API") + strcase.ConfigureAcronym("API", "api") + + // results in "PostgreSQL" using ToCamel("PostgreSQL") + // results in "postgreSQL" using ToLowerCamel("PostgreSQL") + strcase.ConfigureAcronym("PostgreSQL", "PostgreSQL") + +} + +``` diff --git a/acronyms.go b/acronyms.go index <HASH>..<HASH> 100644 --- a/acronyms.go +++ b/acronyms.go @@ -3,3 +3,8 @@ package strcase var uppercaseAcronym = map[string]string{ "ID": "id", } + +// ConfigureAcronym allows you to add additional words which will be considered acronyms +func ConfigureAcronym(key, val string) { + uppercaseAcronym[key] = val +} diff --git a/camel_test.go b/camel_test.go index <HASH>..<HASH> 100644 --- a/camel_test.go +++ b/camel_test.go @@ -85,6 +85,78 @@ func TestToLowerCamel(t *testing.T) { toLowerCamel(t) } +func TestCustomAcronymsToCamel(t *testing.T) { + tests := []struct { + name string + acronymKey string + acronymValue string + expected string + }{ + { + name: "API Custom Acronym", + acronymKey: "API", + acronymValue: "api", + expected: "Api", + }, + { + name: "ABCDACME Custom Acroynm", + acronymKey: "ABCDACME", + acronymValue: "AbcdAcme", + expected: "AbcdAcme", + }, + { + name: "PostgreSQL Custom Acronym", + acronymKey: "PostgreSQL", + acronymValue: "PostgreSQL", + expected: "PostgreSQL", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ConfigureAcronym(test.acronymKey, test.acronymValue) + if result := ToCamel(test.acronymKey); result != test.expected { + t.Errorf("expected custom acronym result %s, got %s", test.expected, result) + } + }) + } +} + +func TestCustomAcronymsToLowerCamel(t *testing.T) { + tests := []struct { + name string + acronymKey string + acronymValue string + expected string + }{ + { + name: "API Custom Acronym", + acronymKey: "API", + acronymValue: "api", + expected: "api", + }, + { + name: "ABCDACME Custom Acroynm", + acronymKey: "ABCDACME", + acronymValue: "AbcdAcme", + expected: "abcdAcme", + }, + { + name: "PostgreSQL Custom Acronym", + acronymKey: "PostgreSQL", + acronymValue: "PostgreSQL", + expected: "postgreSQL", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ConfigureAcronym(test.acronymKey, test.acronymValue) + if result := ToLowerCamel(test.acronymKey); result != test.expected { + t.Errorf("expected custom acronym result %s, got %s", test.expected, result) + } + }) + } +} + func BenchmarkToLowerCamel(b *testing.B) { benchmarkCamelTest(b, toLowerCamel) }
Provides ability to configure custom acronyms
iancoleman_strcase
train
3dad65ac3e53a4a88d464f4d7854e532d2c2101e
diff --git a/CHANGELOG.md b/CHANGELOG.md index <HASH>..<HASH> 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,11 @@ Change Log ========== +Version 1.1.0 *(2013-12-24)* +---------------------------- + + * DebugLn does not need a context in it's constructor. + Version 1.0.0 *(2013-12-21)* ---------------------------- diff --git a/ln-dagger-sample/src/main/java/com/f2prateek/ln/dagger/example/DaggerApp.java b/ln-dagger-sample/src/main/java/com/f2prateek/ln/dagger/example/DaggerApp.java index <HASH>..<HASH> 100644 --- a/ln-dagger-sample/src/main/java/com/f2prateek/ln/dagger/example/DaggerApp.java +++ b/ln-dagger-sample/src/main/java/com/f2prateek/ln/dagger/example/DaggerApp.java @@ -72,7 +72,7 @@ public class DaggerApp extends Application { } @Provides LnInterface provideDebugLn(Context context) { - return new DebugLn(context); + return DebugLn.from(context); } } diff --git a/ln-sample/src/main/java/com/f2prateek/ln/example/ExampleApp.java b/ln-sample/src/main/java/com/f2prateek/ln/example/ExampleApp.java index <HASH>..<HASH> 100644 --- a/ln-sample/src/main/java/com/f2prateek/ln/example/ExampleApp.java +++ b/ln-sample/src/main/java/com/f2prateek/ln/example/ExampleApp.java @@ -35,7 +35,7 @@ public class ExampleApp extends Application { if (BuildConfig.DEBUG) { // By default, this is done via injection - Ln.set(new DebugLn(this)); + Ln.set(DebugLn.from(this)); } else { // By default, Ln has an EmptyLn // So no need to configure anything for release diff --git a/ln/src/main/java/com/f2prateek/ln/DebugLn.java b/ln/src/main/java/com/f2prateek/ln/DebugLn.java index <HASH>..<HASH> 100644 --- a/ln/src/main/java/com/f2prateek/ln/DebugLn.java +++ b/ln/src/main/java/com/f2prateek/ln/DebugLn.java @@ -49,15 +49,22 @@ public class DebugLn implements LnInterface { protected int minimumLogLevel; protected String packageName; - public DebugLn(Context context) { - packageName = context.getPackageName(); + public DebugLn(String packageName, int minimumLogLevel) { + this.packageName = packageName; + this.minimumLogLevel = minimumLogLevel; + } + + public static DebugLn from(final Context context) { + String packageName = context.getPackageName(); + int minimumLogLevel = Log.INFO; try { final int flags = context.getPackageManager().getApplicationInfo(packageName, 0).flags; - minimumLogLevel = (flags & ApplicationInfo.FLAG_DEBUGGABLE) != 0 ? Log.VERBOSE : Log.INFO; + minimumLogLevel = + (flags & ApplicationInfo.FLAG_DEBUGGABLE) != 0 ? 
Log.VERBOSE : minimumLogLevel; } catch (PackageManager.NameNotFoundException e) { e.printStackTrace(); - minimumLogLevel = Log.INFO; } + return new DebugLn(packageName, minimumLogLevel); } @Override diff --git a/ln/src/test/java/com/f2prateek/ln/LnFacadeTest.java b/ln/src/test/java/com/f2prateek/ln/LnFacadeTest.java index <HASH>..<HASH> 100644 --- a/ln/src/test/java/com/f2prateek/ln/LnFacadeTest.java +++ b/ln/src/test/java/com/f2prateek/ln/LnFacadeTest.java @@ -44,8 +44,8 @@ public class LnFacadeTest { @Before public void setUp() { Context context = Robolectric.getShadowApplication().getApplicationContext(); - LnFacade lnFacade = new LnFacade(new DebugLn(context), new DebugLn(context)); - lnFacade.addLoggingEndpoint(new DebugLn(context)); + LnFacade lnFacade = new LnFacade(DebugLn.from(context), DebugLn.from(context)); + lnFacade.addLoggingEndpoint(DebugLn.from(context)); lnFacade.addLoggingEndpoint(new EmptyLn()); Ln.set(lnFacade); Ln.setLoggingLevel(Log.VERBOSE); diff --git a/ln/src/test/java/com/f2prateek/ln/LnTest.java b/ln/src/test/java/com/f2prateek/ln/LnTest.java index <HASH>..<HASH> 100644 --- a/ln/src/test/java/com/f2prateek/ln/LnTest.java +++ b/ln/src/test/java/com/f2prateek/ln/LnTest.java @@ -44,7 +44,7 @@ public class LnTest { private static final String HELLO_WORLD = "Hello, world!"; @Before public void setUp() { - Ln.set(new DebugLn(Robolectric.getShadowApplication().getApplicationContext())); + Ln.set(DebugLn.from(Robolectric.getShadowApplication().getApplicationContext())); Ln.setLoggingLevel(Log.VERBOSE); }
Use a static method to construct DebugLn
f2prateek_ln
train
e25e077a2030aa7c438a993085e6aefcd337ca19
diff --git a/cli/command/cli_test.go b/cli/command/cli_test.go index <HASH>..<HASH> 100644 --- a/cli/command/cli_test.go +++ b/cli/command/cli_test.go @@ -6,6 +6,7 @@ import ( "crypto/x509" "fmt" "io/ioutil" + "net/http" "os" "runtime" "testing" @@ -79,6 +80,24 @@ func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) { assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion())) } +func TestNewAPIClientFromFlagsWithHttpProxyEnv(t *testing.T) { + defer env.Patch(t, "HTTP_PROXY", "http://proxy.acme.com:1234")() + defer env.Patch(t, "DOCKER_HOST", "tcp://docker.acme.com:2376")() + + opts := &flags.CommonOptions{} + configFile := &configfile.ConfigFile{} + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + transport, ok := apiclient.HTTPClient().Transport.(*http.Transport) + assert.Assert(t, ok) + assert.Assert(t, transport.Proxy != nil) + request, err := http.NewRequest(http.MethodGet, "tcp://docker.acme.com:2376", nil) + assert.NilError(t, err) + url, err := transport.Proxy(request) + assert.NilError(t, err) + assert.Check(t, is.Equal("http://proxy.acme.com:1234", url.String())) +} + type fakeClient struct { client.Client pingFunc func() (types.Ping, error) diff --git a/cli/context/docker/load.go b/cli/context/docker/load.go index <HASH>..<HASH> 100644 --- a/cli/context/docker/load.go +++ b/cli/context/docker/load.go @@ -104,8 +104,8 @@ func (c *Endpoint) ClientOpts() ([]client.Opt, error) { return nil, err } result = append(result, - client.WithHost(c.Host), withHTTPClient(tlsConfig), + client.WithHost(c.Host), ) } else {
restore support for env variables to configure proxy; regression introduced by b<I>f<I>; closes #<I>
docker_cli
train
2d2dbbc32447d62b0dc2e1b3dd578ba1642b38dc
diff --git a/tests/YoutubeDlTest.php b/tests/YoutubeDlTest.php index <HASH>..<HASH> 100644 --- a/tests/YoutubeDlTest.php +++ b/tests/YoutubeDlTest.php @@ -367,7 +367,7 @@ class YoutubeDlTest extends TestCase $collection = $yt->download(Options::create()->cleanupMetadata(true)->downloadPath($this->tmpDir)->url($url)); foreach ($collection->getVideos() as $video) { - self::assertFileNotExists($video->getMetadataFile()->getPathname()); + self::assertFileDoesNotExist($video->getMetadataFile()->getPathname()); } }
Don't use deprecated method in test
norkunas_youtube-dl-php
train
35537e95eb215408f1548990feda1bbc47a7e9cc
diff --git a/i3pystatus/iinet.py b/i3pystatus/iinet.py index <HASH>..<HASH> 100644 --- a/i3pystatus/iinet.py +++ b/i3pystatus/iinet.py @@ -15,6 +15,7 @@ class IINet(IntervalModule, ColorRangeModule): * `{percentage_used}` — percentage of your quota that is used * `{percentage_available}` — percentage of your quota that is available + * `{used}` - GB of your quota used """ settings = ( @@ -75,6 +76,7 @@ class IINet(IntervalModule, ColorRangeModule): usage['percent_used'] = '{0:.2f}%'.format(percent_used) usage['percent_available'] = '{0:.2f}%'.format(percent_avaliable) + usage['used'] = '{0:.2f}'.format(used / 1000 ** 3) self.data = usage self.output = { @@ -88,7 +90,7 @@ class IINet(IntervalModule, ColorRangeModule): '_SERVICE=%(service_token)s' % self.__dict__).json() if self.valid_response(response): for traffic_type in response['response']['usage']['traffic_types']: - if traffic_type['name'] == 'anytime': + if traffic_type['name'] in ('anytime', 'usage'): return traffic_type else: raise Exception("Failed to retrieve usage information for: %s" % self.username)
fix vdsl2 usage, also add "used" formatter (#<I>) * fix vdsl2 usage, also add "used" formatter * PEP fix
enkore_i3pystatus
train
49291cbac048d9de4e53f0e02538a7f53d8296ff
diff --git a/anyconfig/mergeabledict.py b/anyconfig/mergeabledict.py index <HASH>..<HASH> 100644 --- a/anyconfig/mergeabledict.py +++ b/anyconfig/mergeabledict.py @@ -3,16 +3,15 @@ # Copyright (C) 2011 - 2013 Satoru SATOH <ssato redhat.com> # License: MIT # -"""Merge-able dict. +"""Dict-based object supports merge operations. .. versionchanged: 0.4.99 - Convert collections.namedtuple objects to dicts recursively. + Support to convert namedtuple objects from/to dicts recursively. .. versionadded: 0.3.1 Added naive and partial implementation of JSON Pointer support. -.. note:: - JSON Pointer: http://tools.ietf.org/html/rfc6901 +.. note:: JSON Pointer: http://tools.ietf.org/html/rfc6901 """ from __future__ import absolute_import @@ -207,8 +206,8 @@ def convert_to(obj, to_namedtuple=False, (bunch is distributed under MIT license same as this module.) .. note:: - If `to_namedtuple` is True and given object `obj` is MergeableDict - (! OrderedMergeableDict), then the order of fields of result namedtuple + If `to_namedtuple` is True and given object `obj` is MergeableDict and + not OrderedMergeableDict, then the order of fields of result namedtuple object may be random and not stable because the order of MergeableDict may not be kept and stable.
doc: minor corrections and cleanups in docstrings
ssato_python-anyconfig
train
4ccef918887584b3831361640609b765cb1306b9
diff --git a/mapillary_tools/processing.py b/mapillary_tools/processing.py index <HASH>..<HASH> 100644 --- a/mapillary_tools/processing.py +++ b/mapillary_tools/processing.py @@ -733,7 +733,10 @@ def user_properties_master(user_name, print("Error, no user key obtained for the user name " + user_name + ", check if the user name is spelled correctly and if the master key is correct") return None - user_properties['MAPSettingsUserKey'] = user_key + if user_key: + user_properties['MAPSettingsUserKey'] = user_key + else: + return None if organization_key: user_properties.update( diff --git a/mapillary_tools/uploader.py b/mapillary_tools/uploader.py index <HASH>..<HASH> 100644 --- a/mapillary_tools/uploader.py +++ b/mapillary_tools/uploader.py @@ -450,10 +450,10 @@ def get_user_key(user_name): req = urllib2.Request(USER_URL.format(user_name, CLIENT_ID)) resp = json.loads(urllib2.urlopen(req).read()) except: - return "" + return None if not resp or 'key' not in resp[0]: print("Error, user name {} does not exist...".format(user_name)) - return "" + return None return resp[0]['key'] @@ -485,9 +485,11 @@ def upload_done_file(import_path, params): if os.path.exists(DONE_filepath): os.remove(DONE_filepath) + def is_done_file(filename): return filename == "DONE" + def upload_file(filepath, url, permission, signature, key=None, aws_key=None): ''' Upload file at filepath.
bug: fail process if username does not exist
mapillary_mapillary_tools
train
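The mapillary fix changes the failure sentinel from an empty string to None and makes the caller treat it as fatal, so processing stops instead of continuing with a bogus key. A condensed restatement; the directory dict is a stub for the real API lookup:

```python
DIRECTORY = {"alice": "key123"}  # stub for the real user-lookup API

def get_user_key(user_name):
    return DIRECTORY.get(user_name)  # None when the user does not exist

def user_properties(user_name):
    user_key = get_user_key(user_name)
    if not user_key:
        print("Error, no user key obtained for the user name " + user_name)
        return None  # fail the whole process, per the commit message
    return {"MAPSettingsUserKey": user_key}

print(user_properties("alice"))   # {'MAPSettingsUserKey': 'key123'}
print(user_properties("nobody"))  # None
```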
9fff46d5780682f13935a36b9d654f8bfe7cf065
diff --git a/tensorpack/input_source/input_source.py b/tensorpack/input_source/input_source.py index <HASH>..<HASH> 100644 --- a/tensorpack/input_source/input_source.py +++ b/tensorpack/input_source/input_source.py @@ -23,7 +23,7 @@ from ..utils.develop import log_deprecated from ..callbacks.base import Callback from ..callbacks.graph import RunOp -__all__ = ['PlaceholderInput', 'FeedInput', 'DataParallelFeedInput', +__all__ = ['PlaceholderInput', 'FeedInput', 'FeedfreeInput', 'QueueInput', 'BatchQueueInput', 'DummyConstantInput', 'TensorInput', @@ -99,64 +99,6 @@ class FeedInput(InputSource): return [self._cb] -class DataParallelFeedInput(FeedInput): - """ - Input by feeding k datapoints to k copies of placeholders located on k towers. - """ - - class _DataParallelFeedCallback(Callback): - def __init__(self, ds, placeholders_per_tower): - self._ds = ds - self._itr = self._ds.get_data() - self._placehdrs_per_tower = placeholders_per_tower - self._nr_tower = len(self._placehdrs_per_tower) - - def _reset(self): - self._ds.reset_state() - self._itr = self._ds.get_data() - - def _before_run(self, _): - cnt = self._nr_tower - feed = {} - for t in range(cnt): - dp = next(self._itr) - f = dict(zip(self._placehdrs_per_tower[t], dp)) - feed.update(f) - return tf.train.SessionRunArgs(fetches=[], feed_dict=feed) - - def __init__(self, ds, tower_names): - super(DataParallelFeedInput, self).__init__(ds) - self._tower_names = tower_names - self._nr_tower = len(tower_names) - - def _setup(self, inputs): - self._placehdrs_per_tower = [] - for tname in self._tower_names: - # build a list of placeholders for each tower - self._placehdrs_per_tower.append( - [v.build_placeholder(prefix=tname + '/') for v in inputs]) - self._cb = self._DataParallelFeedCallback(self._iter_ds, self._placehdrs_per_tower) - - def _get_input_tensors(self): - # return placeholders for each tower - ctx = get_current_tower_context() - return self._placehdrs_per_tower[ctx.index] - - def next_feed(self, cnt=1): - """ - Args: - cnt: how many towers to feed to. - """ - cnt = int(cnt) - assert cnt < self._nr_tower - feed = {} - for t in range(cnt): - dp = next(self._cb._itr) - f = dict(zip(self._placehdrs_per_tower[t], dp)) - feed.update(f) - return feed - - class FeedfreeInput(InputSource): """ Abstract base for input without feed, e.g. by queue or other operations. """
remove DataParallelFeedInput, as dataparallel inference now relies on QueueInput
tensorpack_tensorpack
train
721201de1f2baaf6e21e5992035cc91edc810425
diff --git a/src/Message/SecureXMLAbstractRequest.php b/src/Message/SecureXMLAbstractRequest.php index <HASH>..<HASH> 100644 --- a/src/Message/SecureXMLAbstractRequest.php +++ b/src/Message/SecureXMLAbstractRequest.php @@ -31,9 +31,11 @@ abstract class SecureXMLAbstractRequest extends AbstractRequest */ public function getMessageId() { - return empty($this->getParameter('messageId')) - ? substr(md5(microtime()), 0, 30) - : $this->getParameter('messageId'); + if (empty($this->getParameter('messageId'))) { + $this->setMessageId(substr(md5(microtime()), 0, 30)); + } + + return $this->getParameter('messageId'); } public function sendData($data) diff --git a/tests/Message/SecureXMLAuthorizeRequestTest.php b/tests/Message/SecureXMLAuthorizeRequestTest.php index <HASH>..<HASH> 100644 --- a/tests/Message/SecureXMLAuthorizeRequestTest.php +++ b/tests/Message/SecureXMLAuthorizeRequestTest.php @@ -96,4 +96,16 @@ class SecureXMLAuthorizeRequestTest extends TestCase $this->assertSame('504', $response->getCode()); $this->assertSame('Invalid merchant ID', $response->getMessage()); } + + public function testSetMessageId() + { + $this->request->setMessageId('message_identifier_here'); + $this->assertSame('message_identifier_here', $this->request->getMessageId()); + } + + public function testAutogeneratedMessageId() + { + $this->assertNotNull($this->request->getMessageId()); + $this->assertSame(30, strlen($this->request->getMessageId())); + } }
Fix & test logic for autogenerated messageId
thephpleague_omnipay-securepay
train
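The PHP fix makes `getMessageId` store a generated ID through the setter, so repeated calls return one stable value instead of a fresh random string each time. A Python sketch of the same memoize-on-first-read getter:

```python
import hashlib
import time

class Request:
    def __init__(self):
        self._message_id = None

    @property
    def message_id(self):
        # Generate once, store, and return the stored value thereafter,
        # mirroring the setMessageId call added in the fix above.
        if not self._message_id:
            raw = str(time.time()).encode()
            self._message_id = hashlib.md5(raw).hexdigest()[:30]
        return self._message_id

req = Request()
assert req.message_id == req.message_id  # stable across calls
print(len(req.message_id))               # 30
```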
7ff8bd1dcfc370c6610018bb533bb007f8a8e359
diff --git a/test-complete/src/test/java/com/marklogic/javaclient/TestQueryByExample.java b/test-complete/src/test/java/com/marklogic/javaclient/TestQueryByExample.java index <HASH>..<HASH> 100644 --- a/test-complete/src/test/java/com/marklogic/javaclient/TestQueryByExample.java +++ b/test-complete/src/test/java/com/marklogic/javaclient/TestQueryByExample.java @@ -35,6 +35,7 @@ public class TestQueryByExample extends BasicJavaClientREST { private static String dbName = "TestQueryByExampleDB"; private static String [] fNames = {"TestQueryByExampleDB-1"}; private static String restServerName = "REST-Java-Client-API-Server"; + private static int restPort=8011; @BeforeClass public static void setUp() throws Exception { @@ -43,6 +44,13 @@ public class TestQueryByExample extends BasicJavaClientREST { setupAppServicesConstraint(dbName); } +@After + public void testCleanUp() throws Exception + { + clearDB(restPort); + System.out.println("Running clear script"); + } + @SuppressWarnings("deprecation") @Test public void testQueryByExampleXML() throws IOException, TransformerException, XpathException @@ -146,10 +154,10 @@ public class TestQueryByExample extends BasicJavaClientREST { String resultDoc = queryMgr.search(qbyex, new StringHandle()).get(); - System.out.println(resultDoc); + System.out.println("testQueryByExampleJSON Result : "+resultDoc); - assertTrue("total result is not correct", resultDoc.contains("\"total\":1")); - assertTrue("doc returned is not correct", resultDoc.contains("{\"snippet-format\":\"snippet\",\"total\":1,\"start\":1,\"page-length\":10,\"results\":[{\"index\":1,\"uri\":\"/qbe/constraint1.json\",\"path\":\"fn:doc(\\\"/qbe/constraint1.json\\\")\",\"score\":28672,\"confidence\":0.6951694,\"fitness\":0.6951694,\"href\":\"/v1/documents?uri=%2Fqbe%2Fconstraint1.json\",\"mimetype\":\"application/json\",\"format\":\"json\",\"matches\":[{\"path\":\"fn:doc(\\\"/qbe/constraint1.json\\\")/*:json/*:root/*:id\",\"match-text\":[{\"highlight\":11}]}],\"metadata\":[{\"title\":\"Vannevar Bush\",\"metadata-type\":\"element\"},{\"id\":11,\"metadata-type\":\"element\"},{\"p\":\"Vannevar Bush wrote an article for The Atlantic Monthly\",\"metadata-type\":\"element\"},{\"popularity\":5,\"metadata-type\":\"element\"}]}]")); + + assertTrue("doc returned is not correct", resultDoc.contains("<search:result index=\"1\" uri=\"/qbe/constraint1.json\" path=\"fn:doc(&quot;/qbe/constraint1.json&quot;)\" score=\"28672\" confidence=\"0.6951694\" fitness=\"0.6951694\" href=\"/v1/documents?uri=%2Fqbe%2Fconstraint1.json\" mimetype=\"application/json\" format=\"json\">")); // release client client.release();
Merged with svn changes
marklogic_java-client-api
train
97e998dc9c2b7c20952bd0d4f36e6345fc4471f7
diff --git a/feed.js b/feed.js index <HASH>..<HASH> 100644 --- a/feed.js +++ b/feed.js @@ -68,6 +68,9 @@ Feed.decodeStream = function () { return pull.through() } +//the code to manage this is split between 3 different places +//which is not good. + Feed.encodeWithIndexes = function (msg) { var key = {id: msg.author, sequence: msg.sequence} var _key = {id: msg.author, timestamp: msg.timestamp} @@ -84,7 +87,7 @@ Feed.encodeWithIndexes = function (msg) { console.log(typeIndex) console.log(codec.encode(typeIndex)) //these are all encoded by a varmatch codec in codec.js - return [ + var batch = [ {key: key, value: msg, type: 'put'}, {key: _key, value: key, type: 'put'}, {key: msg.author, value: msg.sequence, type: 'put'}, @@ -93,6 +96,17 @@ Feed.encodeWithIndexes = function (msg) { {key: typeIndex, value: key, type: 'put'} ] + msg.references.forEach(function (ref) { + batch.push({key: { + type: type, + id: msg.author, + sequence: msg.sequence, + reference: ref + }, value: 0, type: 'put'}) + }) + + return batch + } module.exports = Feed @@ -116,8 +130,9 @@ function Feed (db, id, keys) { var first = {id: id, sequence: 0} var last = {id: id, sequence: 0x1fffffffffffff} - function append (type, buffer, cb) { + function append (type, buffer, references, cb) { var d = new Date() + console.log(references) var msg = signMessage({ previous: prev || zeros, author : bsum(keys.public), @@ -125,7 +140,8 @@ function Feed (db, id, keys) { timezone: d.getTimezoneOffset(), type : type, sequence: seq, - message : buffer + message : buffer, + references: references }, keys) var batch = Feed.encodeWithIndexes(msg) @@ -134,6 +150,7 @@ function Feed (db, id, keys) { //PROBABLY, UPDATE VIA A WRITE STREAM THAT USES BATCHES. prev = bsum(codec.encode(msg)) seq++ + console.log(batch) db.batch(batch, function (err) { cb(null, msg.sequence, prev) }) @@ -142,23 +159,37 @@ function Feed (db, id, keys) { return f = { id: id, feed: feed, - //expose a copy of the pub key, so no one can mess with it. + //expose a **COPY** of the pub key, so no one can mess with it. public: keys ? new Buffer(keys.public) : null, - append: function _append (type, message, cb) { + append: function _append (type, message, references, cb) { + if('function' === typeof references) + cb = references, references = [] type = toBuffer(type) message = toBuffer(message) if(!ready) { f.verify(function (err, s, h) { if(err) return cb(err) - append(type, message, cb) + append(type, message, references || [], cb) }) } else - append(type, message, cb) + append(type, message, references || [], cb) }, - follow: function (id, cb) { + follow: function (id, message, cb) { if(!u.isHash(id)) return cb(new Error('expected id hash')) - db.put(id, 0, cb) + if('function' === typeof message) + cb = message, message = toBuffer('') + db.get(id, function (err, value) { + if(err) + db.put(id, 0, follow) + else follow() + function follow () { + //to do: add + f.append('follow', message, [id], function (err) { + db.put(id, 0, cb) + }) + } + }) }, createReadStream: function (opts) { //defer until verified! @@ -283,7 +314,7 @@ function Feed (db, id, keys) { //if there where no records in the database... if(err) return cb(err) else if(seq === 1 && keys && keys.private) { - return append(INIT, keys.public, function (err, _seq, _hash) { + return append(INIT, keys.public, [], function (err, _seq, _hash) { cb(err, _seq, _hash) }) }
Following now actually posts a message
ssbc_ssb-db
train
f6299eb63caa654f7686ce42ac70176f7457cd1c
diff --git a/phoebe/frontend/bundle.py b/phoebe/frontend/bundle.py index <HASH>..<HASH> 100644 --- a/phoebe/frontend/bundle.py +++ b/phoebe/frontend/bundle.py @@ -2200,7 +2200,7 @@ class Bundle(ParameterSet): # handle case where compute is not provided if compute is None: - computes = self.get_compute().computes + computes = self.get_compute(**kwargs).computes if len(computes)==0: # NOTE: this doesn't take **kwargs since we want those to be # temporarily overriden as is the case when the compute options
run_compute can now find matching options via kwargs, so instead of having to provide the compute label, you can provide any other tags, so long as they result in a single unique match.
phoebe-project_phoebe2
train
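The phoebe2 change above routes the caller's kwargs into get_compute, so compute options can be located by any combination of tags rather than only the compute label, as long as the filters single out exactly one option set. A minimal Python sketch of that resolution pattern (resolve_unique and the dict-shaped option sets are invented for the example, not phoebe2's real API):

def resolve_unique(option_sets, **filters):
    """Return the single option set whose tags match every filter."""
    matches = [
        opts for opts in option_sets
        if all(opts.get(key) == value for key, value in filters.items())
    ]
    if len(matches) != 1:
        raise ValueError("expected exactly one match, got %d" % len(matches))
    return matches[0]

option_sets = [
    {"compute": "fast", "kind": "phoebe"},
    {"compute": "detailed", "kind": "legacy"},
]
print(resolve_unique(option_sets, kind="legacy"))  # the 'detailed' option set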
96e57298b813f2dd5e97ca8f24bfd18eb321b8e1
diff --git a/cts/index/indexing_test.go b/cts/index/indexing_test.go index <HASH>..<HASH> 100644 --- a/cts/index/indexing_test.go +++ b/cts/index/indexing_test.go @@ -158,6 +158,20 @@ func TestIndexing(t *testing.T) { } { + res, err := index.SaveObject(map[string]interface{}{"objectID": "taggedObject", "attribute": "value", "_tags": []string{"algolia"}}) + require.NoError(t, err) + g.Collect(res) + } + + { + res, err := index.DeleteBy( + opt.Filters("algolia"), + ) + require.NoError(t, err) + g.Collect(res) + } + + { for _, objectID := range objectIDs[:7] { res, err := index.DeleteObject(objectID) require.NoError(t, err)
chore: add delete by tag CTS test
algolia_algoliasearch-client-go
train
c911013b2093cebf425bc06efa4b8f2adaddf211
diff --git a/state/user.go b/state/user.go index <HASH>..<HASH> 100644 --- a/state/user.go +++ b/state/user.go @@ -109,7 +109,7 @@ func (st *State) RemoveUser(tag names.UserTag) error { name := strings.ToLower(tag.Name()) u, err := st.User(tag) - if errors.IsNotFound(err) { + if err != nil { return errors.Trace(err) } @@ -183,7 +183,9 @@ func (st *State) User(tag names.UserTag) (*User, error) { if user.doc.Deleted { // This error is returned to the apiserver and from there to the api // client. So we don't annotate with information regarding deletion. - return nil, errors.NotFoundf("%q", user.Name()) + // TODO(redir): We'll return a deletedUserError in the future so we can + // return more approriate errors, e.g. username not available. + return nil, errors.UserNotFoundf("%q", user.Name()) } return user, nil } @@ -197,7 +199,14 @@ func (st *State) AllUsers(includeDeactivated bool) ([]*User, error) { var query bson.D // TODO(redir): Provide option to retrieve deleted users in future PR. - query = append(query, bson.DocElem{"deleted", false}) + // e.g. if !includeDelted. + // Ensure the query checks that user -- if it has the deleted attribute -- + // the value is not true. fwereade wanted to be sure we cannot miss users + // that previously existed without the deleted attr. Since this will only + // be in 2.0 that should never happen, but... belt and suspenders. + query = append(query, bson.D{ + {"deleted", bson.D{{"$ne", true}}}, + {"deleted", bson.D{{"$exists", false}}}}...) if !includeDeactivated { query = append(query, bson.DocElem{"deactivated", false}) } @@ -228,7 +237,7 @@ type userDoc struct { Name string `bson:"name"` DisplayName string `bson:"displayname"` Deactivated bool `bson:"deactivated"` - Deleted bool // Deleted users are marked deleted but not removed + Deleted bool `bson:"deleted,omitempty"` // Deleted users are marked deleted but not removed. SecretKey []byte `bson:"secretkey,omitempty"` PasswordHash string `bson:"passwordhash"` PasswordSalt string `bson:"passwordsalt"` @@ -375,6 +384,8 @@ func (u *User) SetPassword(password string) error { // will be cleared. func (u *User) SetPasswordHash(pwHash string, pwSalt string) error { if err := u.ensureNotDeleted(); err != nil { + // If we do get a late set of the password this is fine b/c we have an + // explicit check before login. 
return errors.Annotate(err, "cannot set password hash") } update := bson.D{{"$set", bson.D{ diff --git a/state/user_test.go b/state/user_test.go index <HASH>..<HASH> 100644 --- a/state/user_test.go +++ b/state/user_test.go @@ -140,6 +140,34 @@ func isDeletedUserError(err error) bool { return ok } +func (s *UserSuite) TestRemoveUserAllUsers(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "one"}) + _ = s.Factory.MakeUser(c, &factory.UserParams{Name: "two"}) + _ = s.Factory.MakeUser(c, &factory.UserParams{Name: "three"}) + + all, err := s.State.AllUsers(true) + c.Check(err, jc.ErrorIsNil) + c.Check(len(all), jc.DeepEquals, 4) + + var got []string + for _, u := range all { + got = append(got, u.Name()) + } + c.Check(got, jc.SameContents, []string{"test-admin", "one", "two", "three"}) + + s.State.RemoveUser(user.UserTag()) + + all, err = s.State.AllUsers(true) + got = nil + for _, u := range all { + got = append(got, u.Name()) + } + c.Check(err, jc.ErrorIsNil) + c.Check(len(all), jc.DeepEquals, 3) + c.Check(got, jc.SameContents, []string{"test-admin", "two", "three"}) + +} + func (s *UserSuite) TestRemoveUser(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Password: "should fail"}) @@ -195,7 +223,7 @@ func (s *UserSuite) TestRemoveUser(c *gc.C) { // Check again to verify the user cannot be retrieved. u, err = s.State.User(user.UserTag()) - c.Check(err, jc.Satisfies, errors.IsNotFound) + c.Check(err, jc.Satisfies, errors.IsUserNotFound) } func (s *UserSuite) TestDisable(c *gc.C) {
Updates per fwereade's in-person walkthrough/review
juju_juju
train
5f0586550007b63b9e56b0d572c6637464cce29a
diff --git a/lib/acts_as_indexed/search_index.rb b/lib/acts_as_indexed/search_index.rb index <HASH>..<HASH> 100644 --- a/lib/acts_as_indexed/search_index.rb +++ b/lib/acts_as_indexed/search_index.rb @@ -104,16 +104,18 @@ module ActsAsIndexed #:nodoc: negative_quoted = run_quoted_queries(queries[:negative_quoted]) start_quoted = run_start_queries(queries[:start_quoted]) + results = {} + if queries[:start_quoted].any? - results = start_quoted - elsif queries[:positive_quoted].any? && queries[:positive].any? - p = positive.delete_if{ |r_id,w| positive_quoted.exclude?(r_id) } - pq = positive_quoted.delete_if{ |r_id,w| positive.exclude?(r_id) } - results = p.merge(pq) { |r_id,old_val,new_val| old_val + new_val} - elsif queries[:positive].any? - results = positive - else - results = positive_quoted + results = merge_query_results(results, start_quoted) + end + + if queries[:positive_quoted].any? + results = merge_query_results(results, positive_quoted) + end + + if queries[:positive].any? + results = merge_query_results(results, positive) end negative_results = (negative.keys + negative_quoted.keys) @@ -121,7 +123,23 @@ module ActsAsIndexed #:nodoc: #p results results end - + + def merge_query_results(results1, results2) + # Return the other if one is empty. + return results1 if results2.empty? + return results2 if results1.empty? + + # Delete any records from results 1 that are not in results 2. + r1 = results1.delete_if{ |r_id,w| results2.exclude?(r_id) } + + + # Delete any records from results 2 that are not in results 1. + r2 = results2.delete_if{ |r_id,w| results1.exclude?(r_id) } + + # Merge the results by adding their respective scores. + r1.merge(r2) { |r_id,old_val,new_val| old_val + new_val} + end + # Returns true if the index root exists on the FS. #-- # TODO: Make a private method called 'root_exists?' which checks for the root directory. diff --git a/test/acts_as_indexed_test.rb b/test/acts_as_indexed_test.rb index <HASH>..<HASH> 100644 --- a/test/acts_as_indexed_test.rb +++ b/test/acts_as_indexed_test.rb @@ -12,10 +12,10 @@ class ActsAsIndexedTest < ActiveSupport::TestCase def test_adds_to_index original_post_count = Post.count assert_equal [], Post.find_with_index('badger') - p = Post.new(:title => 'badger', :body => 'Thousands of them!') - assert p.save + post = Post.new(:title => 'badger', :body => 'Thousands of them!') + assert post.save assert_equal original_post_count+1, Post.count - assert_equal [p.id], Post.find_with_index('badger',{},{:ids_only => true}) + assert_equal [post.id], Post.find_with_index('badger',{},{:ids_only => true}) end def test_removes_from_index @@ -111,6 +111,7 @@ class ActsAsIndexedTest < ActiveSupport::TestCase end def test_start_quoted_queries + assert_equal [6,5], Post.find_with_index('^"crane" ship',{},{:ids_only => true}) assert_equal [5], Post.find_with_index('^"crane ship"',{},{:ids_only => true}) assert_equal [5], Post.find_with_index('^"crane shi"',{},{:ids_only => true}) assert_equal [5], Post.find_with_index('^"crane sh"',{},{:ids_only => true})
Quoted starts-with syntax can now be combined with other syntax.
dougal_acts_as_indexed
train
92bc3452da679e583e63fc1bf33a3e015ba23623
diff --git a/smack-tcp/src/main/java/org/jivesoftware/smack/tcp/XMPPTCPConnection.java b/smack-tcp/src/main/java/org/jivesoftware/smack/tcp/XMPPTCPConnection.java index <HASH>..<HASH> 100644 --- a/smack-tcp/src/main/java/org/jivesoftware/smack/tcp/XMPPTCPConnection.java +++ b/smack-tcp/src/main/java/org/jivesoftware/smack/tcp/XMPPTCPConnection.java @@ -1292,6 +1292,13 @@ public class XMPPTCPConnection extends AbstractXMPPConnection { return shutdownTimestamp != null; } + private void throwNotConnectedExceptionIfDoneAndResumptionNotPossible() throws NotConnectedException { + if (done() && !isSmResumptionPossible()) { + // Don't throw a NotConnectedException is there is an resumable stream available + throw new NotConnectedException(); + } + } + /** * Sends the specified element to the server. * @@ -1299,16 +1306,20 @@ public class XMPPTCPConnection extends AbstractXMPPConnection { * @throws NotConnectedException */ protected void sendStreamElement(Element element) throws NotConnectedException { - if (done() && !isSmResumptionPossible()) { - // Don't throw a NotConnectedException is there is an resumable stream available - throw new NotConnectedException(); - } + throwNotConnectedExceptionIfDoneAndResumptionNotPossible(); - try { - queue.put(element); - } - catch (InterruptedException ie) { - throw new NotConnectedException(); + boolean enqueued = false; + while (!enqueued) { + try { + queue.put(element); + enqueued = true; + } + catch (InterruptedException e) { + throwNotConnectedExceptionIfDoneAndResumptionNotPossible(); + // If the method above did not throw, we have a spurious interrupt and we should try to enqueue the + // element again + LOGGER.log(Level.FINE, "Spurious interrupt", e); + } } } @@ -1329,23 +1340,21 @@ public class XMPPTCPConnection extends AbstractXMPPConnection { } /** - * Returns the next available element from the queue for writing. + * Maybe return the next available element from the queue for writing. If the queue is shut down <b>or</b> a + * spurious interrupt occurs, <code>null</code> is returned. So it is important to check the 'done' condition in + * that case. * - * @return the next element for writing. + * @return the next element for writing or null. */ private Element nextStreamElement() { - // TODO not sure if nextStreamElement and/or this done() condition still required. - // Couldn't this be done in writePackets too? - if (done()) { - return null; - } - Element packet = null; try { packet = queue.take(); } catch (InterruptedException e) { - // Do nothing + if (!queue.isShutdown()) { + LOGGER.log(Level.FINER, "Spurious interrupt", e); + } } return packet; } @@ -1391,9 +1400,7 @@ public class XMPPTCPConnection extends AbstractXMPPConnection { } } if (!instantShutdown) { - // Flush out the rest of the queue. If the queue is extremely large, it's - // possible we won't have time to entirely flush it before the socket is forced - // closed by the shutdown process. + // Flush out the rest of the queue. try { while (!queue.isEmpty()) { Element packet = queue.remove();
Handle spurious interrupts in XMPPTCPConnection Also remove unnecessary done() check in nextStreamElement() and remove wrong comment in !instantShutdown branch. There is no mechanism to forcibly close the socket.
igniterealtime_Smack
train
7868e2bdda8c52b13f5b027e7c8bf15ded83321a
diff --git a/lib/generators/enum/enum_generator.rb b/lib/generators/enum/enum_generator.rb index <HASH>..<HASH> 100644 --- a/lib/generators/enum/enum_generator.rb +++ b/lib/generators/enum/enum_generator.rb @@ -1,4 +1,4 @@ -class EnumGenerator < Rails::Generators::NamedBase +class ClassyEnumGenerator < Rails::Generators::NamedBase desc "Generate a ClassyEnum definition in app/enums/" argument :name, :type => :string, :required => true, :banner => 'EnumName' @@ -6,10 +6,6 @@ class EnumGenerator < Rails::Generators::NamedBase source_root File.expand_path("../templates", __FILE__) - #def self.banner - # "rails generate enum NewEnum value1 value2 value3" - #end - def copy_files empty_directory 'app/enums' template "enum.erb", "app/enums/#{file_name}.rb"
Changed enum generator to be classy_enum for Rails 3
beerlington_classy_enum
train
2aff59a9873e098564606df8eecdff58de62f3e7
diff --git a/src/Ufo/Routing/Route.php b/src/Ufo/Routing/Route.php index <HASH>..<HASH> 100644 --- a/src/Ufo/Routing/Route.php +++ b/src/Ufo/Routing/Route.php @@ -19,9 +19,21 @@ class Route public static function parse(string $path, RouteStorageInterface $routeStorage): ?Section { if (empty($path) || '/' == $path) { - return $routeStorage->get('/'); + $section = $routeStorage->get('/'); + } else { + $section = $routeStorage->find($path); } - return $routeStorage->find($path); + if (null === $section) { + return null; + } + + //strlen faster than simple comparison + $splen = strlen($section->path); + if (strlen($path) > $splen) { + $section->params = explode('/', trim(substr($path, $splen), '/')); + } + + return $section; } }
feat: parse additional data from path into section params
enikeishik_ufoframework
train
becbc52503e5dfcc83394b2c656e4386af52e707
diff --git a/inspire_dojson/hepnames/rules.py b/inspire_dojson/hepnames/rules.py index <HASH>..<HASH> 100644 --- a/inspire_dojson/hepnames/rules.py +++ b/inspire_dojson/hepnames/rules.py @@ -371,20 +371,13 @@ def _public_notes2marc(self, key, value): @hepnames.over('source', '^670..') [email protected]_each_value def source(self, key, value): - def _get_source(value): - return { - 'name': value.get('a'), - 'date_verified': normalize_date(value.get('d')), - } - - source = self.get('source', []) - - values = force_list(value) - for value in values: - source.append(_get_source(value)) - - return source + """Populate the ``source`` key.""" + return { + 'date_verified': normalize_date(value.get('d')), + 'name': value.get('a'), + } @hepnames2marc.over('670', '^source$')
hepnames: refactor source rule
inspirehep_inspire-dojson
train
4fc62320e73807a2a9890080546e3fee7ce2cb2a
diff --git a/apiserver/facades/client/machinemanager/machinemanager.go b/apiserver/facades/client/machinemanager/machinemanager.go index <HASH>..<HASH> 100644 --- a/apiserver/facades/client/machinemanager/machinemanager.go +++ b/apiserver/facades/client/machinemanager/machinemanager.go @@ -292,7 +292,7 @@ func (mm *MachineManagerAPI) destroyMachine(args params.Entities, force, keep bo if !force { return fail(err) } - logger.Warningf("error destroying machine %v but keep instance: %v", machineTag.Id(), err) + result.Error = common.ServerError(err) } } var info params.DestroyMachineInfo
Ignore keep-instance errors when forcing machine removal.
juju_juju
train
ce961b99064ecc78cb3f1088f3ea55214287fb72
diff --git a/api/server/migrate.go b/api/server/migrate.go index <HASH>..<HASH> 100644 --- a/api/server/migrate.go +++ b/api/server/migrate.go @@ -5,7 +5,8 @@ import ( "net/http" "github.com/libopenstorage/openstorage/api" - ost_errors "github.com/libopenstorage/openstorage/api/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func (vd *volAPI) cloudMigrateStart(w http.ResponseWriter, r *http.Request) { @@ -58,9 +59,11 @@ func (vd *volAPI) cloudMigrateStart(w http.ResponseWriter, r *http.Request) { resp, err := migrations.Start(ctx, migrateRequest) if err != nil { - if _, ok := err.(*ost_errors.ErrExists); ok { - w.WriteHeader(http.StatusConflict) - return + if serverError, ok := status.FromError(err); ok { + if serverError.Code() == codes.AlreadyExists { + w.WriteHeader(http.StatusConflict) + return + } } vd.sendError(method, startReq.TargetId, w, err.Error(), http.StatusInternalServerError) return diff --git a/api/server/sdk/volume_migrate.go b/api/server/sdk/volume_migrate.go index <HASH>..<HASH> 100644 --- a/api/server/sdk/volume_migrate.go +++ b/api/server/sdk/volume_migrate.go @@ -20,6 +20,7 @@ import ( "context" "github.com/libopenstorage/openstorage/api" + ost_errors "github.com/libopenstorage/openstorage/api/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -86,6 +87,10 @@ func (s *VolumeServer) volumeGroupMigrate( } resp, err := s.driver(ctx).CloudMigrateStart(request) if err != nil { + if _, ok := err.(*ost_errors.ErrExists); ok { + return nil, status.Errorf(codes.AlreadyExists, "Cannot start migration for %s : %v", req.GetClusterId(), err) + } + // if errExist return codes. return nil, status.Errorf(codes.Internal, "Cannot start migration for %s : %v", req.GetClusterId(), err) } return &api.SdkCloudMigrateStartResponse{ @@ -106,6 +111,9 @@ func (s *VolumeServer) allVolumesMigrate( } resp, err := s.driver(ctx).CloudMigrateStart(request) if err != nil { + if _, ok := err.(*ost_errors.ErrExists); ok { + return nil, status.Errorf(codes.AlreadyExists, "Cannot start migration for %s : %v", req.GetClusterId(), err) + } return nil, status.Errorf(codes.Internal, "Cannot start migration for %s : %v", req.GetClusterId(), err) } return &api.SdkCloudMigrateStartResponse{ @@ -127,6 +135,9 @@ func (s *VolumeServer) volumeMigrate( } resp, err := s.driver(ctx).CloudMigrateStart(request) if err != nil { + if _, ok := err.(*ost_errors.ErrExists); ok { + return nil, status.Errorf(codes.AlreadyExists, "Cannot start migration for %s : %v", req.GetClusterId(), err) + } return nil, status.Errorf(codes.Internal, "Cannot start migration for %s : %v", req.GetClusterId(), err) } return &api.SdkCloudMigrateStartResponse{
Fix idempotency for migration starts (#<I>)
libopenstorage_openstorage
train
745dbee37f55ec5a92bedfc104fa2f974f06ffd4
diff --git a/src/main/java/net/dv8tion/jda/core/requests/WebSocketClient.java b/src/main/java/net/dv8tion/jda/core/requests/WebSocketClient.java index <HASH>..<HASH> 100644 --- a/src/main/java/net/dv8tion/jda/core/requests/WebSocketClient.java +++ b/src/main/java/net/dv8tion/jda/core/requests/WebSocketClient.java @@ -218,6 +218,13 @@ public class WebSocketClient extends WebSocketAdapter implements WebSocketListen { try { + //Make sure that we don't send any packets while attempting to connect. + if (!connected || initiating) + { + Thread.sleep(1000); + continue; + } + attemptedToSend = false; needRatelimit = false;
Fixed bug where packets sent by the WS ratelimit system were sent before IDENTIFY due to a race condition.
DV8FromTheWorld_JDA
train
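The JDA fix above gates the rate-limited writer so nothing is sent until the websocket is connected and IDENTIFY has gone out; the diff does this with a sleep poll. A minimal Python sketch of the same gating idea, using an Event instead of polling; transmit() is a stand-in, not JDA's API:

import queue
import threading
import time

connected = threading.Event()
outbox = queue.Queue()

def transmit(packet):
    print("sent:", packet)

def send_loop():
    while True:
        connected.wait()        # hold every send until the handshake is done
        transmit(outbox.get())

threading.Thread(target=send_loop, daemon=True).start()
outbox.put("post-IDENTIFY packet")  # queued, but nothing goes out yet
connected.set()                     # handshake complete, the queue drains
time.sleep(0.1)                     # give the sketch a moment to print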
531c494d00ac6e2d320c74427bb588373afb8ae5
diff --git a/lepo/router.py b/lepo/router.py index <HASH>..<HASH> 100644 --- a/lepo/router.py +++ b/lepo/router.py @@ -19,7 +19,7 @@ class Router: @classmethod def from_file(cls, filename): with open(filename) as infp: - if filename.endswith('.yaml'): + if filename.endswith('.yaml') or filename.endswith('.yml'): import yaml data = yaml.safe_load(infp) else:
Also read .yml as YAML Fixes #4
akx_lepo
train
9f68db52c945d84786374d4e0af8cec67956989e
diff --git a/src/Charcoal/Admin/Property/HierarchicalObjectProperty.php b/src/Charcoal/Admin/Property/HierarchicalObjectProperty.php index <HASH>..<HASH> 100644 --- a/src/Charcoal/Admin/Property/HierarchicalObjectProperty.php +++ b/src/Charcoal/Admin/Property/HierarchicalObjectProperty.php @@ -71,12 +71,12 @@ class HierarchicalObjectProperty extends ObjectProperty $choice['group'] = null; } - if (is_callable([ $obj, 'name' ])) { - $choice['title'] = $obj->name(); - } elseif (is_callable([ $obj, 'label' ])) { - $choice['title'] = $obj->label(); - } elseif (is_callable([ $obj, 'title' ])) { - $choice['title'] = $obj->title(); + if (isset($obj['name'])) { + $choice['title'] = $obj['name']; + } elseif (isset($obj['label'])) { + $choice['title'] = $obj['label']; + } elseif (isset($obj['title'])) { + $choice['title'] = $obj['title']; } return $choice;
chore(arrayAccess): use array access instead of method calls for properties
locomotivemtl_charcoal-admin
train
c6b44f307883d54c27d4bd135e072922481ccaeb
diff --git a/src/FluxBB/Controllers/Auth.php b/src/FluxBB/Controllers/Auth.php index <HASH>..<HASH> 100644 --- a/src/FluxBB/Controllers/Auth.php +++ b/src/FluxBB/Controllers/Auth.php @@ -27,15 +27,16 @@ namespace FluxBB\Controllers; use FluxBB\Models\Config, FluxBB\Models\Group, - FluxBB\Models\User; + FluxBB\Models\User, + View; class Auth extends Base { public function __construct() { - $this->filter('before', 'fluxbb::only_guests')->only(array('login', 'remember')); - $this->filter('before', 'fluxbb::only_members')->only('logout'); + //$this->filter('before', 'fluxbb::only_guests')->only(array('login', 'remember')); + //$this->filter('before', 'fluxbb::only_members')->only('logout'); } public function get_logout()
Fix the Auth controller for Laravel 4.
fluxbb_core
train
28f48987bc9c25a96d2d48dfa9f30837ef3f4184
diff --git a/pyqg/qg_model.py b/pyqg/qg_model.py index <HASH>..<HASH> 100644 --- a/pyqg/qg_model.py +++ b/pyqg/qg_model.py @@ -265,7 +265,7 @@ class QGModel(model.Model): self.add_diagnostic('APEgenspec', description='spectrum of APE generation', function= (lambda self: self.U * self.rd**-2 * self.del1 * self.del2 * - np.real(1j*self.k*(self.del1*self.ph[0] + self.del2*self.ph[0]) * + np.real(1j*self.k*(self.del1*self.ph[0] + self.del2*self.ph[1]) * np.conj(self.ph[0] - self.ph[1])) ) )
Corrected index in APE gen (#<I>) * test commit * test yeti * corrected index in QGModel diagnostics APEgenspec * removed test files from benchmark
pyqg_pyqg
train
ac61117b59e8aaf8a644501f432cb71e8e5df9a2
diff --git a/{{cookiecutter.project_slug}}/config/settings/production.py b/{{cookiecutter.project_slug}}/config/settings/production.py index <HASH>..<HASH> 100644 --- a/{{cookiecutter.project_slug}}/config/settings/production.py +++ b/{{cookiecutter.project_slug}}/config/settings/production.py @@ -73,8 +73,6 @@ AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY') # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME') # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings -AWS_AUTO_CREATE_BUCKET = True -# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings AWS_QUERYSTRING_AUTH = False # DO NOT change these unless you know what you're doing. _AWS_EXPIRY = 60 * 60 * 24 * 7
Prevent AWS S3 bucket from being created automatically Rationale: most of the time you want to create a bucket yourself in order to assign/tweak policies etc.
pydanny_cookiecutter-django
train
a7c86e7894550925f13c72e3692de053e176bb37
diff --git a/source/rafcon/mvc/controllers/io_data_port_list.py b/source/rafcon/mvc/controllers/io_data_port_list.py index <HASH>..<HASH> 100644 --- a/source/rafcon/mvc/controllers/io_data_port_list.py +++ b/source/rafcon/mvc/controllers/io_data_port_list.py @@ -42,9 +42,9 @@ class DataPortListController(ExtendedController): view['name_col'].add_attribute(view['name_text'], 'text', 0) if not isinstance(self.model.state, LibraryState): view['name_text'].set_property("editable", True) + view['data_type_text'].set_property("editable", True) view['data_type_col'].add_attribute(view['data_type_text'], 'text', 1) - # if not isinstance(self.model.state, LibraryState): - view['data_type_text'].set_property("editable", True) + if view['default_value_col'] and view['default_value_text']: view['default_value_col'].add_attribute(view['default_value_text'], 'text', 2) view['default_value_text'].set_property("editable", True) diff --git a/source/rafcon/statemachine/states/library_state.py b/source/rafcon/statemachine/states/library_state.py index <HASH>..<HASH> 100644 --- a/source/rafcon/statemachine/states/library_state.py +++ b/source/rafcon/statemachine/states/library_state.py @@ -12,7 +12,7 @@ from gtkmvc import Observable from rafcon.statemachine.enums import StateExecutionState from rafcon.statemachine.script import Script, ScriptType from rafcon.statemachine.states.state import State -import rafcon.statemachine.singleton +from rafcon.statemachine.singleton import library_manager from rafcon.utils import log logger = log.get_logger(__name__) @@ -52,7 +52,7 @@ class LibraryState(State): self._state_copy = None lib_os_path, new_library_path, new_library_name = \ - rafcon.statemachine.singleton.library_manager.get_os_path_to_library(library_path, library_name) + library_manager.get_os_path_to_library(library_path, library_name) if library_path != new_library_path or library_name != new_library_name: self.library_name = new_library_name @@ -61,7 +61,7 @@ class LibraryState(State): logger.info("Old library name '{0}' was located at {1}".format(library_name, library_path)) logger.info("New library name '{0}' is located at {1}".format(new_library_name, new_library_path)) - state_machine, lib_version, creation_time = rafcon.statemachine.singleton.library_manager.storage.load_statemachine_from_yaml(lib_os_path) + state_machine, lib_version, creation_time = library_manager.storage.load_statemachine_from_yaml(lib_os_path) self.state_copy = state_machine.root_state self.state_copy.parent = self if not str(lib_version) == version and not str(lib_version) == "None": @@ -75,7 +75,8 @@ class LibraryState(State): for i_key, i_data_port in input_data_ports.iteritems(): if i_key in self.input_data_ports: self.input_data_ports[i_key].default_value = i_data_port.default_value - self.input_data_ports[i_key].data_type = i_data_port.data_type + # Use data type of library, user should not be able to overwrite the data type of a port + # self.input_data_ports[i_key].data_type = i_data_port.data_type self.output_data_ports = self.state_copy.output_data_ports # copy output_port default values
Use data_type of library for ports - forbid the user from changing the data type of library state ports - when loading a library within a state-machine, also force loading of the library's port types => when a library changes, its usages change as well
DLR-RM_RAFCON
train
7865226bc5e5c235d902fd846f929ade3f4d4496
diff --git a/quarkc/lib/datawire-quark-core.rb b/quarkc/lib/datawire-quark-core.rb index <HASH>..<HASH> 100644 --- a/quarkc/lib/datawire-quark-core.rb +++ b/quarkc/lib/datawire-quark-core.rb @@ -630,6 +630,7 @@ module DatawireQuarkCore events.event { handler.onWSInit(sock) } client.on_client(:open) do |wsevt| # puts "open" + sock.opened = true events.event { handler.onWSConnected(sock) } end client.on_client(:message) do |wsevt| @@ -644,7 +645,9 @@ module DatawireQuarkCore end client.on_client(:close) do |wsevt| # puts "close" - events.event { handler.onWSClosed(sock) } + if sock.opened + events.event { handler.onWSClosed(sock) } + end events.event(final:src) { handler.onWSFinal(sock) } end client.on_client(:error) do |wsevt| @@ -676,8 +679,10 @@ module DatawireQuarkCore end class WebsocketAdapter + attr_accessor :opened def initialize(client) @client = client + @opened = false end def send (message)
Ruby: emit onWSClosed only after open
datawire_quark
train
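The quark change above adds an opened flag to the Ruby websocket adapter so the close callback fires only for connections that actually opened, while the final event still fires either way. The same guard as a minimal Python sketch (class and handler names are invented for illustration):

class SocketAdapter:
    """Report a close only for sockets that actually opened."""

    def __init__(self, handler):
        self.handler = handler
        self.opened = False

    def on_open(self):
        self.opened = True
        self.handler.on_connected(self)

    def on_close(self):
        if self.opened:                  # skip the close event for failed connects
            self.handler.on_closed(self)
        self.handler.on_final(self)      # the final event fires either way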
0d1261e87299b8c01db68e4a5d72684f45269e10
diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py index <HASH>..<HASH> 100644 --- a/cmd2/cmd2.py +++ b/cmd2/cmd2.py @@ -441,6 +441,7 @@ class Cmd(cmd.Cmd): self.broken_pipe_warning = '' # Check if history should persist + self.persistent_history_file = '' if persistent_history_file and rl_type != RlType.NONE: persistent_history_file = os.path.expanduser(persistent_history_file) read_err = False
Fixed bug where self.persistent_history_file attribute didn't exist
python-cmd2_cmd2
train
8a8554d736afcb81a8ba57725c73dea985a0f443
diff --git a/lib/epp-client/contact/check_response.rb b/lib/epp-client/contact/check_response.rb index <HASH>..<HASH> 100644 --- a/lib/epp-client/contact/check_response.rb +++ b/lib/epp-client/contact/check_response.rb @@ -12,7 +12,7 @@ module EPP protected def availability - @availability ||= @response.data.find('//contact:id', namespaces).inject({}) do |hash, node| + @availability ||= nodes_for_xpath('//contact:id').inject({}) do |hash, node| hash[node.content.strip] = node['avail'] == '1' hash end diff --git a/lib/epp-client/contact/create_response.rb b/lib/epp-client/contact/create_response.rb index <HASH>..<HASH> 100644 --- a/lib/epp-client/contact/create_response.rb +++ b/lib/epp-client/contact/create_response.rb @@ -4,10 +4,10 @@ module EPP module Contact class CreateResponse < Response def id - @id ||= @response.data.find('//contact:id', namespaces).first.content.strip + @id ||= value_for_xpath('//contact:id') end def creation_date - @crdate ||= Time.parse(@response.data.find('//contact:crDate', namespaces).first.content.strip) + @crdate ||= Time.parse(value_for_xpath('//contact:crDate')) end end end diff --git a/lib/epp-client/contact/transfer_response.rb b/lib/epp-client/contact/transfer_response.rb index <HASH>..<HASH> 100644 --- a/lib/epp-client/contact/transfer_response.rb +++ b/lib/epp-client/contact/transfer_response.rb @@ -4,22 +4,22 @@ module EPP module Contact class TransferResponse < Response def id - @id ||= @response.data.find('//contact:id').first.content.strip + @id ||= value_for_xpath('//contact:id') end def status - @trStatus ||= @response.data.find('//contact:trStatus').first.content.strip + @trStatus ||= value_for_xpath('//contact:trStatus') end def requested_id - @reID ||= @response.data.find('//contact:reID').first.content.strip + @reID ||= value_for_xpath('//contact:reID') end def requested_date - @reDate ||= Time.parse(@response.data.find('//contact:reDate').first.content.strip) + @reDate ||= Time.parse(value_for_xpath('//contact:reDate')) end def action_id - @acID ||= @response.data.find('//contact:acID').first.content.strip + @acID ||= value_for_xpath('//contact:acID') end def action_date - @acDate ||= Time.parse(@response.data.find('//contact:acDate').first.content.strip) + @acDate ||= Time.parse(value_for_xpath('//contact:acDate')) end end end
Use EPP::ResponseHelper in EPP::Contact response classes
m247_epp-client
train
6a645592fc40fcddaec41a4628be7cf019a8feb4
diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go index <HASH>..<HASH> 100644 --- a/test/e2e/framework/pod/create.go +++ b/test/e2e/framework/pod/create.go @@ -154,15 +154,7 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten RestartPolicy: v1.RestartPolicyOnFailure, }, } - var volumeMounts = make([]v1.VolumeMount, len(pvclaims)) - var volumes = make([]v1.Volume, len(pvclaims)) - for index, pvclaim := range pvclaims { - volumename := fmt.Sprintf("volume%v", index+1) - volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename} - volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}} - } - podSpec.Spec.Containers[0].VolumeMounts = volumeMounts - podSpec.Spec.Volumes = volumes + setVolumes(&podSpec.Spec, pvclaims, nil /*inline volume sources*/, false /*PVCs readonly*/) if nodeSelector != nil { podSpec.Spec.NodeSelector = nodeSelector } @@ -223,33 +215,42 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec { podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy } + setVolumes(podSpec, podConfig.PVCs, podConfig.InlineVolumeSources, podConfig.PVCsReadOnly) + SetNodeSelection(podSpec, podConfig.NodeSelection) + return podSpec +} + +func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, pvcsReadOnly bool) { var volumeMounts = make([]v1.VolumeMount, 0) var volumeDevices = make([]v1.VolumeDevice, 0) - var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources)) + var volumes = make([]v1.Volume, len(pvcs)+len(inlineVolumeSources)) volumeIndex := 0 - for _, pvclaim := range podConfig.PVCs { + for _, pvclaim := range pvcs { volumename := fmt.Sprintf("volume%v", volumeIndex+1) if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock { volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename}) } else { volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}) } - - volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}} + volumes[volumeIndex] = v1.Volume{ + Name: volumename, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvclaim.Name, + ReadOnly: pvcsReadOnly, + }, + }, + } volumeIndex++ } - for _, src := range podConfig.InlineVolumeSources { + for _, src := range inlineVolumeSources { volumename := fmt.Sprintf("volume%v", volumeIndex+1) // In-line volumes can be only filesystem, not block. volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}) volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src} volumeIndex++ } - podSpec.Containers[0].VolumeMounts = volumeMounts podSpec.Containers[0].VolumeDevices = volumeDevices podSpec.Volumes = volumes - - SetNodeSelection(podSpec, podConfig.NodeSelection) - return podSpec }
e2e: MakePod() should support block and inline volumes
kubernetes_kubernetes
train
1da8b552d1a7689171a8b73ec58945d8756884e4
diff --git a/nidmresults/owl/owl_reader.py b/nidmresults/owl/owl_reader.py index <HASH>..<HASH> 100644 --- a/nidmresults/owl/owl_reader.py +++ b/nidmresults/owl/owl_reader.py @@ -211,7 +211,7 @@ class OwlReader(): for class_name in classes: if not self.is_class(class_name): - raise Exception('Class '+str(class_name)+' does not exist.') + warnings.warn('Class '+str(class_name)+' does not exist.') if not isinstance(class_name, term.BNode): prov_type = self.get_prov_class(class_name)
raise warning if class is missing (rather than error)
incf-nidash_nidmresults
train
4f911e1656ba8c58aedb0269d857f5570ff3417e
diff --git a/theanets/feedforward.py b/theanets/feedforward.py index <HASH>..<HASH> 100644 --- a/theanets/feedforward.py +++ b/theanets/feedforward.py @@ -202,8 +202,8 @@ class Network(object): dropout=self.kwargs.get('hidden_dropouts', 0), batch_size=self.kwargs.get('batch_size', 64), factors=self.kwargs.get('mrnn_factors', 0), - radius=self.kwargs.get('recurrent_radius', 0), - sparse=self.kwargs.get('recurrent_sparsity', 0), + radius=self.kwargs.get('rnn_radius', 0), + sparse=self.kwargs.get('rnn_sparsity', 0), ) # if size is a tuple, assume that it's the name of a class for the diff --git a/theanets/flags.py b/theanets/flags.py index <HASH>..<HASH> 100644 --- a/theanets/flags.py +++ b/theanets/flags.py @@ -118,12 +118,14 @@ g.add_argument('--preconditioner', action='store_true', g = climate.add_arg_group('Recurrent Nets') g.add_argument('--recurrent-error-start', type=int, default=3, metavar='T', - help='compute network error starting at time T') + help='compute recurrent network error starting at time T') g.add_argument('--recurrent-form', default='rnn', metavar='[rnn|mrnn|lstm]', help='create recurrent layers with this form') -g.add_argument('--recurrent-radius', type=float, default=1.1, metavar='R', - help='create recurrent weights with spectral radius R') -g.add_argument('--recurrent-sparsity', type=float, default=0, metavar='R', - help='create recurrent weights with fraction R 0s') +g.add_argument('--recurrent-layers', type=int, nargs='+', default=(), metavar='I', + help='make layer #I a recurrent layer') +g.add_argument('--rnn-radius', type=float, default=1.1, metavar='R', + help='create rnn layer weights with spectral radius R') +g.add_argument('--rnn-sparsity', type=float, default=0, metavar='R', + help='create rnn layer weights with R fraction of zeros') g.add_argument('--mrnn-factors', type=int, default=0, metavar='N', help='use N factors for hidden connection tensor')
Rename a couple of rnn-specific flags.
lmjohns3_theanets
train
db29978fe089d7dd4c90cd92e9846ee1bbb19a76
diff --git a/lib/browser/api/menu.js b/lib/browser/api/menu.js index <HASH>..<HASH> 100644 --- a/lib/browser/api/menu.js +++ b/lib/browser/api/menu.js @@ -110,6 +110,12 @@ Menu.prototype.insert = function (pos, item) { throw new TypeError('Invalid item') } + if (pos < 0) { + throw new RangeError(`Position ${pos} cannot be less than 0`) + } else if (pos > this.getItemCount()) { + throw new RangeError(`Position ${pos} cannot be greater than the total MenuItem count`) + } + // insert item depending on its type insertItemByType.call(this, item, pos) diff --git a/spec/api-menu-spec.js b/spec/api-menu-spec.js index <HASH>..<HASH> 100644 --- a/spec/api-menu-spec.js +++ b/spec/api-menu-spec.js @@ -688,6 +688,24 @@ describe('Menu module', () => { }) describe('Menu.insert', () => { + it('should throw when attempting to insert at out-of-range indices', () => { + const menu = Menu.buildFromTemplate([ + { label: '1' }, + { label: '2' }, + { label: '3' } + ]) + + const item = new MenuItem({ label: 'badInsert' }) + + expect(() => { + menu.insert(9999, item) + }).to.throw(/Position 9999 cannot be greater than the total MenuItem count/) + + expect(() => { + menu.insert(-9999, item) + }).to.throw(/Position -9999 cannot be less than 0/) + }) + it('should store item in @items by its index', () => { const menu = Menu.buildFromTemplate([ { label: '1' },
fix: throw error when inserting menu items out-of-range (#<I>) * fix: throw error when inserting menu items out-of-range * also check pos < 0
electron_electron
train
96e5c84cc83240a5f4086a26ff37e715d7fa63ba
diff --git a/examples/features/xds/README.md b/examples/features/xds/README.md index <HASH>..<HASH> 100644 --- a/examples/features/xds/README.md +++ b/examples/features/xds/README.md @@ -29,9 +29,9 @@ The client application needs to import the xDS package to install the resolver a _ "google.golang.org/grpc/xds/experimental" // To install the xds resolvers and balancers. ``` -Then, use `xds-experimental` target scheme for the ClientConn. +Then, use `xds` target scheme for the ClientConn. ``` $ export GRPC_XDS_BOOTSTRAP=/path/to/bootstrap.json -$ go run client/main.go "xDS world" xds-experimental:///target_service +$ go run client/main.go "xDS world" xds:///target_service ``` diff --git a/examples/features/xds/client/main.go b/examples/features/xds/client/main.go index <HASH>..<HASH> 100644 --- a/examples/features/xds/client/main.go +++ b/examples/features/xds/client/main.go @@ -49,7 +49,7 @@ Usage: client [name [target]] name The name you wish to be greeted by. Defaults to %q target - The URI of the server, e.g. "xds-experimental:///helloworld-service". Defaults to %q + The URI of the server, e.g. "xds:///helloworld-service". Defaults to %q `, defaultName, defaultTarget) flag.PrintDefaults() diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index <HASH>..<HASH> 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -28,7 +28,7 @@ GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ --gcp_suffix=$(date '+%s') \ --verbose \ --client_cmd="grpc-go/interop/xds/client/client \ - --server=xds-experimental:///{server_uri} \ + --server=xds:///{server_uri} \ --stats_port={stats_port} \ --qps={qps} \ {fail_on_failed_rpc}" diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index <HASH>..<HASH> 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/xds/internal/client/bootstrap" ) -// xDS balancer name is xds_experimental while resolver scheme is -// xds-experimental since "_" is not a valid character in the URL. -const xdsScheme = "xds-experimental" +const xdsScheme = "xds" // For overriding in unittests. var ( @@ -216,3 +214,18 @@ func (r *xdsResolver) Close() { r.cancelCtx() r.logger.Infof("Shutdown") } + +// Keep scheme with "-experimental" temporarily. Remove after one release. +const schemeExperimental = "xds-experimental" + +type xdsResolverExperimentalBuilder struct { + xdsResolverBuilder +} + +func (*xdsResolverExperimentalBuilder) Scheme() string { + return schemeExperimental +} + +func init() { + resolver.Register(&xdsResolverExperimentalBuilder{}) +} diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index <HASH>..<HASH> 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -54,6 +54,15 @@ var ( target = resolver.Target{Endpoint: targetStr} ) +func TestRegister(t *testing.T) { + for _, s := range []string{"xds", "xds-experimental"} { + b := resolver.Get(s) + if b == nil { + t.Errorf("scheme %v is not registered", s) + } + } +} + // testClientConn is a fake implemetation of resolver.ClientConn. All is does // is to store the state received from the resolver locally and signal that // event through a channel.
xds: rename resolver scheme to xds (#<I>) <URL>
grpc_grpc-go
train
ce62996b59eee70755a86cf9cfb8e8d0a6dabe36
diff --git a/src/mako/application/cli/commands/migrations/RollbackTrait.php b/src/mako/application/cli/commands/migrations/RollbackTrait.php index <HASH>..<HASH> 100644 --- a/src/mako/application/cli/commands/migrations/RollbackTrait.php +++ b/src/mako/application/cli/commands/migrations/RollbackTrait.php @@ -59,6 +59,6 @@ trait RollbackTrait $this->write('Rolled back the following migrations:' . PHP_EOL); - $this->outputMigrationList($migrations); + $this->outputMigrationList($migrations->getItems()); } }
Fix migration rollback action When trying to roll back migrations, everything works fine except in the last step, when the command tries to output the reverted migrations list. The 'outputMigrationList' method expects an array parameter but an object is passed instead.
mako-framework_framework
train
c8d35552cea362a8fe393acef34c7cab6fee83c7
diff --git a/classes/PodsAPI.php b/classes/PodsAPI.php index <HASH>..<HASH> 100644 --- a/classes/PodsAPI.php +++ b/classes/PodsAPI.php @@ -2544,6 +2544,17 @@ class PodsAPI return $out; } + /** + * Return an array of dummy data for select2 autocomplete input + */ + public function select2_ajax() { + return array( + 'option_1' => 'Option 1', + 'option_2' => 'Option 2', + 'option_3' => 'Option 3', + 'option_4' => 'Option 4', + ); + } private function do_hook () { $args = func_get_args(); if (empty($args)) diff --git a/classes/PodsAdmin.php b/classes/PodsAdmin.php index <HASH>..<HASH> 100644 --- a/classes/PodsAdmin.php +++ b/classes/PodsAdmin.php @@ -65,6 +65,10 @@ class PodsAdmin { wp_register_script('jquery-chosen', PODS_URL . 'ui/js/chosen.jquery.min.js', array('jquery'), '0.9.8'); if (!wp_style_is('jquery-chosen', 'registered')) wp_register_style('jquery-chosen', PODS_URL . 'ui/css/chosen.css', array(), '0.9.8'); + if (!wp_style_is('jquery-select2', 'registered')) + wp_register_style('jquery-select2', PODS_URL . 'ui/css/select2.css', array(), '2.1'); + if (!wp_script_is('jquery-select2', 'registered')) + wp_register_script('jquery-select2', PODS_URL . 'ui/js/select2.min.js', array('jquery'), '2.1'); if (isset($_GET['page'])) { $page = $_GET['page']; if ('pods' == $page || (false !== strpos($page, 'pods-') && 0 === strpos($page, 'pods-'))) { @@ -119,6 +123,10 @@ class PodsAdmin { wp_enqueue_script('thickbox'); wp_enqueue_style('thickbox'); + // Select2 + wp_enqueue_script('jquery-select2'); + wp_enqueue_style('jquery-select2'); + // Plupload scripts wp_enqueue_script('plupload'); wp_enqueue_script('plupload-html5'); @@ -272,7 +280,7 @@ class PodsAdmin { } public function pods_form_test() { - require_once PODS_DIR . 'ui/admin/pods_form_test.php'; + require_once PODS_DIR . 'ui/admin/form.php'; } public function admin_setup() { @@ -419,7 +427,8 @@ class PodsAdmin { 'import_package' => array('priv' => 'manage_packages'), 'validate_package' => array('priv' => 'manage_packages'), 'replace_package' => array('priv' => 'manage_packages'), - 'security_settings' => array('priv' => 'manage_settings')); + 'security_settings' => array('priv' => 'manage_settings'), + 'select2_ajax' => array('priv' => 'manage_pods', 'format' => 'json')); $methods = apply_filters('pods_admin_ajax_methods', $methods, $this);
Added select2 ajax method
pods-framework_pods
train
cfd19f136d4d67345284423ec897b3388194ffb3
diff --git a/libkbfs/folder_branch_ops.go b/libkbfs/folder_branch_ops.go index <HASH>..<HASH> 100644 --- a/libkbfs/folder_branch_ops.go +++ b/libkbfs/folder_branch_ops.go @@ -3314,6 +3314,10 @@ func (fbo *folderBranchOps) renameLocked( return err } + if err := checkDisallowedPrefixes(newName); err != nil { + return err + } + oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent) if err != nil { return err diff --git a/libkbfs/kbfs_ops_test.go b/libkbfs/kbfs_ops_test.go index <HASH>..<HASH> 100644 --- a/libkbfs/kbfs_ops_test.go +++ b/libkbfs/kbfs_ops_test.go @@ -695,6 +695,39 @@ func testPutBlockInCache( } } +func TestKBFSOpsGetBaseDirChildrenHidesFiles(t *testing.T) { + mockCtrl, config, ctx, cancel := kbfsOpsInit(t) + defer kbfsTestShutdown(mockCtrl, config, ctx, cancel) + + u, id, rmd := injectNewRMD(t, config) + + rootID := kbfsblock.FakeID(42) + dirBlock := NewDirBlock().(*DirBlock) + dirBlock.Children["a"] = DirEntry{EntryInfo: EntryInfo{Type: File}} + dirBlock.Children[".kbfs_git"] = DirEntry{EntryInfo: EntryInfo{Type: Dir}} + blockPtr := makeBP(rootID, rmd, config, u) + rmd.data.Dir.BlockPointer = blockPtr + node := pathNode{blockPtr, "p"} + p := path{FolderBranch{Tlf: id}, []pathNode{node}} + testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock) + ops := getOps(config, id) + n := nodeFromPath(t, ops, p) + + children, err := config.KBFSOps().GetDirChildren(ctx, n) + if err != nil { + t.Errorf("Got error on getdir: %+v", err) + } else if len(children) != 1 { + t.Errorf("Got bad children back: %v", children) + } + for c, ei := range children { + if de, ok := dirBlock.Children[c]; !ok { + t.Errorf("No such child: %s", c) + } else if de.EntryInfo != ei { + t.Errorf("Wrong EntryInfo for child %s: %v", c, ei) + } + } +} + func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) { mockCtrl, config, ctx, cancel := kbfsOpsInit(t) defer kbfsTestShutdown(mockCtrl, config, ctx, cancel)
folder_branch_ops: don't allow a rename to a disallowed prefix Issue: KBFS-<I>
keybase_client
train
92258ac96804979f232f1627c54c498a6b4a3367
diff --git a/grails-core/src/main/groovy/org/codehaus/groovy/grails/exceptions/DefaultStackTraceFilterer.java b/grails-core/src/main/groovy/org/codehaus/groovy/grails/exceptions/DefaultStackTraceFilterer.java index <HASH>..<HASH> 100644 --- a/grails-core/src/main/groovy/org/codehaus/groovy/grails/exceptions/DefaultStackTraceFilterer.java +++ b/grails-core/src/main/groovy/org/codehaus/groovy/grails/exceptions/DefaultStackTraceFilterer.java @@ -91,19 +91,11 @@ public class DefaultStackTraceFilterer implements StackTraceFilterer { public Throwable filter(Throwable source) { if (shouldFilter) { StackTraceElement[] trace = source.getStackTrace(); - List<StackTraceElement> newTrace = new ArrayList<StackTraceElement>(); - boolean foundGroovy = false; - for (StackTraceElement stackTraceElement : trace) { - String className = stackTraceElement.getClassName(); - if(!foundGroovy && stackTraceElement.getFileName().endsWith(".groovy")) { - foundGroovy = true; - } - if (cutOffPackage != null && className.startsWith(cutOffPackage) && foundGroovy) break; - if (isApplicationClass(className)) { - if (stackTraceElement.getLineNumber() > -1) { - newTrace.add(stackTraceElement); - } - } + List<StackTraceElement> newTrace = filterTraceWithCutOff(trace, cutOffPackage); + + if(newTrace.size() == 0) { + // filter with no cut-off so at least there is some trace + newTrace = filterTraceWithCutOff(trace, null); } // Only trim the trace if there was some application trace on the stack @@ -119,6 +111,24 @@ public class DefaultStackTraceFilterer implements StackTraceFilterer { return source; } + private List<StackTraceElement> filterTraceWithCutOff(StackTraceElement[] trace, String endPackage) { + List<StackTraceElement> newTrace = new ArrayList<StackTraceElement>(); + boolean foundGroovy = false; + for (StackTraceElement stackTraceElement : trace) { + String className = stackTraceElement.getClassName(); + if(!foundGroovy && stackTraceElement.getFileName().endsWith(".groovy")) { + foundGroovy = true; + } + if (endPackage != null && className.startsWith(endPackage) && foundGroovy) break; + if (isApplicationClass(className)) { + if (stackTraceElement.getLineNumber() > -1) { + newTrace.add(stackTraceElement); + } + } + } + return newTrace; + } + /** * Whether the given class name is an internal class and should be filtered * @param className The class name
If filtering produced no trace and a cut-off package was specified, then filter again with no cut-off package
grails_grails-core
train
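The grails-core change above adds a fallback pass: if filtering with the cut-off package removes the entire trace, it filters once more without the cut-off so at least some trace survives. A minimal Python sketch of that two-pass pattern (the frame strings and the is_app predicate are invented for illustration):

def filter_frames(frames, is_app, cutoff=None):
    kept = []
    for frame in frames:
        if cutoff and frame.startswith(cutoff):
            break                        # stop once the cut-off package appears
        if is_app(frame):
            kept.append(frame)
    return kept

def filtered_trace(frames, is_app, cutoff):
    kept = filter_frames(frames, is_app, cutoff)
    if not kept:                         # the cut-off swallowed everything, retry
        kept = filter_frames(frames, is_app, None)
    return kept

is_app = lambda frame: frame.startswith("com.myapp.")
frames = ["grails.web.Filter.doFilter", "com.myapp.Service.run"]
print(filtered_trace(frames, is_app, cutoff="grails."))  # ['com.myapp.Service.run']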
11df045169b4247ad2f450e802dc579f6ff06471
diff --git a/src/StoragelessSession/Http/SessionMiddleware.php b/src/StoragelessSession/Http/SessionMiddleware.php index <HASH>..<HASH> 100644 --- a/src/StoragelessSession/Http/SessionMiddleware.php +++ b/src/StoragelessSession/Http/SessionMiddleware.php @@ -150,7 +150,10 @@ final class SessionMiddleware implements MiddlewareInterface public function __invoke(Request $request, Response $response, callable $out = null) { list($request, $sessionContainer) = $this->injectSession($request, $this->parseToken($request)); - $response = $out === null ? $response : $out($request, $response); + + if (null !== $out) { + $response = $out($request, $response); + } return $this->appendToken($sessionContainer, $response); }
Removing weird ternary as per @danizord's review
psr7-sessions_storageless
train
25fd3cdd5db33faf92babc0ff72f66824bd42b00
diff --git a/waybackpack/archive.py b/waybackpack/archive.py index <HASH>..<HASH> 100644 --- a/waybackpack/archive.py +++ b/waybackpack/archive.py @@ -169,6 +169,11 @@ class Resource(object): prefix=prefix, ts=ts, suffix=suffix) path = os.path.join(directory, filename) + try: + os.makedirs(directory) + except OSError: + pass + with open(path, "wb") as f: logger.info("Fetching {0}".format(ts)) content = view.fetch(user_agent=user_agent)
Create destination directory if it doesn't exist already
jsvine_waybackpack
train
6353fd0b9ec04b9303dc3980a28a49788b8be54c
diff --git a/QuickPay/api/Request.php b/QuickPay/api/Request.php index <HASH>..<HASH> 100644 --- a/QuickPay/api/Request.php +++ b/QuickPay/api/Request.php @@ -157,7 +157,7 @@ class Request // If additional data is delivered, we will send it along with the API request if( is_array( $form ) && ! empty( $form ) ) { - curl_setopt( $this->client->ch, CURLOPT_POSTFIELDS, $form ); + curl_setopt( $this->client->ch, CURLOPT_POSTFIELDS, http_build_query($form) ); } // Execute the request
Support for parameters of type object - e.g. POST /payments with variables. Example: $form = [ 'currency' => 'DKK', 'order_id' => '<I>-1', 'variables' => [ 'foo' => 'bar', 'more' => 'less' ] ]; $response = $client->request->post('/payments', $form);
QuickPay_quickpay-php-client
train
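The quickpay fix above serializes the form with http_build_query, which turns nested arrays into PHP-style bracketed keys instead of handing cURL a structure it cannot encode. A rough Python analogue of that flattening; the flatten() helper is invented for this illustration:

from urllib.parse import urlencode

def flatten(form, prefix=""):
    pairs = []
    for key, value in form.items():
        name = "{}[{}]".format(prefix, key) if prefix else key
        if isinstance(value, dict):
            pairs.extend(flatten(value, name))  # recurse into nested maps
        else:
            pairs.append((name, value))
    return pairs

form = {"currency": "DKK", "variables": {"foo": "bar", "more": "less"}}
print(urlencode(flatten(form)))
# currency=DKK&variables%5Bfoo%5D=bar&variables%5Bmore%5D=less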
8d7ab01c67d037b47a2bf617f4263b5f85b39f25
diff --git a/tests/unit/states/npm_test.py b/tests/unit/states/npm_test.py index <HASH>..<HASH> 100644 --- a/tests/unit/states/npm_test.py +++ b/tests/unit/states/npm_test.py @@ -218,7 +218,7 @@ class NpmTestCase(TestCase): with patch.dict(npm.__opts__, {'test': False}): comt = ('Error cleaning cached {0}'.format(name)) pkg_ret.update({'result': False, 'comment': comt}) - pkg_ret['changes'] = {} + pkg_ret['changes'] = {} self.assertDictEqual(npm.cache_cleaned(name), pkg_ret)
Fixing npm cache_cleaned lint errors
saltstack_salt
train
82cb21fca3f6598fa5451d73034b8a90a4b1db77
diff --git a/server.go b/server.go index <HASH>..<HASH> 100644 --- a/server.go +++ b/server.go @@ -668,6 +668,7 @@ func (server *Server) buildDefaultHTTPRouter() *mux.Router { router := mux.NewRouter() router.NotFoundHandler = http.HandlerFunc(notFoundHandler) router.StrictSlash(true) + router.SkipClean(true) return router }
Disable gorilla/mux URL cleaning to prevent sending redirect This fixes #<I> and #<I>. By default, gorilla/mux cleans URL paths such that adjacent slashes are collapsed into one single slash. This behavior is pretty common in application stacks and it is fine, but for Traefik, this can lead to incorrect URL paths forwarded to backend servers. See <URL>
containous_traefik
train
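To see what such URL cleaning does, path normalization in any language shows the effect: adjacent slashes collapse and dot segments resolve, so a proxy that must forward the raw path verbatim has to switch it off. A quick Python illustration:

import posixpath

print(posixpath.normpath("/api//v1/items"))  # /api/v1/items
print(posixpath.normpath("/a/b/../c"))       # /a/c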
7b67a751efe3bb9796ee64872421efffe88d31de
diff --git a/activesupport/lib/active_support/notifications.rb b/activesupport/lib/active_support/notifications.rb index <HASH>..<HASH> 100644 --- a/activesupport/lib/active_support/notifications.rb +++ b/activesupport/lib/active_support/notifications.rb @@ -171,6 +171,24 @@ module ActiveSupport end end + # Subscribe to a given event name with the passed +block+. + # + # You can subscribe to events by passing a String to match exact event + # names, or by passing a Regexp to match all events that match a pattern. + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # ... + # end + # + # The +block+ will receive five parameters with information about the event: + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end def subscribe(*args, &block) notifier.subscribe(*args, &block) end
Add docs to ActiveSupport::Notifications.subscribe [ci skip]
rails_rails
train
214a90e048b51826242f7551dade0c6c1227c2cc
diff --git a/ddl/worker.go b/ddl/worker.go index <HASH>..<HASH> 100644 --- a/ddl/worker.go +++ b/ddl/worker.go @@ -141,16 +141,12 @@ func (d *ddl) updateJob(t *meta.TMeta, job *model.Job) error { func (d *ddl) finishJob(t *meta.TMeta, job *model.Job) error { // done, notice and run next job. - err := d.meta.RunInNewTxn(false, func(t *meta.TMeta) error { - _, err := t.DeQueueDDLJob() - if err != nil { - return errors.Trace(err) - } - - err = t.AddHistoryDDLJob(job) + _, err := t.DeQueueDDLJob() + if err != nil { return errors.Trace(err) - }) + } + err = t.AddHistoryDDLJob(job) return errors.Trace(err) }
ddl: make finishJob use the same transaction.
pingcap_tidb
train
80ba5aa1c545534b8f3bf008221ce513870b94f2
diff --git a/src/Chart.Line.js b/src/Chart.Line.js index <HASH>..<HASH> 100644 --- a/src/Chart.Line.js +++ b/src/Chart.Line.js @@ -275,6 +275,7 @@ return helpers.findPreviousWhere(collection, hasValue, index) || point; }; + if (!this.scale) return; this.scale.draw(easingDecimal);
Error thrown (TypeError: this.scale is undefined) if destroy is called before the animation is complete; this update fixes the error.
chartjs_Chart.js
train
4256b143f2e9f283a24a19c13dfb34747167d9ac
diff --git a/saltpylint/pep8.py b/saltpylint/pep8.py index <HASH>..<HASH> 100644 --- a/saltpylint/pep8.py +++ b/saltpylint/pep8.py @@ -143,6 +143,8 @@ class PEP8Indentation(_PEP8BaseChecker): 'expected-an-indented-block'), 'E8113': ('PEP8 %s: unexpected indentation', 'unexpected-indentation'), + 'E8114': ('PEP8 %s: indentation is not a multiple of four (comment)', + 'indentation-is-not-a-multiple-of-four-comment'), 'E8121': ('PEP8 %s: continuation line indentation is not a multiple of four', 'continuation-line-indentation-is-not-a-multiple-of-four'), 'E8122': ('PEP8 %s: continuation line missing indentation or outdented',
Add support for PEP8 E<I>
saltstack_salt-pylint
train
8971d114bf88ad0c712cd90f97c3eed0296bb8e9
diff --git a/pyardrone/__init__.py b/pyardrone/__init__.py index <HASH>..<HASH> 100644 --- a/pyardrone/__init__.py +++ b/pyardrone/__init__.py @@ -76,7 +76,7 @@ class ARDrone: @property def state(self): ''' - The latest state from *NavData*. + The latest state from :py:class:`~pyardrone.navdata.NavData`. >>> drone.state.fly_mask True # drone is flying
doc: make state doc visible
afg984_pyardrone
train
147c1082d82b0c3ee04f8a72ae8f91ca6b0bec42
diff --git a/src/Service/ElasticaService.php b/src/Service/ElasticaService.php index <HASH>..<HASH> 100644 --- a/src/Service/ElasticaService.php +++ b/src/Service/ElasticaService.php @@ -374,14 +374,14 @@ class ElasticaService return []; } if (!\is_array($value)) { - return [$value]; + return \explode(',', $value); } return $value; }) ->setNormalizer('index', function (Options $options, $value) { if (!\is_array($value)) { - return [$value]; + return \explode(',', $value); } return $value;
fix: split comma-separated (csv) indexes and types
ems-project_EMSCommonBundle
train
b995d2657ddbc9f01949161669347c44a8f1a974
diff --git a/src/helpers.js b/src/helpers.js index <HASH>..<HASH> 100644 --- a/src/helpers.js +++ b/src/helpers.js @@ -206,19 +206,26 @@ function cloner(deep, item) { if (type.object(item)) { const o = {}; - let k; + let i, l, k; // NOTE: could be possible to erase computed properties through `null`. - for (k in item) { + const props = Object.getOwnPropertyNames(item); + for (i = 0, l = props.length; i < l; i++) { + k = props[i]; if (type.lazyGetter(item, k)) { Object.defineProperty(o, k, { get: Object.getOwnPropertyDescriptor(item, k).get, - enumerable: true, + enumerable: Object.getOwnPropertyDescriptor(item, k).enumerable, configurable: true }); } - else if (hasOwnProp.call(item, k)) { - o[k] = deep ? cloner(true, item[k]) : item[k]; + else { + Object.defineProperty(o, k, { + value: deep ? cloner(true, item[k]) : item[k], + enumerable: Object.getOwnPropertyDescriptor(item, k).enumerable, + writable: true, + configurable: true + }); } } return o;
Add support for non-enumerable properties
Yomguithereal_baobab
train
337d805e7ff7b05568e81968dc560b06f86faa81
diff --git a/coinbase-java/src/main/java/com/coinbase/v1/entity/PaymentMethod.java b/coinbase-java/src/main/java/com/coinbase/v1/entity/PaymentMethod.java index <HASH>..<HASH> 100755 --- a/coinbase-java/src/main/java/com/coinbase/v1/entity/PaymentMethod.java +++ b/coinbase-java/src/main/java/com/coinbase/v1/entity/PaymentMethod.java @@ -2,6 +2,7 @@ package com.coinbase.v1.entity; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonValue; +import com.sun.org.apache.xpath.internal.operations.Bool; import java.io.Serializable; @@ -136,6 +137,8 @@ public class PaymentMethod implements Serializable { private String _name; private Boolean _allowBuy; private Boolean _allowSell; + private Boolean _allowDeposit; + private Boolean _allowWithdraw; private Boolean _primaryBuy; private Boolean _primarySell; private Account _account; @@ -183,6 +186,22 @@ public class PaymentMethod implements Serializable { _allowSell = allowSell; } + public Boolean allowDeposit() { + return _allowDeposit; + } + + public void setAllowDeposit(Boolean allowDeposit) { + _allowDeposit = allowDeposit; + } + + public Boolean allowWithdraw() { + return _allowWithdraw; + } + + public void setAllowWithdraw(Boolean allowWithdraw) { + _allowWithdraw = allowWithdraw; + } + public Account getAccount() { return _account; }
add allowDeposit and allowWithdraw properties
coinbase_coinbase-java
train
4e7bea2c9adecdf9415b88abec57d21ca0c4d1a9
diff --git a/src/main/java/de/btobastian/javacord/entities/message/Message.java b/src/main/java/de/btobastian/javacord/entities/message/Message.java index <HASH>..<HASH> 100644 --- a/src/main/java/de/btobastian/javacord/entities/message/Message.java +++ b/src/main/java/de/btobastian/javacord/entities/message/Message.java @@ -140,6 +140,13 @@ public interface Message extends Comparable<Message> { public Future<Void> delete(); /** + * Checks if the message is deleted. + * + * @return Whether the message is deleted or not. + */ + public boolean isDeleted(); + + /** * Gets the attachments of a message. * * @return The attachments of the message. diff --git a/src/main/java/de/btobastian/javacord/entities/message/impl/ImplMessage.java b/src/main/java/de/btobastian/javacord/entities/message/impl/ImplMessage.java index <HASH>..<HASH> 100644 --- a/src/main/java/de/btobastian/javacord/entities/message/impl/ImplMessage.java +++ b/src/main/java/de/btobastian/javacord/entities/message/impl/ImplMessage.java @@ -104,6 +104,7 @@ public class ImplMessage implements Message { private final String nonce; private boolean mentionsEveryone; private boolean pinned; + private boolean deleted = false; private Calendar creationDate = Calendar.getInstance(); private final Collection<Embed> embeds = new ArrayList<>(); @@ -313,6 +314,7 @@ public class ImplMessage implements Message { response, RateLimitType.SERVER_MESSAGE_DELETE, getChannelReceiver().getServer()); } api.removeMessage(message); + deleted = true; logger.debug("Deleted message (id: {}, author: {}, content: \"{}\")", getId(), getAuthor(), getContent()); // call listener @@ -337,6 +339,11 @@ public class ImplMessage implements Message { } @Override + public boolean isDeleted() { + return deleted; + } + + @Override public Collection<MessageAttachment> getAttachments() { return Collections.unmodifiableCollection(attachments); } diff --git a/src/main/java/de/btobastian/javacord/utils/handler/message/MessageDeleteHandler.java b/src/main/java/de/btobastian/javacord/utils/handler/message/MessageDeleteHandler.java index <HASH>..<HASH> 100644 --- a/src/main/java/de/btobastian/javacord/utils/handler/message/MessageDeleteHandler.java +++ b/src/main/java/de/btobastian/javacord/utils/handler/message/MessageDeleteHandler.java @@ -51,7 +51,7 @@ public class MessageDeleteHandler extends PacketHandler { public void handle(JSONObject packet) { String messageId = packet.getString("id"); final Message message = api.getMessageById(messageId); - if (message == null) { + if (message == null || message.isDeleted()) { return; // no cached version available } listenerExecutorService.submit(new Runnable() {
Added Message#isDeleted() and fixed double call of listener
Javacord_Javacord
train
2425e5947c9bfda64b8debcdf95b12a448985d8d
diff --git a/authapi/tests/test_teams.py b/authapi/tests/test_teams.py
index <HASH>..<HASH> 100644
--- a/authapi/tests/test_teams.py
+++ b/authapi/tests/test_teams.py
@@ -154,6 +154,114 @@ class TeamTests(AuthAPITestCase):
         response = self.client.get(reverse('seedteam-list'))
         self.assertEqual(len(response.data[0]['users']), 0)
 
+    def test_permissions_team_list_unauthorized(self):
+        '''Unauthorized users shouldn't be able to see team list.'''
+        url = reverse('seedteam-list')
+
+        response = self.client.get(url)
+        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
+
+    def test_permissions_team_list_member_of_team(self):
+        '''Teams that a user is a member of should be displayed on the list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team1 = SeedTeam.objects.create(organization=org)
+        SeedTeam.objects.create(organization=org)
+        team1.users.add(user)
+        response = self.client.get(url)
+        [team] = response.data
+        self.assertEqual(team['id'], team1.pk)
+
+    def test_permissions_team_list_read_permission(self):
+        '''Teams that a user has 'team:read' permission for should be
+        displayed on the list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team1 = SeedTeam.objects.create(organization=org)
+        team2 = SeedTeam.objects.create(organization=org)
+        self.add_permission(user, 'team:read', team1.pk)
+        response = self.client.get(url)
+        self.assertTrue(team2.pk not in [t['id'] for t in response.data])
+
+    def test_permissions_team_list_admin_permission(self):
+        '''Teams that a user has 'team:admin' permission for should be
+        displayed on the list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team1 = SeedTeam.objects.create(organization=org)
+        team2 = SeedTeam.objects.create(organization=org)
+        self.add_permission(user, 'team:admin', team1.pk)
+        response = self.client.get(url)
+        self.assertTrue(team2.pk not in [t['id'] for t in response.data])
+
+    def test_permissions_team_list_org_member(self):
+        '''Teams that are a part of an organization that the user is part of
+        should be displayed in the team list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        org.users.add(user)
+        team = SeedTeam.objects.create(organization=org)
+        response = self.client.get(url)
+        [resp_team] = response.data
+        self.assertEqual(team.pk, resp_team['id'])
+
+    def test_permissions_team_list_org_admin(self):
+        '''Teams that are a part of an organization that a user has org:admin
+        permission for should be displayed on the list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team = SeedTeam.objects.create(organization=org)
+        self.add_permission(user, 'org:admin', org.pk)
+        response = self.client.get(url)
+        self.assertTrue(team.pk in [t['id'] for t in response.data])
+
+    def test_permissions_team_list_org_write(self):
+        '''Teams that are a part of an organization that a user has org:write
+        permission for should be displayed on the list.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team = SeedTeam.objects.create(organization=org)
+        self.add_permission(user, 'org:write', org.pk)
+        response = self.client.get(url)
+        self.assertTrue(team.pk in [t['id'] for t in response.data])
+
+    def test_permissions_team_list_admin(self):
+        '''Admin users should be able to see all teams.'''
+        url = reverse('seedteam-list')
+
+        user, token = self.create_admin_user()
+        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+
+        org = SeedOrganization.objects.create()
+        team = SeedTeam.objects.create(organization=org)
+        response = self.client.get(url)
+        [resp_team] = response.data
+        self.assertTrue(team.pk, resp_team['id'])
+
     def test_create_team(self):
         '''Creating teams on this endpoint should not be allowed.'''
         _, token = self.create_admin_user()
Add tests for getting a list of teams
praekeltfoundation_seed-auth-api
train
3dd5251f50fc8330e7189487f56ba6d3a8061ab2
diff --git a/src/main/java/io/github/classgraph/ClasspathElement.java b/src/main/java/io/github/classgraph/ClasspathElement.java index <HASH>..<HASH> 100644 --- a/src/main/java/io/github/classgraph/ClasspathElement.java +++ b/src/main/java/io/github/classgraph/ClasspathElement.java @@ -42,6 +42,7 @@ import io.github.classgraph.utils.NestedJarHandler; import io.github.classgraph.utils.WorkQueue; /** A classpath element (a directory or jarfile on the classpath). */ +// TODO: This can probably be merged with ClasspathOrModulePathEntry, since it is mostly a wrapper for that class. abstract class ClasspathElement { /** The path of the classpath element relative to the current directory. */ final ClasspathOrModulePathEntry classpathEltPath; @@ -98,6 +99,11 @@ abstract class ClasspathElement { return classpathEltPath.getRawPath(); } + /** Return the resolved path for this classpath element (i.e. the raw path resolved against the current dir). */ + String getResolvedPath() { + return classpathEltPath.getResolvedPath(); + } + /** * @return The classpath element's file (directory or jarfile), or null if this is a module. May trigger the * extraction of nested jars. diff --git a/src/main/java/io/github/classgraph/ScanResult.java b/src/main/java/io/github/classgraph/ScanResult.java index <HASH>..<HASH> 100644 --- a/src/main/java/io/github/classgraph/ScanResult.java +++ b/src/main/java/io/github/classgraph/ScanResult.java @@ -258,7 +258,7 @@ public final class ScanResult implements Closeable, AutoCloseable { } else { // If not scanning, nested jarfiles were not extracted, so use the raw classpath // element paths to form the resulting URL - String rawPath = classpathElement.getRawPath(); + String rawPath = classpathElement.getResolvedPath(); if (rawPath.startsWith("jrt:/") || rawPath.startsWith("http://") || rawPath.startsWith("https://")) { classpathElementOrderURLs.add(new URL(rawPath));
Further standardize jarfile URLs, and resolve them relative to curr dir
classgraph_classgraph
train
c39e2db0274204752fb98b23a4ea2a5d5147ab28
diff --git a/lib/connector/IFrameConnector.js b/lib/connector/IFrameConnector.js
index <HASH>..<HASH> 100644
--- a/lib/connector/IFrameConnector.js
+++ b/lib/connector/IFrameConnector.js
@@ -90,15 +90,10 @@ var IFrameConnector = Connector.inherit(/** @lends baqend.connector.IFrameConnec
     } else {
       this.postMessage(msg);
     }
-  },
-
-  postMessage: function(msg) {
-    this.iframe.contentWindow.postMessage(msg, this.origin);
 
     if (!this.connected) {
       setTimeout(function() {
-        var receive = this.messages[JSON.parse(msg).mid];
-        if (receive) {
+        if (this.messages[msg.mid]) {
           delete this.messages[msg.mid];
           receive({
             status: 0,
@@ -109,6 +104,10 @@ var IFrameConnector = Connector.inherit(/** @lends baqend.connector.IFrameConnec
     }
   },
 
+  postMessage: function(msg) {
+    this.iframe.contentWindow.postMessage(msg, this.origin);
+  },
+
   /**
    * @inheritDoc
    */
@@ -122,11 +121,11 @@ var IFrameConnector = Connector.inherit(/** @lends baqend.connector.IFrameConnec
     }
 
     var msg = JSON.parse(event.data);
-    this.connected = true;
 
     var receive = this.messages[msg.mid];
     if (receive) {
       delete this.messages[msg.mid];
+      this.connected = true;
 
       receive({
         status: msg.status,
diff --git a/lib/util/TokenStorage.js b/lib/util/TokenStorage.js
index <HASH>..<HASH> 100644
--- a/lib/util/TokenStorage.js
+++ b/lib/util/TokenStorage.js
@@ -56,11 +56,9 @@ TokenStorage.GLOBAL = new TokenStorage();
 try {
   if (typeof localStorage != "undefined") {
     TokenStorage.WEB_STORAGE = new (TokenStorage.inherit({
-      constructor: function TokenLocalStorage() {},
+      constructor: function TokenWebStorage() {},
       get: function(origin) {
-        var token = sessionStorage.getItem('BAT:' + origin) || localStorage.getItem('BAT:' + origin);
-        console.log(token);
-        return token;
+        return sessionStorage.getItem('BAT:' + origin) || localStorage.getItem('BAT:' + origin);
       },
       update: function(origin, token, useLocalStorage) {
         if (useLocalStorage === undefined)
diff --git a/spec/user.spec.js b/spec/user.spec.js
index <HASH>..<HASH> 100644
--- a/spec/user.spec.js
+++ b/spec/user.spec.js
@@ -377,20 +377,18 @@ describe('Test user and roles', function() {
 
   if (typeof localStorage !== "undefined") {
     it('should save token in session storage when register loginOption is false', function() {
-      var user = new DB.User({ username: makeLogin(), email: "[email protected]" });
+      var user = new DB.User({ username: makeLogin()});
       return DB.User.register(user, 'secret', false).then(function(u) {
         expect(u.username).eqls(user.username);
-        expect(u.email).eqls("[email protected]");
         expect(localStorage.getItem('BAT:' + db._connector.origin)).be.not.ok;
         expect(sessionStorage.getItem('BAT:' + db._connector.origin)).be.ok;
       });
     });
 
     it('should save token in local storage when register loginOption is true', function() {
-      var user = new DB.User({ username: makeLogin(), email: "[email protected]" });
+      var user = new DB.User({ username: makeLogin()});
       return DB.User.register(user, 'secret', true).then(function(u) {
         expect(u.username).eqls(user.username);
-        expect(u.email).eqls("[email protected]");
         expect(localStorage.getItem('BAT:' + db._connector.origin)).be.ok;
         expect(sessionStorage.getItem('BAT:' + db._connector.origin)).be.not.ok;
       });
@@ -398,12 +396,11 @@ describe('Test user and roles', function() {
 
     it('should save token in session storage when login loginOption is false', function() {
       var username = makeLogin();
-      var user = new DB.User({ username: username, email: "[email protected]" });
+      var user = new DB.User({ username: username});
      return DB.User.register(user, 'secret', db.User.LoginOption.NO_LOGIN).then(function() {
         return DB.User.login(username, 'secret', false);
       }).then(function(u) {
         expect(u.username).eqls(user.username);
-        expect(u.email).eqls("[email protected]");
         expect(localStorage.getItem('BAT:' + db._connector.origin)).be.not.ok;
         expect(sessionStorage.getItem('BAT:' + db._connector.origin)).be.ok;
       });
@@ -411,12 +408,11 @@ describe('Test user and roles', function() {
 
     it('should save token in local storage when login loginOption is true', function() {
       var username = makeLogin();
-      var user = new DB.User({ username: username, email: "[email protected]" });
+      var user = new DB.User({ username: username});
       return DB.User.register(user, 'secret', db.User.LoginOption.NO_LOGIN).then(function() {
         return DB.User.login(username, 'secret', true);
       }).then(function(u) {
         expect(u.username).eqls(user.username);
-        expect(u.email).eqls("[email protected]");
         expect(localStorage.getItem('BAT:' + db._connector.origin)).be.ok;
         expect(sessionStorage.getItem('BAT:' + db._connector.origin)).be.not.ok;
       });
Small bugfixes in connect abort logic; fix tests; cleanup
Baqend_js-sdk
train
d64556bc785411f33172840369651e19b8e15012
diff --git a/src/utils/DateLanguages.js b/src/utils/DateLanguages.js index <HASH>..<HASH> 100644 --- a/src/utils/DateLanguages.js +++ b/src/utils/DateLanguages.js @@ -1,4 +1,4 @@ -module.exports = { +export default { translations: { 'en': { 'months': { diff --git a/src/utils/DateUtils.js b/src/utils/DateUtils.js index <HASH>..<HASH> 100644 --- a/src/utils/DateUtils.js +++ b/src/utils/DateUtils.js @@ -1,4 +1,4 @@ -module.exports = { +export default { /** * Validates a date object
change to export default in all utils files - solves #<I>
charliekassel_vuejs-datepicker
train
abcbf48301177b3100dc79e96fba5a2b2eb13094
diff --git a/error.js b/error.js index <HASH>..<HASH> 100644 --- a/error.js +++ b/error.js @@ -139,9 +139,9 @@ } return error; }; - result.code = code; - result.name = name; - result.message = message; + result.CODE = code; + result.NAME = name; + result.MESSAGE = message; return result; },
Fix exception thrown when setting Function.name
ArnaudBuchholz_gpf-js
train
e2acf0c7c6cbd01d0ba117cb2d0c57def7e2e586
diff --git a/rb/lib/selenium/webdriver/remote/w3c/capabilities.rb b/rb/lib/selenium/webdriver/remote/w3c/capabilities.rb index <HASH>..<HASH> 100755 --- a/rb/lib/selenium/webdriver/remote/w3c/capabilities.rb +++ b/rb/lib/selenium/webdriver/remote/w3c/capabilities.rb @@ -41,6 +41,7 @@ module Selenium :set_window_rect, :timeouts, :unhandled_prompt_behavior, + :strict_file_interactability, # remote-specific :remote_session_id,
Add strictFileInteractability to known W3C capabilities
SeleniumHQ_selenium
train
c3faa3857945ea28dacf967d08709cee6ce8b37c
diff --git a/adafruit_platformdetect/constants/boards.py b/adafruit_platformdetect/constants/boards.py index <HASH>..<HASH> 100644 --- a/adafruit_platformdetect/constants/boards.py +++ b/adafruit_platformdetect/constants/boards.py @@ -199,6 +199,8 @@ _JETSON_IDS = ( ( "nvidia,p3509-0000+p3668-0000", "nvidia,p3509-0000+p3668-0001", + "nvidia,p3509-0000-a00+p3668-0000-a01", + "nvidia,p3509-0000-a00+p3668-0001-a01", "nvidia,p3449-0000+p3668-0000", "nvidia,p3449-0000+p3668-0001", ),
add ConnectTech Boson board for Jetson NX
adafruit_Adafruit_Python_PlatformDetect
train
8be1c0f88ab62ea7ebb3f66616dfe42efdaf1f9e
diff --git a/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/GoogleV1MapWrapper.java b/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/GoogleV1MapWrapper.java index <HASH>..<HASH> 100644 --- a/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/GoogleV1MapWrapper.java +++ b/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/GoogleV1MapWrapper.java @@ -166,7 +166,7 @@ class GoogleV1MapWrapper implements IMap { public void clearPolyline(final int id) { final GooglePolylineOverlay polyline = getPolyline(id); mMapView.getOverlays().remove(polyline); - mPolylines.remove(polyline); + mPolylines.remove(id); } private GooglePolylineOverlay getPolyline(final int id) { diff --git a/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/OsmdroidMapWrapper.java b/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/OsmdroidMapWrapper.java index <HASH>..<HASH> 100644 --- a/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/OsmdroidMapWrapper.java +++ b/osmdroid-third-party/src/main/java/org/osmdroid/google/wrapper/v2/OsmdroidMapWrapper.java @@ -185,7 +185,7 @@ class OsmdroidMapWrapper implements IMap { public void clearPolyline(final int id) { final PathOverlay polyline = getPolyline(id); mMapView.getOverlays().remove(polyline); - mPolylines.remove(polyline); + mPolylines.remove(id); } private PathOverlay getPolyline(final int id) {
Should clear polyline by key, not value
osmdroid_osmdroid
train
427c40b1a90a321bfd10d417fb44bc713ee83135
diff --git a/grakn-engine/src/main/java/ai/grakn/engine/postprocessing/PostProcessing.java b/grakn-engine/src/main/java/ai/grakn/engine/postprocessing/PostProcessing.java index <HASH>..<HASH> 100644 --- a/grakn-engine/src/main/java/ai/grakn/engine/postprocessing/PostProcessing.java +++ b/grakn-engine/src/main/java/ai/grakn/engine/postprocessing/PostProcessing.java @@ -172,7 +172,7 @@ public class PostProcessing { if(typeName.equals("Casting")){ numJobs = cache.getCastingJobs(keyspace).size(); } else if(typeName.equals("Resources")){ - numJobs = cache.getCastingJobs(keyspace).size(); + numJobs = cache.getResourceJobs(keyspace).size(); } LOG.info(" Post processing step [" + typeName + " for Graph [" + keyspace + "] has jobs : " + numJobs); total += numJobs;
Fix very minor bug (#<I>)
graknlabs_grakn
train
0aa832afec04215ed5e14b1cc5fc287ad7939792
diff --git a/src/manipulation.js b/src/manipulation.js index <HASH>..<HASH> 100644 --- a/src/manipulation.js +++ b/src/manipulation.js @@ -45,7 +45,6 @@ var // checked="checked" or checked rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - rscriptTypeMasked = /^true\/(.*)/, rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; // Prefer a tbody over its parent table for containing new rows @@ -65,10 +64,8 @@ function disableScript( elem ) { return elem; } function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); } else { elem.removeAttribute( "type" ); }
Manipulation: Reduce size by eliminating single-use variable. Closes gh-<I>
jquery_jquery
train
3b1c8f4fa5f1410ba052c0911c1e2f0d8765f76c
diff --git a/packages/node-krl-parser/src/tokenizer.js b/packages/node-krl-parser/src/tokenizer.js index <HASH>..<HASH> 100644 --- a/packages/node-krl-parser/src/tokenizer.js +++ b/packages/node-krl-parser/src/tokenizer.js @@ -70,16 +70,29 @@ module.exports = function(src, opts){ //chevron }else if(c === "<" && (src[i + 1] === "<")){ ctxChange(); - i++; + buff = src.substring(i, i + 2); + i += 2; + pushTok("CHEVRON-OPEN"); + next_is_escaped = false; while(i < src.length){ c = src[i]; - buff += c; - if(c === ">" && (src[i - 1] === ">") && (src[i - 2] !== "\\")){ - break; + if(next_is_escaped){ + next_is_escaped = false; + }else{ + if(c === "\\"){ + next_is_escaped = true; + } + if(c === ">" && (src[i + 1] === ">")){ + pushTok("CHEVRON-STRING"); + buff = src.substring(i, i + 2); + i += 2; + break; + } } + buff += c; i++; } - pushTok("CHEVRON"); + pushTok("CHEVRON-CLOSE"); /////////////////////////////////////////////////////////////////////////// //number diff --git a/packages/node-krl-parser/tests/tokenizer.test.js b/packages/node-krl-parser/tests/tokenizer.test.js index <HASH>..<HASH> 100644 --- a/packages/node-krl-parser/tests/tokenizer.test.js +++ b/packages/node-krl-parser/tests/tokenizer.test.js @@ -50,23 +50,29 @@ test("tokenizer", function(t){ ]); tst("<<some chevron\n\"?\"//string\nok?>>", [ - "[CHEVRON]<<some chevron\n\"?\"//string\nok?>>", + "[CHEVRON-OPEN]<<", + "[CHEVRON-STRING]some chevron\n\"?\"//string\nok?", + "[CHEVRON-CLOSE]>>", ]); tst("<<This #{x{\"flip\"}} that >\\> >>", [ - "[CHEVRON]<<This #{x{\"flip\"}} that >\\> >>", + "[CHEVRON-OPEN]<<", + "[CHEVRON-STRING]This #{x{\"flip\"}} that >\\> ", + "[CHEVRON-CLOSE]>>", ]); tst("<<This #{x{\"flip\"}} that >\\>>>", [ - "[CHEVRON]<<This #{x{\"flip\"}} that >\\>>>", + "[CHEVRON-OPEN]<<", + "[CHEVRON-STRING]This #{x{\"flip\"}} that >\\>", + "[CHEVRON-CLOSE]>>", ]); tst("<<This /* wat */\n//ok\n>>", [ - "[CHEVRON]<<This /* wat */\n//ok\n>>", + "[CHEVRON-OPEN]<<", + "[CHEVRON-STRING]This /* wat */\n//ok\n", + "[CHEVRON-CLOSE]>>", ]); - //NOTE a chevron in a beesting is not allowed. - tst("123", [ "[NUMBER]123", ]);
changing how Chevrons are tokenized to support beestings
Picolab_pico-engine
train
f6994a3c0dfc9f1b9cdf55f76ebe87709ca1e41c
diff --git a/src/RepeaterBuilder.php b/src/RepeaterBuilder.php index <HASH>..<HASH> 100644 --- a/src/RepeaterBuilder.php +++ b/src/RepeaterBuilder.php @@ -31,4 +31,9 @@ class RepeaterBuilder extends FieldsBuilder { return $this->getParentContext(); } + + public function setLocation($param, $operator, $value) + { + return $this->getParentContext()->setLocation($param, $operator, $value); + } } diff --git a/tests/RepeaterBuilderTest.php b/tests/RepeaterBuilderTest.php index <HASH>..<HASH> 100644 --- a/tests/RepeaterBuilderTest.php +++ b/tests/RepeaterBuilderTest.php @@ -45,4 +45,20 @@ class RepeaterBuilderTest extends \PHPUnit_Framework_TestCase ->endRepeater() ->addText('parent_title'); } + + public function testSetLocation() + { + $fieldsBuilder = $this->getMockBuilder('StoutLogic\AcfBuilder\FieldsBuilder') + ->setConstructorArgs(['parent']) + ->getMock(); + + $repeaterBuilder = new RepeaterBuilder('slides'); + $repeaterBuilder->setParentContext($fieldsBuilder); + + $fieldsBuilder->expects($this->once())->method('setLocation'); + + $repeaterBuilder->addText('title') + ->addWysiwyg('content') + ->setLocation('post_type', '==', 'page'); + } }
Calling setLocation on a RepeaterBuilder should delegate to the ParentContext. Closes issue #1
StoutLogic_acf-builder
train
a1a878de862ddf2f54caf496ec5e8522caab6bbf
diff --git a/payment_paypal/indico_payment_paypal/controllers.py b/payment_paypal/indico_payment_paypal/controllers.py index <HASH>..<HASH> 100644 --- a/payment_paypal/indico_payment_paypal/controllers.py +++ b/payment_paypal/indico_payment_paypal/controllers.py @@ -54,8 +54,7 @@ class RHPaypalIPN(RH): raise BadRequest def _process(self): - if not self._verify_business(): - return + self._verify_business() verify_params = list(chain(IPN_VERIFY_EXTRA_PARAMS, request.form.iteritems())) result = requests.post(current_plugin.settings.get('url'), data=verify_params).text if result != 'VERIFIED': diff --git a/payment_paypal/tests/controllers_test.py b/payment_paypal/tests/controllers_test.py index <HASH>..<HASH> 100644 --- a/payment_paypal/tests/controllers_test.py +++ b/payment_paypal/tests/controllers_test.py @@ -75,7 +75,6 @@ def test_ipn_is_transaction_duplicated(mocker, txn_id, payment_status, expected) @pytest.mark.usefixtures('db', 'request_context') @pytest.mark.parametrize('fail', ( - 'business', 'verify', 'dup_txn', 'fail', @@ -89,7 +88,6 @@ def test_ipn_process(mocker, fail): post = mocker.patch('indico_payment_paypal.controllers.requests.post') post.return_value.text = 'INVALID' if fail == 'verify' else 'VERIFIED' rh = RHPaypalIPN() - rh._verify_business = lambda: fail != 'business' rh._is_transaction_duplicated = lambda: fail == 'dup_txn' rh.event = MagicMock(id=1) rh.registrant = MagicMock() @@ -99,8 +97,8 @@ def test_ipn_process(mocker, fail): request.view_args = {'confId': rh.event.id} request.args = {'registrantId': '1'} request.form = {'payment_status': payment_status, 'txn_id': '12345', 'mc_gross': amount, - 'mc_currency': 'EUR'} + 'mc_currency': 'EUR', 'business': '[email protected]'} with PaypalPaymentPlugin.instance.plugin_context(): rh._process() - assert post.called == (fail != 'business') + assert post.called assert rt.called == (fail is None)
Payment/Paypal: Don't fail on business mismatch. Merchant ID and email seem to be interchangeable. THANKS PAYPAL.
indico_indico-plugins
train
d419f6d6c5855d8e5006fb08a63149088e4af303
diff --git a/src/system/modules/metamodelsattribute_timestamp/MetaModelAttributeTimestamp.php b/src/system/modules/metamodelsattribute_timestamp/MetaModelAttributeTimestamp.php index <HASH>..<HASH> 100644 --- a/src/system/modules/metamodelsattribute_timestamp/MetaModelAttributeTimestamp.php +++ b/src/system/modules/metamodelsattribute_timestamp/MetaModelAttributeTimestamp.php @@ -90,23 +90,43 @@ class MetaModelAttributeTimestamp extends MetaModelAttributeNumeric */ public function valueToWidget($varValue) { - if ($varValue === null) return ''; - if ($varValue != 0) return $varValue; - - //we need to parse the 0 timestamp manually because the widget will display an empty string - $strDateType = $this->get('timetype'); + if ($varValue === null) + { + return ''; + } - if($strDateType == 'time') + if ($varValue != 0) { return $varValue; } - else + + // We need to parse the 0 timestamp manually because the widget will display an empty string. + if ($varValue === 0 || $varValue === '') + { + return ''; + } + + // Get the right format for the field. + switch ($this->get('timetype')) { - $strDateType = empty($strDateType) ? 'date' : $strDateType; - $strDateType = ($strDateType == 'date')? $GLOBALS['TL_CONFIG']['dateFormat'] : $GLOBALS['TL_CONFIG']['datimFormat']; - - return date($strDateType, $varValue); + case 'time': + $strDateType = $GLOBALS['TL_CONFIG']['timeFormat']; + break; + + case 'date': + $strDateType = $GLOBALS['TL_CONFIG']['dateFormat']; + break; + + case 'datim': + $strDateType = $GLOBALS['TL_CONFIG']['datimFormat']; + break; + + default: + return $varValue; } + + // Return the data. + return date($strDateType, $varValue); } /** @@ -114,7 +134,15 @@ class MetaModelAttributeTimestamp extends MetaModelAttributeNumeric */ public function widgetToValue($varValue, $intId) { - return ($varValue === '')? null : $varValue; + // Check if we have some data. + if($varValue === '') + { + return null; + } + + // Make a unix timestamp from the string. + $date = new \DateTime($varValue); + return $date->getTimestamp(); } }
Improve the handling of the timestamp.
MetaModels_attribute_timestamp
train
15bb369ccde8416c788eaadfd6a4f76138a67843
diff --git a/pyiso.py b/pyiso.py index <HASH>..<HASH> 100644 --- a/pyiso.py +++ b/pyiso.py @@ -1279,6 +1279,7 @@ class RockRidge(RockRidgeBase): # For NM record if rr_name is not None: if curr_dr_len + self._ce_len() + self._nm_len(rr_name) > 254: + len_here = 0 if self.continuation_entry is None: self.continuation_entry = RockRidgeContinuation() curr_dr_len += self._ce_len() @@ -1289,12 +1290,10 @@ class RockRidge(RockRidgeBase): len_here = 254 - curr_dr_len - 5 self._add_nm(rr_name[:len_here]) self.posix_name_flags |= (1 << 0) - curr_dr_len += self._nm_len(self.posix_name) - self.continuation_entry._add_nm(rr_name[len_here:]) - self.continuation_entry.continue_length += self._nm_len(rr_name[len_here:]) - else: - self.continuation_entry._add_nm(rr_name) - self.continuation_entry.continue_length += self._nm_len(rr_name) + curr_dr_len += self._nm_len(rr_name[:len_here]) + + self.continuation_entry._add_nm(rr_name[len_here:]) + self.continuation_entry.continue_length += self._nm_len(rr_name[len_here:]) else: self._add_nm(rr_name) curr_dr_len += self._nm_len(rr_name)
Slightly simplify NM record.
clalancette_pycdlib
train
9f05fa0b088271f74633fc3f0443db36edb4db61
diff --git a/public/js/admin.js b/public/js/admin.js index <HASH>..<HASH> 100644 --- a/public/js/admin.js +++ b/public/js/admin.js @@ -37,6 +37,8 @@ $(document).ready(function() { var $setting = $(this); var old = $setting.data('old'); + $setting.removeClass('saved'); + if (old !== value) { $.ajax({ type: 'POST', @@ -46,6 +48,7 @@ $(document).ready(function() { } }).success(function(data) { $setting.data('old', value); + $setting.addClass('saved'); }); } });
Add/remove a CSS class when storing changed settings via AJAX.
fluxbb_core
train
e051afa3e2808eab94c5c9d550a7d854001b5df0
diff --git a/src/livestreamer/stream/hls.py b/src/livestreamer/stream/hls.py index <HASH>..<HASH> 100644 --- a/src/livestreamer/stream/hls.py +++ b/src/livestreamer/stream/hls.py @@ -139,6 +139,13 @@ class HLSStreamWorker(SegmentedStreamWorker): except ValueError as err: raise StreamError(err) + if playlist.is_master: + raise StreamError("Attempted to play a variant playlist, use " + "'hlsvariant://{0}' instead".format(self.stream.url)) + + if playlist.iframes_only: + raise StreamError("Streams containing I-frames only is not playable") + media_sequence = playlist.media_sequence or 0 sequences = [Sequence(media_sequence + i, s) for i, s in enumerate(playlist.segments)] @@ -149,9 +156,6 @@ class HLSStreamWorker(SegmentedStreamWorker): def process_sequences(self, playlist, sequences): first_sequence, last_sequence = sequences[0], sequences[-1] - if playlist.iframes_only: - raise StreamError("Streams containing I-frames only is not playable") - if (first_sequence.segment.key and first_sequence.segment.key.method != "NONE"):
stream.hls: Raise an error when encountering a variant playlist.
streamlink_streamlink
train
fee7bf6d8eef8c948df71ec3ff70414776face22
diff --git a/__tests__/search-test.js b/__tests__/search-test.js index <HASH>..<HASH> 100644 --- a/__tests__/search-test.js +++ b/__tests__/search-test.js @@ -32,7 +32,7 @@ describe('Search', function() { }, { header: 'Third' - } + }, ]; var search = TestUtils.renderIntoDocument( @@ -47,4 +47,56 @@ describe('Search', function() { expect(options[1].getDOMNode().value).toEqual(columns[0].property); expect(options[1].getDOMNode().textContent).toEqual(columns[0].header); }); + + it('should be able to yield results', function() { + var columns = [ + { + property: 'first', + header: 'First' + }, + ]; + var value = 'demo'; + var data = [ + { + first: value + }, + ]; + var result = function(d) { + expect(d.data).toEqual(data); + }; + var search = TestUtils.renderIntoDocument( + <Search columns={columns} data={data} onResult={result} /> + ); + + var input = TestUtils.findRenderedDOMComponentWithTag(search, 'input'); + input.getDOMNode().value = value; + + TestUtils.Simulate.change(input); + }); + + it('should be able to flag hidden results', function() { + var columns = [ + { + property: 'first', + header: 'First' + }, + ]; + var value = 'demo'; + var data = [ + { + first: value + }, + ]; + var result = function(d) { + expect(d.data[0]._visible).toEqual(false); + }; + var search = TestUtils.renderIntoDocument( + <Search columns={columns} data={data} onResult={result} /> + ); + + var input = TestUtils.findRenderedDOMComponentWithTag(search, 'input'); + input.getDOMNode().value = value + value; + + TestUtils.Simulate.change(input); + }); });
Add extra tests for search. Related to #3.
reactabular_reactabular
train
6e72b00dce0befeb7000e4cce88aeba0a5ac8822
diff --git a/src/frontend/org/voltdb/RealVoltDB.java b/src/frontend/org/voltdb/RealVoltDB.java index <HASH>..<HASH> 100644 --- a/src/frontend/org/voltdb/RealVoltDB.java +++ b/src/frontend/org/voltdb/RealVoltDB.java @@ -487,9 +487,11 @@ public class RealVoltDB implements VoltDBInterface, RestoreAgent.Callback hostLog.error("Not a valid XML deployment file at URL: " + m_config.m_pathToDeployment); VoltDB.crashVoltDB(); } + + // note the heatbeats are specified in seconds in xml, but ms internally HeartbeatType hbt = m_deployment.getHeartbeat(); if (hbt != null) - m_config.m_deadHostTimeoutMS = hbt.getTimeout(); + m_config.m_deadHostTimeoutMS = hbt.getTimeout() * 1000; // create a dummy catalog to load deployment info into Catalog catalog = new Catalog(); diff --git a/src/frontend/org/voltdb/VoltDBNodeFailureFaultHandler.java b/src/frontend/org/voltdb/VoltDBNodeFailureFaultHandler.java index <HASH>..<HASH> 100644 --- a/src/frontend/org/voltdb/VoltDBNodeFailureFaultHandler.java +++ b/src/frontend/org/voltdb/VoltDBNodeFailureFaultHandler.java @@ -100,6 +100,10 @@ class VoltDBNodeFailureFaultHandler implements FaultHandler { new Thread() { @Override public void run() { + // if we see an early fault (during startup), then it's ok not to + // notify the export manager + if (ExportManager.instance() == null) + return; //Notify the export manager the cluster topology has changed ExportManager.instance().notifyOfClusterTopologyChange(); }
Fixing ENG-<I>. I think if you configure the heartbeat timeout (as opposed to not including it in the deployment file), I get the units wrong and we timeout like crazy fast yo.
VoltDB_voltdb
train
9a272f307db4282e4f72d66b6a1407f7514bc74d
diff --git a/doctr/__main__.py b/doctr/__main__.py index <HASH>..<HASH> 100644 --- a/doctr/__main__.py +++ b/doctr/__main__.py @@ -443,9 +443,12 @@ def configure(args, parser): env: global: + # Doctr deploy key for {deploy_repo} - secure: "{encrypted_variable}" - """.format(options=options, N=N, encrypted_variable=encrypted_variable.decode('utf-8')))) + """.format(options=options, N=N, + encrypted_variable=encrypted_variable.decode('utf-8'), + deploy_repo=deploy_repo))) print(dedent("""\ {N}. Commit and push these changes to your github repository.
Add a comment to the .travis.yml secure variable, since there can be multiple variables
drdoctr_doctr
train