hash | diff | message | project | split |
---|---|---|---|---|
c5a1ca0b34a57366415df60099ab0918a7579469 | diff --git a/lib/puppet/provider/zfs/solaris.rb b/lib/puppet/provider/zfs/solaris.rb
index <HASH>..<HASH> 100644
--- a/lib/puppet/provider/zfs/solaris.rb
+++ b/lib/puppet/provider/zfs/solaris.rb
@@ -31,7 +31,7 @@ Puppet::Type.type(:zfs).provide(:solaris) do
end
end
- [:mountpoint, :compression, :copies, :quota, :reservation, :sharenfs, :snapdir].each do |field|
+ [:mountpoint, :recordsize, :aclmode, :aclinherit, :primarycache, :secondarycache, :compression, :copies, :quota, :reservation, :sharenfs, :snapdir].each do |field|
define_method(field) do
zfs(:get, "-H", "-o", "value", field, @resource[:name]).strip
end
diff --git a/lib/puppet/type/zfs.rb b/lib/puppet/type/zfs.rb
index <HASH>..<HASH> 100755
--- a/lib/puppet/type/zfs.rb
+++ b/lib/puppet/type/zfs.rb
@@ -12,6 +12,26 @@ module Puppet
desc "The mountpoint property."
end
+ newproperty(:recordsize) do
+ desc "The recordsize property."
+ end
+
+ newproperty(:aclmode) do
+ desc "The aclmode property."
+ end
+
+ newproperty(:aclinherit) do
+ desc "The aclinherit property."
+ end
+
+ newproperty(:primarycache) do
+ desc "The primarycache property."
+ end
+
+ newproperty(:secondarycache) do
+ desc "The secondarycache property."
+ end
+
newproperty(:compression) do
desc "The compression property."
end
@@ -33,7 +53,7 @@ module Puppet
end
newproperty(:snapdir) do
- desc "The sharenfs property."
+ desc "The snapdir property."
end
autorequire(:zpool) do
@@ -48,4 +68,3 @@ module Puppet
end
end
end
-
diff --git a/spec/unit/provider/zfs/solaris_spec.rb b/spec/unit/provider/zfs/solaris_spec.rb
index <HASH>..<HASH> 100755
--- a/spec/unit/provider/zfs/solaris_spec.rb
+++ b/spec/unit/provider/zfs/solaris_spec.rb
@@ -75,7 +75,7 @@ describe provider_class do
end
- [:mountpoint, :compression, :copies, :quota, :reservation, :sharenfs, :snapdir].each do |prop|
+ [:mountpoint, :recordsize, :aclmode, :aclinherit, :primarycache, :secondarycache, :compression, :copies, :quota, :reservation, :sharenfs, :snapdir].each do |prop|
describe "when getting the #{prop} value" do
it "should call zfs with :get, #{prop} and this zfs" do
@provider.expects(:zfs).with(:get, "-H", "-o", "value", prop, @resource[:name]).returns("value\n") | (#<I>) Include additional zfs properties
We added the following zfs properties to the zfs provider:
:recordsize, :aclmode, :aclinherit, :primarycache, :secondarycache
Currently, zfs users have to chain an exec to the zfs filesystem creation, and the inclusion of these properties would allow puppet to set them natively. | puppetlabs_puppet | train |
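The Ruby provider above metaprograms one getter per property with `define_method`, each shelling out to `zfs get`. As a rough Python sketch of the same pattern (the `zfs` invocation and class names are illustrative, not from the commit):

```python
# Minimal sketch (not from the commit): generate one getter per ZFS
# property at class-definition time, mirroring the Ruby define_method loop.
import subprocess

FIELDS = ["mountpoint", "recordsize", "aclmode", "aclinherit",
          "primarycache", "secondarycache", "compression", "copies",
          "quota", "reservation", "sharenfs", "snapdir"]

class ZfsProvider:
    def __init__(self, name):
        self.name = name  # e.g. "tank/home"

    def _zfs_get(self, field):
        # Equivalent of `zfs get -H -o value <field> <name>`.
        out = subprocess.check_output(
            ["zfs", "get", "-H", "-o", "value", field, self.name])
        return out.decode().strip()

def _make_getter(field):
    # Closure so each generated method captures its own field name.
    def getter(self):
        return self._zfs_get(field)
    return getter

for _field in FIELDS:
    setattr(ZfsProvider, _field, _make_getter(_field))
```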
0ed12c599e4e73ca448c3c4e5349ea14468672b7 | diff --git a/src/server/storage/safestorage.js b/src/server/storage/safestorage.js
index <HASH>..<HASH> 100644
--- a/src/server/storage/safestorage.js
+++ b/src/server/storage/safestorage.js
@@ -241,8 +241,7 @@ SafeStorage.prototype.createProject = function (data, callback) {
var deferred = Q.defer(),
rejected = false,
userAuthInfo,
- self = this,
- dbProject;
+ self = this;
rejected = check(data !== null && typeof data === 'object', deferred, 'data is not an object.') ||
check(typeof data.projectName === 'string', deferred, 'data.projectName is not a string.') ||
@@ -255,33 +254,30 @@ SafeStorage.prototype.createProject = function (data, callback) {
}
if (rejected === false) {
- data.projectId = data.username + CONSTANTS.PROJECT_ID_SEP + data.projectName;
this.gmeAuth.getUser(data.username)
.then(function (user) {
+ var info = {
+ createdAt: (new Date()).toISOString()
+ };
userAuthInfo = user.projects;
if (user.canCreate) {
- return Storage.prototype.createProject.call(self, data);
+ return self.gmeAuth.addProject(data.username, data.projectName, info);
} else {
throw new Error('Not authorized to create a new project.');
}
})
- .then(function (dbProject_) {
- dbProject = dbProject_;
- return self.gmeAuth.authorizeByUserId(data.username, dbProject.projectId, 'create', {
+ .then(function (projectId) {
+ data.projectId = projectId;
+ return self.gmeAuth.authorizeByUserId(data.username, projectId, 'create', {
read: true,
write: true,
delete: true
});
})
.then(function () {
- var info = {
- createdAt: (new Date()).toISOString()
- //TODO: populate with more data here, e.g. description
- };
-
- return self.gmeAuth.addProject(data.username, data.projectName, info);
+ return Storage.prototype.createProject.call(self, data);
})
- .then(function () {
+ .then(function (dbProject) {
var project = new UserProject(dbProject, self, self.logger, self.gmeConfig);
deferred.resolve(project);
}) | #<I> Authorize before creating project in db.
Could fix recurring test issue in standalone.
Former-commit-id: a<I>ce<I>dd<I>b6e<I>b9aa6cee<I>d | webgme_webgme-engine | train |
7fb94fd69bd94e044b2fabad7611399781e150d2 | diff --git a/lib/dai-plugin-mcd/test/CdpTypeService.spec.js b/lib/dai-plugin-mcd/test/CdpTypeService.spec.js
index <HASH>..<HASH> 100644
--- a/lib/dai-plugin-mcd/test/CdpTypeService.spec.js
+++ b/lib/dai-plugin-mcd/test/CdpTypeService.spec.js
@@ -12,13 +12,16 @@ const scenarios = [['ETH-A', ETH], ['ETH-B', ETH], ['REP-A', REP]];
let systemValues = {};
function setSystemValues() {
- const values = [[1000, 1.5, '2.5'], [500, 1.8, '1.5'], [800, 1.8, '2.0']];
+ const values = [[12.7, 100000, 1.5, '5.0', '21.5', 0.05], [4, 100000, 2, '4.0', '8', 0.05], [6.7, 5000, 1.8, '10.0', '13.5', 0.08]];
let scenario;
for (scenario = 0; scenario < scenarios.length; scenario++) {
systemValues[scenarios[scenario][0]] = {
- expectedCeiling: values[scenario][0],
- expectedRatio: values[scenario][1],
- expectedFee: values[scenario][2]
+ expectedTotalCollateral: values[scenario][0],
+ expectedCeiling: values[scenario][1],
+ expectedRatio: values[scenario][2],
+ expectedFee: values[scenario][3],
+ expectedTotalDebt: values[scenario][4],
+ expectedPenalty: values[scenario][5]
};
}
}
@@ -60,13 +63,15 @@ describe.each(scenarios)('%s', (ilk, GEM) => {
});
test('get total collateral', async () => {
+ const { expectedTotalCollateral } = systemValues[ilk];
const total = await cdpType.getTotalCollateral();
- expect(total).toEqual(GEM(12.7));
+ expect(total).toEqual(GEM(expectedTotalCollateral));
});
-
+
test('get total collateral in USD', async () => {
+ const { expectedTotalCollateral } = systemValues[ilk];
const collateral = await cdpType.getTotalCollateral(USD);
- expect(collateral.toNumber()).toEqual(USD(20).toNumber());
+ expect(collateral.toNumber()).toEqual(USD(expectedTotalCollateral * 10).toNumber());
});
test('throw error for invalid collateral type', async () => {
@@ -81,8 +86,9 @@ describe.each(scenarios)('%s', (ilk, GEM) => {
});
test('get total debt', async () => {
+ const { expectedTotalDebt } = systemValues[ilk];
const debt = await cdpType.getTotalDebt();
- expect(debt).toEqual(MDAI(4));
+ expect(debt).toEqual(MDAI(expectedTotalDebt));
});
test('get debt ceiling', async () => {
@@ -103,8 +109,9 @@ describe.each(scenarios)('%s', (ilk, GEM) => {
});
test('get liquidation penalty', async () => {
+ const { expectedPenalty } = systemValues[ilk];
const penalty = await cdpType.getLiquidationPenalty();
- expect(penalty).toBe(0.05);
+ expect(penalty).toBe(expectedPenalty);
});
test('get annual stability fee', async () => { | Update expected values in cdp type service spec | makerdao_dai.js | train |
48e2c38736f5d2c7bbc53ea20f872e6f31c9a0a4 | diff --git a/core/src/main/java/org/springframework/security/providers/cas/CasAuthenticationProvider.java b/core/src/main/java/org/springframework/security/providers/cas/CasAuthenticationProvider.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/org/springframework/security/providers/cas/CasAuthenticationProvider.java
+++ b/core/src/main/java/org/springframework/security/providers/cas/CasAuthenticationProvider.java
@@ -28,6 +28,8 @@ import org.springframework.security.ui.cas.CasProcessingFilter;
import org.springframework.security.userdetails.UserDetails;
import org.springframework.security.userdetails.UserDetailsService;
+import org.springframework.security.userdetails.UserDetailsChecker;
+import org.springframework.security.userdetails.checker.AccountStatusUserDetailsChecker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -61,6 +63,7 @@ public class CasAuthenticationProvider implements AuthenticationProvider, Initia
//~ Instance fields ================================================================================================
private UserDetailsService userDetailsService;
+ private UserDetailsChecker userDetailsChecker = new AccountStatusUserDetailsChecker();
private CasProxyDecider casProxyDecider;
protected MessageSourceAccessor messages = SpringSecurityMessageSource.getAccessor();
private StatelessTicketCache statelessTicketCache = new NullStatelessTicketCache();
@@ -142,6 +145,7 @@ public class CasAuthenticationProvider implements AuthenticationProvider, Initia
// Lookup user details
UserDetails userDetails = userDetailsService.loadUserByUsername(response.getUser());
+ userDetailsChecker.check(userDetails);
// Construct CasAuthenticationToken
return new CasAuthenticationToken(this.key, userDetails, authentication.getCredentials(), | SEC-<I>: Added account status checking to Cas provider | spring-projects_spring-security | train |
9fdb13e6ceacff2739bfd9956a5097bde3392d25 | diff --git a/src/GraphQL/Annotation/Annotation.php b/src/GraphQL/Annotation/Annotation.php
index <HASH>..<HASH> 100644
--- a/src/GraphQL/Annotation/Annotation.php
+++ b/src/GraphQL/Annotation/Annotation.php
@@ -108,7 +108,7 @@ function deannotate_enum(string $class_name, \ReflectionClass $reflection, EnumT
/** @var EnumVal $annotation */
$annotation = $parsed[0];
- $values[$const->getName()] = [
+ $values[$annotation->name ?? $const->getName()] = [
'name' => $annotation->name ?? $const->getName(),
'description' => $annotation->description,
'value' => $annotation->value ?? $const->getValue(),
@@ -151,9 +151,9 @@ function deannotate_object(array $types, AnnotationReader $reader, string $class
$annotated_method_fields = array_reduce(
$reflection->getMethods(\ReflectionMethod::IS_PUBLIC | \ReflectionMethod::IS_STATIC),
static function (array $fields, \ReflectionMethod $method) use ($types, $reader, $annotation): array {
- $method_name = $method->getName();
/** @var Field|null $annotation */
$annotation = $reader->getMethodAnnotation($method, Field::class);
+ $method_name = $annotation->name ?? $method->getName();
if ($annotation === null) {
return $fields;
@@ -197,13 +197,13 @@ function deannotate_properties(array $types, \ReflectionClass $reflection, Annot
$fields = [];
foreach ($reflection->getProperties(\ReflectionProperty::IS_PUBLIC) as $property) {
- $prop_name = $property->getName();
/** @var Field $annotation */
$annotation = $reader->getPropertyAnnotation($property, Field::class);
+ $prop_name = $annotation->name ?? $property->getName();
$fields[$prop_name] = [
+ 'name' => $prop_name,
'type' => deannotate_type($types, $annotation),
- 'name' => $annotation->name ?? $prop_name,
'description' => $annotation->description,
];
} | Allow name override by the Annotation | leocavalcante_siler | train |
8745cf1a9c89250084bd9d67f956a530e6a657ab | diff --git a/squad/settings.py b/squad/settings.py
index <HASH>..<HASH> 100644
--- a/squad/settings.py
+++ b/squad/settings.py
@@ -192,7 +192,7 @@ LOGGING = {
'django': {
'handlers': ['console'],
'propagate': False,
- 'level': os.getenv('DJANGO_LOG_LEVEL', DEBUG and 'DEBUG' or 'INFO'),
+ 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'': {
'handlers': ['console'], | settings: don't debug Django stuff by default | Linaro_squad | train |
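The default expression removed here, `DEBUG and 'DEBUG' or 'INFO'`, is the pre-ternary `X and A or B` idiom, which misfires whenever the "then" value is falsy. A small standalone sketch of the idiom and its pitfall:

```python
# Sketch of the idiom the commit removed. `X and A or B` predates the
# conditional expression and breaks when A is falsy.
DEBUG = True

old_style = DEBUG and 'DEBUG' or 'INFO'    # 'DEBUG' when DEBUG is truthy
modern = 'DEBUG' if DEBUG else 'INFO'      # equivalent, and safer
assert old_style == modern == 'DEBUG'

# The pitfall: with a falsy "then" value the idiom silently falls through.
broken = DEBUG and '' or 'INFO'            # yields 'INFO', not ''
assert broken == 'INFO'
assert ('' if DEBUG else 'INFO') == ''     # the ternary gets it right
```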
4d47013a007ca07edbc1af543e3faf28ecbfddb2 | diff --git a/tests/Picnik/Requests/Word/AbstractWordRequestTest.php b/tests/Picnik/Requests/Word/AbstractWordRequestTest.php
index <HASH>..<HASH> 100644
--- a/tests/Picnik/Requests/Word/AbstractWordRequestTest.php
+++ b/tests/Picnik/Requests/Word/AbstractWordRequestTest.php
@@ -52,7 +52,7 @@ class AbstractWordRequestTest extends TestCase
$r = $this->getAbstractInstance();
$r->useCanonical();
- $this->assertTrue($r->getParameters()['useCanonical']);
+ $this->assertSame('true', $r->getParameters()['useCanonical']);
}
public function testUsingCanonicalWithFalseParameter()
@@ -60,7 +60,7 @@ class AbstractWordRequestTest extends TestCase
$r = $this->getAbstractInstance();
$r->useCanonical(false);
- $this->assertFalse($r->getParameters()['useCanonical']);
+ $this->assertSame('false', $r->getParameters()['useCanonical']);
}
public function testUsingCanonicalWithNoneBooleanParameter() | Update test case to cover issue #1.
- Test cases covering the `useCanonical` method have been updated to
ensure that the output parameter is being converted into a string
representation of the boolean value.
This covers the bug in issue #1. | alanly_picnik | train |
3b31aa859f9355d115f9463449ae284a98ff2f96 | diff --git a/lib/packetgen/types/array.rb b/lib/packetgen/types/array.rb
index <HASH>..<HASH> 100644
--- a/lib/packetgen/types/array.rb
+++ b/lib/packetgen/types/array.rb
@@ -46,6 +46,7 @@ module PacketGen
obj = klass.new.read(str)
self.push obj
str.slice!(0, obj.sz)
+ break if @counter and self.size == @counter.to_i
end
self
end | Types::Array: stop reading when counter is reached | sdaubert_packetgen | train |
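The fix makes the read loop stop once an optional counter field is satisfied instead of consuming the whole buffer. A minimal Python sketch of that loop shape (the `parse_one` callback and the fixed-width example are assumptions, not PacketGen's API):

```python
# Sketch of the fixed loop: consume elements from a buffer, but stop as
# soon as an optional counter says the expected number has been read.
def read_array(buf, parse_one, counter=None):
    items = []
    while buf:
        obj, size = parse_one(buf)   # returns (element, bytes consumed)
        items.append(obj)
        buf = buf[size:]
        if counter is not None and len(items) == int(counter):
            break                    # the fix: honour the counter field
    return items

# Example: fixed-width 2-byte big-endian integers, counter caps at 2.
two_byte = lambda b: (int.from_bytes(b[:2], "big"), 2)
assert read_array(b"\x00\x01\x00\x02\x00\x03", two_byte, counter=2) == [1, 2]
```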
ce06c0e2a981a1948c92d9118b47741e481d7d74 | diff --git a/lib/conceptql/cli.rb b/lib/conceptql/cli.rb
index <HASH>..<HASH> 100644
--- a/lib/conceptql/cli.rb
+++ b/lib/conceptql/cli.rb
@@ -34,7 +34,7 @@ module ConceptQL
aliases: :s,
desc: 'schema for database (PostgreSQL only)'
- desc 'run_statement statement_file', 'Evals the statement from the statement file and executes it agains the DB specified by DB_URL'
+ desc 'run_statement statement_file', 'Reads the ConceptQL statement from the statement file and executes it against the DB'
def run_statement(statement_file)
q = ConceptQL::Query.new(db(options), criteria_from_file(statement_file))
puts q.query.sql
@@ -42,7 +42,19 @@ module ConceptQL
pp q.execute
end
- desc 'fake_graph file', 'Evals the file and shows the contents as a ConceptQL graph'
+ desc 'show_graph statement_file', 'Reads the ConceptQL statement from the file and shows the contents as a ConceptQL graph'
+ def show_graph(file)
+ graph_it(criteria_from_file(file))
+ end
+
+ desc 'show_and_tell_file statement_file', 'Reads the ConceptQL statement from the file and shows the contents as a ConceptQL graph, then executes the statement against the DB'
+ option :full
+ def show_and_tell(file)
+ show_and_tell(criteria_from_file(file), options)
+ end
+
+ private
+ desc 'fake_graph file', 'Reads the ConceptQL statement from the file and shows the contents as a ConceptQL graph'
def fake_graph(file)
require_relative 'graph'
require_relative 'tree'
@@ -54,17 +66,6 @@ module ConceptQL
system('open /tmp/graph.pdf')
end
- desc 'show_graph file', 'Evals the file and shows the contents as a ConceptQL graph'
- def show_graph(file)
- graph_it(criteria_from_file(file))
- end
-
- desc 'show_and_tell_file file', 'Evals the file and shows the contents as a ConceptQL graph, then executes the statement against our test database'
- option :full
- def show_and_tell_file(file)
- show_and_tell(criteria_from_file(file), options)
- end
-
desc 'show_and_tell_db conceptql_id', 'Fetches the ConceptQL from a DB and shows the contents as a ConceptQL graph, then executes the statement against our test database'
option :full
def show_and_tell_db(conceptql_id)
@@ -79,7 +80,6 @@ module ConceptQL
graph_it(result[:statement].to_hash, db, result[:label])
end
- private
def fetch_conceptql(conceptql_id)
my_db = db(options)
my_db.extension(:pg_array, :pg_json) | CLI: fix wording, remove some commands
Some commands aren't applicable to most users, so I'm hiding them. | outcomesinsights_conceptql | train |
e4fd86abdcfdc2ad39331581472692d45ec7cf0a | diff --git a/src/main/java/com/google/cloud/genomics/dataflow/utils/PipelineFactory.java b/src/main/java/com/google/cloud/genomics/dataflow/utils/PipelineFactory.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/google/cloud/genomics/dataflow/utils/PipelineFactory.java
+++ b/src/main/java/com/google/cloud/genomics/dataflow/utils/PipelineFactory.java
@@ -41,7 +41,6 @@ import com.google.api.services.bigquery.model.TableDataList;
import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableList;
import com.google.api.services.bigquery.model.TableReference;
-import com.google.api.services.bigquery.model.TableRow;
import com.google.api.services.bigquery.model.TableSchema;
import com.google.api.services.bigquery.model.ViewDefinition;
import com.google.api.services.compute.model.AccessConfig;
@@ -529,7 +528,6 @@ public class PipelineFactory {
TableList.class,
TableList.Tables.class,
TableReference.class,
- TableRow.class,
TableSchema.class,
Tags.class,
TargetHttpProxy.class, | Huh? A coder for TableRow is already registered by default | googlegenomics_dataflow-java | train |
93356d463581f2ad61c03cb7fda9974235675a23 | diff --git a/plugins/communicators/winrm/command_filters/rm.rb b/plugins/communicators/winrm/command_filters/rm.rb
index <HASH>..<HASH> 100644
--- a/plugins/communicators/winrm/command_filters/rm.rb
+++ b/plugins/communicators/winrm/command_filters/rm.rb
@@ -31,9 +31,9 @@ module VagrantPlugins
ret_cmd = ''
if recurse
- ret_cmd = "rm \"#{dir}\" -recurse -force"
+ ret_cmd = "if (Test-Path \"#{dir}\") {Remove-Item \"#{dir}\" -force -recurse}"
else
- ret_cmd = "if (Test-Path #{dir}) {Remove-Item #{dir} -force}"
+ ret_cmd = "if (Test-Path \"#{dir}\") {Remove-Item \"#{dir}\" -force}"
end
return ret_cmd
end
diff --git a/plugins/provisioners/docker/client.rb b/plugins/provisioners/docker/client.rb
index <HASH>..<HASH> 100644
--- a/plugins/provisioners/docker/client.rb
+++ b/plugins/provisioners/docker/client.rb
@@ -127,10 +127,8 @@ module VagrantPlugins
def create_container(config)
args = container_run_args(config)
- @machine.communicate.sudo %[rm -f #{config[:cidfile]}
- ]
- @machine.communicate.sudo %[docker run #{args}
- ]
+ @machine.communicate.sudo %[rm -f "#{config[:cidfile]}"]
+ @machine.communicate.sudo %[docker run #{args}]
sha = Digest::SHA1.hexdigest(args)
container_data_path(config).open("w+") do |f|
diff --git a/test/unit/plugins/communicators/winrm/command_filter_test.rb b/test/unit/plugins/communicators/winrm/command_filter_test.rb
index <HASH>..<HASH> 100644
--- a/test/unit/plugins/communicators/winrm/command_filter_test.rb
+++ b/test/unit/plugins/communicators/winrm/command_filter_test.rb
@@ -54,20 +54,22 @@ describe VagrantPlugins::CommunicatorWinRM::CommandFilter, unit: true do
it 'filters out rm recurse commands' do
expect(subject.filter('rm -Rf /some/dir')).to eq(
- "rm \"/some/dir\" -recurse -force")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force -recurse}")
expect(subject.filter('rm -fr /some/dir')).to eq(
- "rm \"/some/dir\" -recurse -force")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force -recurse}")
expect(subject.filter('rm -r /some/dir')).to eq(
- "rm \"/some/dir\" -recurse -force")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force -recurse}")
expect(subject.filter('rm -r "/some/dir"')).to eq(
- "rm \"/some/dir\" -recurse -force")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force -recurse}")
end
it 'filters out rm commands' do
expect(subject.filter('rm /some/dir')).to eq(
- "if (Test-Path /some/dir) {Remove-Item /some/dir -force}")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force}")
expect(subject.filter('rm -f /some/dir')).to eq(
- "if (Test-Path /some/dir) {Remove-Item /some/dir -force}")
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force}")
+ expect(subject.filter('rm -f "/some/dir"')).to eq(
+ "if (Test-Path \"/some/dir\") {Remove-Item \"/some/dir\" -force}")
end
it 'filters out mkdir commands' do | Update rm filters to test for path
This commit updates the rm filter for winrm to operate the way rm works
in bash. If a folder doesn't exist, the command returns 0 rather than 1. | hashicorp_vagrant | train |
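The filter rewrites a Unix `rm` into a `Test-Path`-guarded `Remove-Item` so that deleting a missing path exits cleanly, as `rm -f` does. A rough Python sketch of that translation (the regex here is an illustration, not the plugin's actual matcher):

```python
import re

# Wrap Remove-Item in a Test-Path guard so removing a missing path
# succeeds, the way `rm -f` does in bash.
def filter_rm(cmd):
    m = re.match(r'rm\s+(?:-(\w+)\s+)?"?([^"]+?)"?$', cmd)
    if not m:
        return cmd
    flags, path = m.group(1) or "", m.group(2)
    recurse = " -recurse" if "r" in flags.lower() else ""
    return ('if (Test-Path "{0}") {{Remove-Item "{0}" -force{1}}}'
            .format(path, recurse))

assert filter_rm('rm -Rf /some/dir') == \
    'if (Test-Path "/some/dir") {Remove-Item "/some/dir" -force -recurse}'
assert filter_rm('rm -f "/some/dir"') == \
    'if (Test-Path "/some/dir") {Remove-Item "/some/dir" -force}'
```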
d0a62a1531008ce427c2b3744aee1a527705d910 | diff --git a/ospd/command/command.py b/ospd/command/command.py
index <HASH>..<HASH> 100644
--- a/ospd/command/command.py
+++ b/ospd/command/command.py
@@ -253,9 +253,20 @@ class GetScannerDetails(BaseCommand):
@return: Response string for <get_scanner_details> command.
"""
+ list_all = xml.get('list_all')
+ list_all = True if list_all == '1' else False
+
desc_xml = Element('description')
desc_xml.text = self._daemon.get_scanner_description()
scanner_params = self._daemon.get_scanner_params()
+
+ if not list_all:
+ scanner_params = {
+ key: value
+ for (key, value) in scanner_params.items()
+ if value.get('visible_for_client') is 1
+ }
+
details = [
desc_xml,
OspResponse.create_scanner_params_xml(scanner_params), | Add list_all attribute to get all available params
By default we only get the params visible to the
client and not all supported params.
With list_all we can get all supported params. | greenbone_ospd | train |
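The new code keeps only parameters whose `visible_for_client` flag is set unless `list_all` is given. A small sketch of that filter (sample parameter names are made up); note the diff's `is 1` comparison relies on CPython small-int caching, where `== 1` is the safer spelling:

```python
# Sketch of the visibility filter with the safer equality comparison.
def visible_params(scanner_params, list_all=False):
    if list_all:
        return scanner_params
    return {key: value for key, value in scanner_params.items()
            if value.get('visible_for_client') == 1}

params = {
    'timeout': {'default': 60, 'visible_for_client': 1},
    'internal_socket': {'default': '/tmp/x', 'visible_for_client': 0},
}
assert list(visible_params(params)) == ['timeout']
assert visible_params(params, list_all=True) is params
```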
2464bf2f3a13e0df1437df79b171e308d71c42a1 | diff --git a/lib/xcflushd/storage.rb b/lib/xcflushd/storage.rb
index <HASH>..<HASH> 100644
--- a/lib/xcflushd/storage.rb
+++ b/lib/xcflushd/storage.rb
@@ -155,8 +155,11 @@ module Xcflushd
logger.error(SOME_REPORTS_MISSING_ERROR)
else
keys.each_with_index do |key, i|
- # The usage could be empty if we failed to rename the key in the
- # previous step. hgetall returns {} for keys that do not exist.
+ # hgetall returns {} for keys that do not exist. That can happen
+ # for 2 reasons:
+ # 1) Apicast-xc does not guarantee that a key in the set of cached
+ # reports will always exist.
+ # 2) We failed to rename the key in the previous step.
unless usages[i].empty?
service_id, creds = storage_keys.service_and_creds(key, suffix)
result << { service_id: service_id, | storage: document case where a key in the set of cached reports does not exist | 3scale_xcflushd | train |
5e71cc91c3dfc5d00755a5d7858cfe5b882f4763 | diff --git a/django_jenkins/tasks/run_jshint.py b/django_jenkins/tasks/run_jshint.py
index <HASH>..<HASH> 100755
--- a/django_jenkins/tasks/run_jshint.py
+++ b/django_jenkins/tasks/run_jshint.py
@@ -53,7 +53,7 @@ class Task(BaseTask):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, err = process.communicate()
retcode = process.poll()
- if retcode not in [0, 1]: # normal csslint return codes
+ if retcode not in [0, 2]: # normal jshint return codes
raise CalledProcessError(retcode, cmd, output=output + '\n' + err)
self.output.write(output.decode('utf-8')) | Updated return codes from jshint
jshint return codes changed after version <I>, more information here:
<URL> | kmmbvnr_django-jenkins | train |
657410c13243366fdae3217777729a0fa65521d0 | diff --git a/nodeconductor/structure/tasks.py b/nodeconductor/structure/tasks.py
index <HASH>..<HASH> 100644
--- a/nodeconductor/structure/tasks.py
+++ b/nodeconductor/structure/tasks.py
@@ -275,10 +275,10 @@ def recover_erred_service(service_project_link_str, is_iaas=False):
is_active = False
if is_active:
- for entity in (spl, settings):
- if entity.state == SynchronizationStates.ERRED:
- entity.set_in_sync_from_erred()
- entity.save()
+ for entity in (spl, settings):
+ if entity.state == SynchronizationStates.ERRED:
+ entity.set_in_sync_from_erred()
+ entity.save()
else:
logger.info('Failed to recover service settings %s.' % settings) | Fix typo (NC-<I>) | opennode_waldur-core | train |
3eb3803cf074435d79ed6bffa3c655788f8840d2 | diff --git a/webhookendpoint.go b/webhookendpoint.go
index <HASH>..<HASH> 100644
--- a/webhookendpoint.go
+++ b/webhookendpoint.go
@@ -29,19 +29,20 @@ type WebhookEndpointListParams struct {
// For more details see https://stripe.com/docs/api#webhook_endpoints.
type WebhookEndpoint struct {
APIResource
- APIVersion string `json:"api_version"`
- Application string `json:"application"`
- Connect bool `json:"connect"`
- Created int64 `json:"created"`
- Deleted bool `json:"deleted"`
- Description string `json:"description"`
- EnabledEvents []string `json:"enabled_events"`
- ID string `json:"id"`
- Livemode bool `json:"livemode"`
- Object string `json:"object"`
- Secret string `json:"secret"`
- Status string `json:"status"`
- URL string `json:"url"`
+ APIVersion string `json:"api_version"`
+ Application string `json:"application"`
+ Connect bool `json:"connect"`
+ Created int64 `json:"created"`
+ Deleted bool `json:"deleted"`
+ Description string `json:"description"`
+ EnabledEvents []string `json:"enabled_events"`
+ ID string `json:"id"`
+ Livemode bool `json:"livemode"`
+ Metadata map[string]string `json:"metadata"`
+ Object string `json:"object"`
+ Secret string `json:"secret"`
+ Status string `json:"status"`
+ URL string `json:"url"`
}
// WebhookEndpointList is a list of webhook endpoints as retrieved from a list endpoint. | Add `Metadata` on `WebhookEndpoint` | stripe_stripe-go | train |
699c65e6db9c97e4cbd246aa4c0e59f5a266e954 | diff --git a/tensor2tensor/layers/common_attention.py b/tensor2tensor/layers/common_attention.py
index <HASH>..<HASH> 100644
--- a/tensor2tensor/layers/common_attention.py
+++ b/tensor2tensor/layers/common_attention.py
@@ -2912,8 +2912,9 @@ def multihead_attention(query_antecedent,
else:
# Inplace update is required for inference on TPU.
# Inplace_ops only supports inplace_update on the first dimension.
- # TODO(shibow): explore updating the entire Tensor instead of using
- # inplace_ops to avoid the transposes.
+ # The performance of current implementation is better than updating
+ # the tensor by adding the result of matmul(one_hot,
+ # update_in_current_step)
tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3])
tmp_k = common_layers.tf_inplace_ops().alias_inplace_update(
tmp_k, decode_loop_step, tf.squeeze(k, axis=2))
diff --git a/tensor2tensor/layers/common_layers.py b/tensor2tensor/layers/common_layers.py
index <HASH>..<HASH> 100644
--- a/tensor2tensor/layers/common_layers.py
+++ b/tensor2tensor/layers/common_layers.py
@@ -1552,8 +1552,9 @@ def conv_relu_conv(inputs,
else:
# Inplace update is required for inference on TPU.
# Inplace_ops only supports inplace_update on the first dimension.
- # TODO(shibow): explore updating the entire Tensor instead of using
- # inplace_ops to avoid the transposes.
+ # The performance of current implementation is better than updating
+ # the tensor by adding the result of matmul(one_hot,
+ # update_in_current_step)
tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
tmp_f = tf_inplace_ops().alias_inplace_update(
tmp_f, decode_loop_step * tf.shape(inputs)[1], | internal change, just adding comments.
PiperOrigin-RevId: <I> | tensorflow_tensor2tensor | train |
9c127c3f91748edbe50b95a34c7da2dacb3b3765 | diff --git a/store/etcdv3/meta/etcd.go b/store/etcdv3/meta/etcd.go
index <HASH>..<HASH> 100644
--- a/store/etcdv3/meta/etcd.go
+++ b/store/etcdv3/meta/etcd.go
@@ -275,7 +275,6 @@ func (e *ETCD) BindStatus(ctx context.Context, entityKey, statusKey, statusValue
// There isn't a status bound to the entity.
statusTxn := entityTxn.Responses[0].GetResponseTxn()
if !statusTxn.Succeeded {
- e.revokeLease(ctx, leaseID)
return nil
}
@@ -287,7 +286,6 @@ func (e *ETCD) BindStatus(ctx context.Context, entityKey, statusKey, statusValue
// There is a status bound to the entity yet but its value isn't same as the expected one.
valueTxn := statusTxn.Responses[0].GetResponseTxn()
if !valueTxn.Succeeded {
- e.revokeLease(ctx, leaseID)
return nil
}
@@ -297,6 +295,7 @@ func (e *ETCD) BindStatus(ctx context.Context, entityKey, statusKey, statusValue
if origLeaseID != leaseID {
e.revokeLease(ctx, leaseID)
}
+
_, err = e.cliv3.KeepAliveOnce(ctx, origLeaseID)
return err
} | should only revoke when we don't use the lease | projecteru2_core | train |
fcbfac9597de4a0197ffa8a680296768d48dbcb2 | diff --git a/sprinter/formulas/template.py b/sprinter/formulas/template.py
index <HASH>..<HASH> 100644
--- a/sprinter/formulas/template.py
+++ b/sprinter/formulas/template.py
@@ -8,6 +8,7 @@ source = http://mywebsite.com/.gitignore
target = ~/.gitignore
username = %(config:username)s
password = %(config:mywebsitepassword)s
+on_update = false
"""
import os
import urllib
@@ -22,7 +23,8 @@ class TemplateFormula(FormulaBase):
super(TemplateFormula, self).install(feature_name, config)
def update(self, feature_name, source_config, target_config):
- self.__install_file(target_config['source'], target_config['target'], target_config)
+ if self.lib.is_affirmative('on_update'):
+ self.__install_file(target_config['source'], target_config['target'], target_config)
super(TemplateFormula, self).update(feature_name, source_config, target_config)
def remove(self, feature_name, config): | Adding on_update method in lib | toumorokoshi_sprinter | train |
8668edfc3f3b53ac2c87037f06405fe7012bf269 | diff --git a/directory/directory_dest.go b/directory/directory_dest.go
index <HASH>..<HASH> 100644
--- a/directory/directory_dest.go
+++ b/directory/directory_dest.go
@@ -70,7 +70,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
}
}
// create version file
- err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755)
+ err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
if err != nil {
return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
} | directory: make the "version" file non-executable
Take the execute bit off of the "version" file that we create when
writing images to a "directory:" destination. | containers_image | train |
057343a11f6212c72912b9da5f232fc4a5c1de50 | diff --git a/fastlane_core/lib/fastlane_core/version.rb b/fastlane_core/lib/fastlane_core/version.rb
index <HASH>..<HASH> 100644
--- a/fastlane_core/lib/fastlane_core/version.rb
+++ b/fastlane_core/lib/fastlane_core/version.rb
@@ -1,3 +1,3 @@
module FastlaneCore
- VERSION = "0.46.3".freeze
+ VERSION = "0.47.0".freeze
end | [fastlane_core] Version bump (#<I>)
Added inline showing of relevant GitHub issue when fastlane fails | fastlane_fastlane | train |
e9b9f6a0b6aecebed36fc33b47a8c88ec462b2a6 | diff --git a/src/com/google/javascript/jscomp/DefaultPassConfig.java b/src/com/google/javascript/jscomp/DefaultPassConfig.java
index <HASH>..<HASH> 100644
--- a/src/com/google/javascript/jscomp/DefaultPassConfig.java
+++ b/src/com/google/javascript/jscomp/DefaultPassConfig.java
@@ -278,7 +278,7 @@ public final class DefaultPassConfig extends PassConfig {
checks.add(convertEs6TypedToEs6);
}
- if (options.needsTranspilationFrom(ES_NEXT)) {
+ if (options.needsTranspilationFrom(ES2018)) {
TranspilationPasses.addEs2018Passes(checks);
checks.add(setFeatureSet(ES8));
} | Switch ES_NEXT to ES<I> in one more spot that I missed in the previous change.
-------------
Created by MOE: <URL> | google_closure-compiler | train |
743c24ec7703c71006e4ff56e47f6578f17567ce | diff --git a/bigtable-hbase-2.x-parent/bigtable-hbase-2.x/src/main/java/com/google/cloud/bigtable/hbase2_x/BigtableAsyncAdmin.java b/bigtable-hbase-2.x-parent/bigtable-hbase-2.x/src/main/java/com/google/cloud/bigtable/hbase2_x/BigtableAsyncAdmin.java
index <HASH>..<HASH> 100644
--- a/bigtable-hbase-2.x-parent/bigtable-hbase-2.x/src/main/java/com/google/cloud/bigtable/hbase2_x/BigtableAsyncAdmin.java
+++ b/bigtable-hbase-2.x-parent/bigtable-hbase-2.x/src/main/java/com/google/cloud/bigtable/hbase2_x/BigtableAsyncAdmin.java
@@ -287,10 +287,12 @@ public class BigtableAsyncAdmin implements AsyncAdmin {
if (ex != null) {
if (Status.fromThrowable(ex).getCode() == Status.Code.NOT_FOUND) {
throw new CompletionException(new TableNotFoundException(tableName));
+ } else {
+ throw new CompletionException(ex);
}
+ } else {
+ return tableAdapter2x.adapt(resp);
}
-
- return tableAdapter2x.adapt(resp);
});
} | Fixing an exception handling case in BigtableAsyncAdmin (#<I>)
getDescriptor needs to throw exceptions even if the exception is not `TableNotFound` | googleapis_cloud-bigtable-client | train |
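The fix ensures every error path of the `(response, exception)` callback raises, so a non-`NOT_FOUND` failure no longer falls through to `adapt(resp)` with an empty response. A language-neutral sketch of the corrected branching, in Python with stand-in names:

```python
# Every error path must raise; the response is only adapted when no
# exception occurred. Names here are stand-ins, not the client's API.
class TableNotFoundException(Exception): pass
class CompletionException(Exception): pass

def handle(resp, ex, is_not_found, adapt):
    if ex is not None:
        if is_not_found(ex):
            raise CompletionException(TableNotFoundException())
        # The bug: this branch was missing, so other errors fell
        # through and adapt(None) ran instead of propagating.
        raise CompletionException(ex)
    return adapt(resp)

assert handle({"name": "t"}, None, lambda e: False, lambda r: r["name"]) == "t"
```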
3fc864f3c637e06e2fa7a81f6b48a5df58a9bc5b | diff --git a/lib/grit/tree.rb b/lib/grit/tree.rb
index <HASH>..<HASH> 100644
--- a/lib/grit/tree.rb
+++ b/lib/grit/tree.rb
@@ -15,7 +15,7 @@ module Grit
#
# Returns Grit::Tree (baked)
def self.construct(repo, treeish, paths = [])
- output = repo.git.ls_tree({}, treeish, *paths)
+ output = repo.git.ls_tree({:raise => true}, treeish, *paths)
self.allocate.construct_initialize(repo, treeish, output)
end | git ls-tree raises on non-zero exit | mojombo_grit | train |
a773846351ba009d907b04ae0eb07ec9220cd88d | diff --git a/database/table/resources.php b/database/table/resources.php
index <HASH>..<HASH> 100644
--- a/database/table/resources.php
+++ b/database/table/resources.php
@@ -28,10 +28,10 @@ class ComActivitiesDatabaseTableResources extends KDatabaseTableAbstract
'name' => 'resources',
'behaviors' => array(
'com:activities.database.behavior.resources.creatable',
- 'parameterizable' => array('column' => 'metadata')
+ 'parameterizable' => array('column' => 'data')
),
'filters' => array(
- 'metadata' => 'json'
+ 'data' => 'json'
)
)); | #1 Renamed metadata to data. | joomlatools_joomlatools-framework-activities | train |
2e1007793049434492b544c703b576ae4e5398f9 | diff --git a/widgets/TbPager.php b/widgets/TbPager.php
index <HASH>..<HASH> 100644
--- a/widgets/TbPager.php
+++ b/widgets/TbPager.php
@@ -40,6 +40,12 @@ class TbPager extends CBasePager
* @var string the text label for the last page button.
*/
public $lastPageLabel = '»';
+
+ /**
+ * @var boolean whether the "first" and "last" buttons should be hidden.
+ * Defaults to false.
+ */
+ public $hideFirstAndLast = false;
/**
* @var array HTML attributes for the pager container tag.
*/
@@ -81,7 +87,8 @@ class TbPager extends CBasePager
$links = array();
// first page
- $links[] = $this->createPageLink($this->firstPageLabel, 0, $currentPage <= 0, false);
+ if (!$this->hideFirstAndLast)
+ $links[] = $this->createPageLink($this->firstPageLabel, 0, $currentPage <= 0, false);
// prev page
if (($page = $currentPage - 1) < 0)
@@ -100,7 +107,8 @@ class TbPager extends CBasePager
$links[] = $this->createPageLink($this->nextPageLabel, $page, $currentPage >= $pageCount - 1, false);
// last page
- $links[] = $this->createPageLink($this->lastPageLabel, $pageCount - 1, $currentPage >= $pageCount - 1, false);
+ if (!$this->hideFirstAndLast)
+ $links[] = $this->createPageLink($this->lastPageLabel, $pageCount - 1, $currentPage >= $pageCount - 1, false);
return $links;
} | added hideFirstAndLast option | crisu83_yiistrap | train |
12719a788f68598df27760f6ba7c951858979e98 | diff --git a/unixfs/io/pbdagreader.go b/unixfs/io/pbdagreader.go
index <HASH>..<HASH> 100644
--- a/unixfs/io/pbdagreader.go
+++ b/unixfs/io/pbdagreader.go
@@ -75,9 +75,7 @@ func (dr *PBDagReader) preloadNextNodes(ctx context.Context) {
end = len(dr.links)
}
- for i, p := range ipld.GetNodes(ctx, dr.serv, dr.links[beg:end]) {
- dr.promises[beg+i] = p
- }
+ copy(dr.promises[beg:], ipld.GetNodes(ctx, dr.serv, dr.links[beg:end]))
}
// precalcNextBuf follows the next link in line and loads it from the | use `copy` instead of looping
License: MIT | ipfs_go-ipfs | train |
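Go's `copy` replaces the index loop in a single call; the Python analogue is slice assignment. A tiny sketch of the before and after:

```python
# Python analogue of the Go change: replace an explicit index loop with
# a bulk copy into a pre-sized destination list.
promises = [None] * 6
fetched = ["p3", "p4", "p5"]   # stand-in for ipld.GetNodes(...)
beg = 3

# Loop version the commit removed:
for i, p in enumerate(fetched):
    promises[beg + i] = p

# Bulk version, equivalent to Go's copy(dst[beg:], src):
promises[beg:beg + len(fetched)] = fetched
assert promises[3:] == fetched
```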
eebd3e3e4a34b2c5df7489187eb963bca25fae51 | diff --git a/salt/modules/network.py b/salt/modules/network.py
index <HASH>..<HASH> 100644
--- a/salt/modules/network.py
+++ b/salt/modules/network.py
@@ -38,7 +38,6 @@ def __virtual__():
# Disable on Windows, a specific file module exists:
if salt.utils.is_windows():
return False
-
return True
@@ -948,19 +947,23 @@ def mod_hostname(hostname):
uname_cmd = '/usr/bin/uname' if salt.utils.is_smartos() else salt.utils.which('uname')
check_hostname_cmd = salt.utils.which('check-hostname')
- if hostname_cmd.endswith('hostnamectl'):
- __salt__['cmd.run']('{0} set-hostname {1}'.format(hostname_cmd, hostname))
- return True
-
# Grab the old hostname so we know which hostname to change and then
# change the hostname using the hostname command
- if not salt.utils.is_sunos():
+ if hostname_cmd.endswith('hostnamectl'):
+ out = __salt__['cmd.run']('{0} status'.format(hostname_cmd))
+ for line in out.splitlines():
+ line = line.split(':')
+ if 'Static hostname' in line[0]:
+ o_hostname = line[1].strip()
+ elif not salt.utils.is_sunos():
o_hostname = __salt__['cmd.run']('{0} -f'.format(hostname_cmd))
else:
# output: Hostname core OK: fully qualified as core.acheron.be
o_hostname = __salt__['cmd.run'](check_hostname_cmd).split(' ')[-1]
- if not salt.utils.is_sunos():
+ if hostname_cmd.endswith('hostnamectl'):
+ __salt__['cmd.run']('{0} set-hostname {1}'.format(hostname_cmd, hostname))
+ elif not salt.utils.is_sunos():
__salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname))
else:
__salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0])) | Hosts file update in mod_hostname.
On systems with systemd you can now use hostnamectl to set the hostname. In this
case, if hostnamectl exists, mod_hostname would use it and return, leaving the
hosts file as it is, which is not the expected behavior.
Also, setting the hostname based on the available commands or OS is now
grouped in a single block, which is cleaner I think.
Also update the way to get the current hostname using hostnamectl
Fixes #<I> | saltstack_salt | train |
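The new hostnamectl branch scrapes the static hostname out of `hostnamectl status` output by splitting each line on a colon. A minimal Python sketch of that parse (the sample output is illustrative):

```python
# Pull the static hostname out of `hostnamectl status` output.
sample = """\
   Static hostname: web01.example.com
         Icon name: computer-vm
           Chassis: vm"""

def static_hostname(status_output):
    for line in status_output.splitlines():
        key, _, value = line.partition(':')  # split on the first colon
        if 'Static hostname' in key:
            return value.strip()
    return None

assert static_hostname(sample) == 'web01.example.com'
```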
9b53ace5c577c72df3623b8f3fa76cebed4c2174 | diff --git a/hagelslag/processing/TrackProcessing.py b/hagelslag/processing/TrackProcessing.py
index <HASH>..<HASH> 100644
--- a/hagelslag/processing/TrackProcessing.py
+++ b/hagelslag/processing/TrackProcessing.py
@@ -328,7 +328,7 @@ class TrackProcessor(object):
hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data,
self.gaussian_window)),
self.size_filter)
- hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
+ hour_labels[mrms_data < self.mrms_ew.min_intensity] = 0
obj_slices = find_objects(hour_labels)
num_slices = len(list(obj_slices))
obs_objects.append([])
@@ -511,7 +511,7 @@ class TrackProcessor(object):
model_hail_dists = pd.DataFrame(index=model_track.times,
columns=label_columns)
for t, step in enumerate(obs_track.timesteps):
- step_vals = step[(obs_track.masks[t] == 1) & (obs_track.timesteps[t] > self.mrms_ew.min_thresh)]
+ step_vals = step[(obs_track.masks[t] == 1) & (obs_track.timesteps[t] > self.mrms_ew.min_intensity)]
min_hail = step_vals.min() - 0.1
obs_hail_dists.loc[obs_track.times[t], ["Shape", "Location", "Scale"]] = gamma.fit(step_vals,
floc=min_hail)
@@ -568,7 +568,7 @@ class TrackProcessor(object):
for step_pair in step_pairs:
obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()
obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()
- all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])
+ all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_intensity)])
combined_hail_sizes = np.concatenate(all_hail_sizes)
min_hail = combined_hail_sizes.min() - 0.1
model_track.observations.loc[time, "Max_Hail_Size"] = combined_hail_sizes.max() | Updated min_thresh to min_intensity | djgagne_hagelslag | train |
3ad02bd5f958425aa67bfe77689eaf3f89bbb55b | diff --git a/src/Symfony/Component/Console/Formatter/OutputFormatter.php b/src/Symfony/Component/Console/Formatter/OutputFormatter.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Component/Console/Formatter/OutputFormatter.php
+++ b/src/Symfony/Component/Console/Formatter/OutputFormatter.php
@@ -32,7 +32,7 @@ class OutputFormatter implements OutputFormatterInterface
* Initializes console output formatter.
*
* @param Boolean $decorated Whether this formatter should actually decorate strings
- * @param array $styles Array of "name => FormatterStyle" instance
+ * @param array $styles Array of "name => FormatterStyle" instances
*
* @api
*/
@@ -53,7 +53,7 @@ class OutputFormatter implements OutputFormatterInterface
/**
* Sets the decorated flag.
*
- * @param Boolean $decorated Whether to decorated the messages or not
+ * @param Boolean $decorated Whether to decorate the messages or not
*
* @api
*/
@@ -108,6 +108,8 @@ class OutputFormatter implements OutputFormatterInterface
*
* @return OutputFormatterStyleInterface
*
+ * @throws \InvalidArgumentException When style isn't defined
+ *
* @api
*/
public function getStyle($name)
diff --git a/src/Symfony/Component/Console/Formatter/OutputFormatterInterface.php b/src/Symfony/Component/Console/Formatter/OutputFormatterInterface.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Component/Console/Formatter/OutputFormatterInterface.php
+++ b/src/Symfony/Component/Console/Formatter/OutputFormatterInterface.php
@@ -23,7 +23,7 @@ interface OutputFormatterInterface
/**
* Sets the decorated flag.
*
- * @param Boolean $decorated Whether to decorated the messages or not
+ * @param Boolean $decorated Whether to decorate the messages or not
*
* @api
*/
diff --git a/src/Symfony/Component/Console/Formatter/OutputFormatterStyle.php b/src/Symfony/Component/Console/Formatter/OutputFormatterStyle.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Component/Console/Formatter/OutputFormatterStyle.php
+++ b/src/Symfony/Component/Console/Formatter/OutputFormatterStyle.php
@@ -79,6 +79,8 @@ class OutputFormatterStyle implements OutputFormatterStyleInterface
*
* @param string $color color name
*
+ * @throws \InvalidArgumentException When the color name isn't defined
+ *
* @api
*/
public function setForeground($color = null)
@@ -105,6 +107,8 @@ class OutputFormatterStyle implements OutputFormatterStyleInterface
*
* @param string $color color name
*
+ * @throws \InvalidArgumentException When the color name isn't defined
+ *
* @api
*/
public function setBackground($color = null)
@@ -131,6 +135,8 @@ class OutputFormatterStyle implements OutputFormatterStyleInterface
*
* @param string $option option name
*
+ * @throws \InvalidArgumentException When the option name isn't defined
+ *
* @api
*/
public function setOption($option)
@@ -152,6 +158,9 @@ class OutputFormatterStyle implements OutputFormatterStyleInterface
* Unsets some specific style option.
*
* @param string $option option name
+ *
+ * @throws \InvalidArgumentException When the option name isn't defined
+ *
*/
public function unsetOption($option)
{
@@ -170,7 +179,7 @@ class OutputFormatterStyle implements OutputFormatterStyleInterface
}
/**
- * Set multiple style options at once.
+ * Sets multiple style options at once.
*
* @param array $options
*/
diff --git a/src/Symfony/Component/Console/Formatter/OutputFormatterStyleInterface.php b/src/Symfony/Component/Console/Formatter/OutputFormatterStyleInterface.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Component/Console/Formatter/OutputFormatterStyleInterface.php
+++ b/src/Symfony/Component/Console/Formatter/OutputFormatterStyleInterface.php
@@ -55,7 +55,7 @@ interface OutputFormatterStyleInterface
function unsetOption($option);
/**
- * Set multiple style options at once.
+ * Sets multiple style options at once.
*
* @param array $options
*/ | [Console][Formatter] Added missing PHPDoc @throws and fixed some minor typos and grammatical errors | symfony_symfony | train |
f3b5d00d86a654f9483713a3d2b168c51d5f92b4 | diff --git a/builder/sshd/server.go b/builder/sshd/server.go
index <HASH>..<HASH> 100644
--- a/builder/sshd/server.go
+++ b/builder/sshd/server.go
@@ -134,7 +134,7 @@ func (s *server) handleConn(conn net.Conn, conf *ssh.ServerConfig) {
_, chans, reqs, err := ssh.NewServerConn(conn, conf)
if err != nil {
// Handshake failure.
- log.Errf(s.c, "Failed handshake: %s (%v)", err, conn)
+ log.Debugf(s.c, "Failed handshake: %s", err)
return
} | fix(builder): demote handshake failure log to debug
It's okay for connections to probe the server without initiating
a handshake, but it's still useful for developers to see when incoming
connections are not connecting. | deis_deis | train |
8989a08b6873784ab0366f1364d358cf5ff03f24 | diff --git a/scripts/init.js b/scripts/init.js
index <HASH>..<HASH> 100644
--- a/scripts/init.js
+++ b/scripts/init.js
@@ -35,10 +35,18 @@ module.exports = function(hostPath, appName, verbose) {
JSON.stringify(hostPackage, null, 2)
);
- // Move the files for the user
- // TODO: copying might be more correct?
- fs.renameSync(path.join(selfPath, 'src'), path.join(hostPath, 'src'));
- fs.renameSync(path.join(selfPath, 'index.html'), path.join(hostPath, 'index.html'));
+ // Copy the files for the user
+ function copySync(src, dest) {
+ return fs.writeFileSync(dest, fs.readFileSync(src));
+ }
+ fs.mkdirSync(path.join(hostPath, 'src'));
+ fs.readdirSync(path.join(selfPath, 'src')).forEach(function(filename) {
+ copySync(
+ path.join(selfPath, 'src', filename),
+ path.join(hostPath, 'src', filename)
+ );
+ });
+ copySync(path.join(selfPath, 'index.html'), path.join(hostPath, 'index.html'));
// Run another npm install for react and react-dom
// TODO: having to do two npm installs is bad, can we avoid it? | Copy files instead of moving them during init | vcarl_create-react-app | train |
efcdefee39bd226d83a44143e1bef0878259c47b | diff --git a/peer.go b/peer.go
index <HASH>..<HASH> 100644
--- a/peer.go
+++ b/peer.go
@@ -594,6 +594,7 @@ func (p *peer) addLink(chanPoint *wire.OutPoint,
MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
OutgoingCltvRejectDelta: p.outgoingCltvRejectDelta,
+ TowerClient: p.server.towerClient,
}
link := htlcswitch.NewChannelLink(linkCfg, lnChan) | peer: pass watchtower client's to active links | lightningnetwork_lnd | train |
6966991d70756c12e0feb3ef79ad403ee1d938ef | diff --git a/src/main/java/org/jboss/pressgang/ccms/rest/v1/components/ComponentFileV1.java b/src/main/java/org/jboss/pressgang/ccms/rest/v1/components/ComponentFileV1.java
index <HASH>..<HASH> 100644
--- a/src/main/java/org/jboss/pressgang/ccms/rest/v1/components/ComponentFileV1.java
+++ b/src/main/java/org/jboss/pressgang/ccms/rest/v1/components/ComponentFileV1.java
@@ -27,9 +27,11 @@ public class ComponentFileV1 {
}
public static String getExtension(final RESTFileV1 source) {
- final int lastPeriodIndex = source.getFileName().lastIndexOf(".");
- if (lastPeriodIndex != -1 && lastPeriodIndex < source.getFileName().length() - 1) {
- return source.getFileName().substring(lastPeriodIndex + 1);
+ if (source.getFileName() != null) {
+ final int lastPeriodIndex = source.getFileName().lastIndexOf(".");
+ if (lastPeriodIndex != -1 && lastPeriodIndex < source.getFileName().length() - 1) {
+ return source.getFileName().substring(lastPeriodIndex + 1);
+ }
}
return null;
diff --git a/src/main/java/org/jboss/pressgang/ccms/rest/v1/entities/RESTFileV1.java b/src/main/java/org/jboss/pressgang/ccms/rest/v1/entities/RESTFileV1.java
index <HASH>..<HASH> 100644
--- a/src/main/java/org/jboss/pressgang/ccms/rest/v1/entities/RESTFileV1.java
+++ b/src/main/java/org/jboss/pressgang/ccms/rest/v1/entities/RESTFileV1.java
@@ -48,6 +48,7 @@ public class RESTFileV1 extends RESTBasePrimaryEntityV1<RESTFileV1, RESTFileColl
clone.description = this.description;
clone.fileName = this.fileName;
clone.filePath = this.filePath;
+ clone.explodeArchive = this.explodeArchive;
if (deepCopy) {
if (this.languageFiles_OTM != null) { | Fixed some bugs in the File entity implementations. | pressgang-ccms_PressGangCCMSRESTv1Common | train |
0b6970da542bbb02b21c8a790d8bf59d193a3d99 | diff --git a/core-bundle/src/Resources/contao/classes/Backend.php b/core-bundle/src/Resources/contao/classes/Backend.php
index <HASH>..<HASH> 100644
--- a/core-bundle/src/Resources/contao/classes/Backend.php
+++ b/core-bundle/src/Resources/contao/classes/Backend.php
@@ -515,6 +515,8 @@ abstract class Backend extends \Controller
* Add a breadcrumb menu to the page tree
*
* @param string
+ *
+ * @throws \RuntimeException
*/
public static function addPagesBreadcrumb($strKey='tl_page_node')
{
@@ -523,7 +525,13 @@ abstract class Backend extends \Controller
// Set a new node
if (isset($_GET['node']))
{
- $objSession->set($strKey, \Input::get('node'));
+ // Check the path (thanks to Arnaud Buchoux)
+ if (\Validator::isInsecurePath(\Input::get('node', true)))
+ {
+ throw new \RuntimeException('Insecure path ' . \Input::get('node', true));
+ }
+
+ $objSession->set($strKey, \Input::get('node', true));
\Controller::redirect(preg_replace('/&node=[^&]*/', '', \Environment::get('request')));
}
@@ -534,6 +542,12 @@ abstract class Backend extends \Controller
return;
}
+ // Check the path (thanks to Arnaud Buchoux)
+ if (\Validator::isInsecurePath($intNode))
+ {
+ throw new \RuntimeException('Insecure path ' . $intNode);
+ }
+
$arrIds = array();
$arrLinks = array();
$objUser = \BackendUser::getInstance();
@@ -653,6 +667,8 @@ abstract class Backend extends \Controller
* Add a breadcrumb menu to the file tree
*
* @param string
+ *
+ * @throws \RuntimeException
*/
public static function addFilesBreadcrumb($strKey='tl_files_node')
{
@@ -661,6 +677,12 @@ abstract class Backend extends \Controller
// Set a new node
if (isset($_GET['node']))
{
+ // Check the path (thanks to Arnaud Buchoux)
+ if (\Validator::isInsecurePath(\Input::get('node', true)))
+ {
+ throw new \RuntimeException('Insecure path ' . \Input::get('node', true));
+ }
+
$objSession->set($strKey, \Input::get('node', true));
\Controller::redirect(preg_replace('/(&|\?)node=[^&]*/', '', \Environment::get('request')));
}
@@ -672,6 +694,12 @@ abstract class Backend extends \Controller
return;
}
+ // Check the path (thanks to Arnaud Buchoux)
+ if (\Validator::isInsecurePath($strNode))
+ {
+ throw new \RuntimeException('Insecure path ' . $strNode);
+ }
+
// Currently selected folder does not exist
if (!is_dir(TL_ROOT . '/' . $strNode))
{
diff --git a/core-bundle/src/Resources/contao/dca/tl_templates.php b/core-bundle/src/Resources/contao/dca/tl_templates.php
index <HASH>..<HASH> 100644
--- a/core-bundle/src/Resources/contao/dca/tl_templates.php
+++ b/core-bundle/src/Resources/contao/dca/tl_templates.php
@@ -136,12 +136,20 @@ class tl_templates extends Backend
/**
* Add the breadcrumb menu
+ *
+ * @throws RuntimeException
*/
public function addBreadcrumb()
{
// Set a new node
if (isset($_GET['node']))
{
+ // Check the path (thanks to Arnaud Buchoux)
+ if (Validator::isInsecurePath(Input::get('node', true)))
+ {
+ throw new RuntimeException('Insecure path ' . Input::get('node', true));
+ }
+
$this->Session->set('tl_templates_node', Input::get('node', true));
$this->redirect(preg_replace('/(&|\?)node=[^&]*/', '', Environment::get('request')));
}
@@ -153,6 +161,12 @@ class tl_templates extends Backend
return;
}
+ // Check the path (thanks to Arnaud Buchoux)
+ if (Validator::isInsecurePath($strNode))
+ {
+ throw new RuntimeException('Insecure path ' . $strNode);
+ }
+
// Currently selected folder does not exist
if (!is_dir(TL_ROOT . '/' . $strNode))
{
diff --git a/core-bundle/src/Resources/contao/library/Contao/Validator.php b/core-bundle/src/Resources/contao/library/Contao/Validator.php
index <HASH>..<HASH> 100644
--- a/core-bundle/src/Resources/contao/library/Contao/Validator.php
+++ b/core-bundle/src/Resources/contao/library/Contao/Validator.php
@@ -337,6 +337,12 @@ class Validator
$strPath = str_replace('\\', '/', $strPath);
$strPath = preg_replace('#//+#', '/', $strPath);
+ // Equals ..
+ if ($strPath == '..')
+ {
+ return true;
+ }
+
// Begins with ./
if (substr($strPath, 0, 2) == './')
{ | [Core] Fix a directory traversal vulnerability discovered by Arnaud Buchoux | contao_contao | train |
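The patch routes user-supplied `node` values through `Validator::isInsecurePath`, which the same commit extends to reject a bare `..`. A rough Python sketch of such a check (only the `..` and `./` rules are visible in the diff; the remaining rules here are plausible companions, not Contao's exact list):

```python
import re

# Sketch of an insecure-path check in the spirit of the patched Validator.
def is_insecure_path(path):
    path = path.replace('\\', '/')
    path = re.sub(r'/{2,}', '/', path)      # collapse duplicate slashes
    if path == '..':
        return True                          # the rule the commit adds
    if path.startswith('./') or path.startswith('../'):
        return True
    if '/../' in path or path.endswith('/..'):
        return True
    return False

assert is_insecure_path('..')
assert is_insecure_path('./etc/passwd')
assert not is_insecure_path('files/media/logo.png')
```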
91ef9655aaf2adea3a044bf9a464908084917a98 | diff --git a/hugolib/page__tree.go b/hugolib/page__tree.go
index <HASH>..<HASH> 100644
--- a/hugolib/page__tree.go
+++ b/hugolib/page__tree.go
@@ -104,6 +104,10 @@ func (pt pageTree) InSection(other interface{}) (bool, error) {
}
+func (pt pageTree) Page() page.Page {
+ return pt.p
+}
+
func (pt pageTree) Parent() page.Page {
return pt.p.parent
}
diff --git a/resources/page/page.go b/resources/page/page.go
index <HASH>..<HASH> 100644
--- a/resources/page/page.go
+++ b/resources/page/page.go
@@ -340,6 +340,10 @@ type TreeProvider interface {
// Sections returns this section's subsections, if any.
// Note that for non-sections, this method will always return an empty list.
Sections() Pages
+
+ // Page returns a reference to the Page itself, kept here mostly
+ // for legacy reasons.
+ Page() Page
}
// DeprecatedWarningPageMethods lists deprecated Page methods that will trigger
diff --git a/resources/page/page_generate/generate_page_wrappers.go b/resources/page/page_generate/generate_page_wrappers.go
index <HASH>..<HASH> 100644
--- a/resources/page/page_generate/generate_page_wrappers.go
+++ b/resources/page/page_generate/generate_page_wrappers.go
@@ -107,7 +107,12 @@ func generateMarshalJSON(c *codegen.Inspector) error {
return errors.New("no methods found")
}
- marshalJSON, pkgImports := methods.ToMarshalJSON("Page", "github.com/gohugoio/hugo/resources/page")
+ marshalJSON, pkgImports := methods.ToMarshalJSON(
+ "Page",
+ "github.com/gohugoio/hugo/resources/page",
+ // Exclusion regexps. Matches method names.
+ `\bPage\b`,
+ )
fmt.Fprintf(f, `%s
diff --git a/resources/page/page_nop.go b/resources/page/page_nop.go
index <HASH>..<HASH> 100644
--- a/resources/page/page_nop.go
+++ b/resources/page/page_nop.go
@@ -299,6 +299,10 @@ func (p *nopPage) Params() map[string]interface{} {
return nil
}
+func (p *nopPage) Page() Page {
+ return p
+}
+
func (p *nopPage) Parent() Page {
return nil
}
diff --git a/resources/page/testhelpers_test.go b/resources/page/testhelpers_test.go
index <HASH>..<HASH> 100644
--- a/resources/page/testhelpers_test.go
+++ b/resources/page/testhelpers_test.go
@@ -360,6 +360,10 @@ func (p *testPage) Params() map[string]interface{} {
return p.params
}
+func (p *testPage) Page() Page {
+ return p
+}
+
func (p *testPage) Parent() Page {
panic("not implemented")
} | resources/page: Re-introduce .Page.Page
It was removed in <I>e<I>cb<I>f2cebb<I>e8e<I>f<I> -- it's not documented, but it's used in too many real sites on the web.
See #<I> | gohugoio_hugo | train |
935abc8e42a4893eff00580e3627a24a0ec1e018 | diff --git a/src/ArrayLookupNode.php b/src/ArrayLookupNode.php
index <HASH>..<HASH> 100644
--- a/src/ArrayLookupNode.php
+++ b/src/ArrayLookupNode.php
@@ -44,10 +44,39 @@ class ArrayLookupNode extends ParentNode implements VariableExpressionNode {
}
/**
- * @return Node
+ * @return \Pharborist\Node[]
*/
- public function getKey() {
- return $this->key;
+ public function getKeys() {
+ $keys = [ clone $this->key ];
+ if ($this->array instanceof ArrayLookupNode) {
+ $keys = array_merge($this->array->getKeys(), $keys);
+ }
+ return $keys;
+ }
+
+ /**
+ * Returns a specific key in the lookup.
+ *
+ * @param integer $index
+ * The index of the key to return.
+ *
+ * @return \Pharborist\Node
+ *
+ * @throws
+ * \InvalidArgumentException if $index is not an integer.
+ * \OutOfBoundsException if $index is less than zero or greater than the
+ * number of keys in the lookup.
+ */
+ public function getKey($index = 0) {
+ $keys = $this->getKeys();
+
+ if (!is_integer($index)) {
+ throw new \InvalidArgumentException();
+ }
+ if ($index < 0 || $index >= count($keys)) {
+ throw new \OutOfBoundsException();
+ }
+ return $keys[$index];
}
/**
@@ -77,10 +106,6 @@ class ArrayLookupNode extends ParentNode implements VariableExpressionNode {
if (!$this->hasScalarKeys()) {
throw new \DomainException('Cannot extract non-scalar keys from array lookup ' . $this);
}
- $keys = [ $this->key->toValue() ];
- if ($this->array instanceof ArrayLookupNode) {
- $keys = array_merge($this->array->extractKeys(), $keys);
- }
- return $keys;
+ return array_map(function(Node $key) { return $key->toValue(); }, $this->getKeys());
}
} | ArrayLookupNode::getKey() now expects an index for which key to return (defaults to 0). Added a getKeys() method to return an array of cloned key nodes. | grom358_pharborist | train |
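A nested lookup such as `$a['x']['y']` is a chain of `ArrayLookupNode`s, and `getKeys()` flattens it left-to-right by recursing into the array side first. A compact Python sketch of that traversal and the bounds-checked `getKey($index)` (the class shape is illustrative):

```python
# A lookup like a['x']['y'] is modelled as
# ArrayLookup(ArrayLookup(Var('a'), 'x'), 'y'); keys come back
# left-to-right by recursing into the array side first.
class ArrayLookup:
    def __init__(self, array, key):
        self.array, self.key = array, key

    def get_keys(self):
        keys = [self.key]
        if isinstance(self.array, ArrayLookup):
            keys = self.array.get_keys() + keys
        return keys

    def get_key(self, index=0):
        keys = self.get_keys()
        if not 0 <= index < len(keys):
            raise IndexError(index)   # mirrors the OutOfBoundsException
        return keys[index]

lookup = ArrayLookup(ArrayLookup("a", "x"), "y")
assert lookup.get_keys() == ["x", "y"]
assert lookup.get_key() == "x"
```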
f930d281ba6dea4de613fd2e4b0bea3c4dccfe2c | diff --git a/salt/modules/debian_ip.py b/salt/modules/debian_ip.py
index <HASH>..<HASH> 100644
--- a/salt/modules/debian_ip.py
+++ b/salt/modules/debian_ip.py
@@ -313,14 +313,14 @@ def _parse_interfaces():
context = sline[2]
# Create item in dict, if not already there
- if not iface_name in adapters:
+ if iface_name not in adapters:
adapters[iface_name] = {}
# Create item in dict, if not already there
- if not 'data' in adapters[iface_name]:
+ if 'data' not in adapters[iface_name]:
adapters[iface_name]['data'] = {}
- if not context in adapters[iface_name]['data']:
+ if context not in adapters[iface_name]['data']:
adapters[iface_name]['data'][context] = {}
adapters[iface_name]['data'][context]['inet_type'] = sline[2]
@@ -341,7 +341,7 @@ def _parse_interfaces():
if sline[0] in _REV_ETHTOOL_CONFIG_OPTS:
ethtool_key = sline[0]
- if not 'ethtool' in adapters[iface_name]['data'][context]:
+ if 'ethtool' not in adapters[iface_name]['data'][context]:
adapters[iface_name]['data'][context]['ethtool'] = {}
adapters[iface_name]['data'][context]['ethtool'][ethtool_key] = sline[1]
@@ -350,7 +350,7 @@ def _parse_interfaces():
sline.pop(0)
value = ' '.join(sline)
- if not 'bonding' in adapters[iface_name]['data'][context]:
+ if 'bonding' not in adapters[iface_name]['data'][context]:
adapters[iface_name]['data'][context]['bonding'] = {}
adapters[iface_name]['data'][context]['bonding'][opt] = value
@@ -359,13 +359,13 @@ def _parse_interfaces():
sline.pop(0)
value = ' '.join(sline)
- if not 'bridgeing' in adapters[iface_name]['data'][context]:
+ if 'bridgeing' not in adapters[iface_name]['data'][context]:
adapters[iface_name]['data'][context]['bridgeing'] = {}
adapters[iface_name]['data'][context]['bridgeing'][opt] = value
if sline[0].startswith('dns-nameservers'):
ud = sline.pop(0)
- if not 'dns' in adapters[iface_name]['data'][context]:
+ if 'dns' not in adapters[iface_name]['data'][context]:
adapters[iface_name]['data'][context]['dns'] = []
adapters[iface_name]['data'][context]['dns'] = sline
@@ -373,7 +373,7 @@ def _parse_interfaces():
ud = sline.pop(0)
cmd = ' '.join(sline)
cmd_key = '{0}_cmds'.format(re.sub('-', '_', ud))
- if not cmd_key in adapters[iface_name]['data'][context]:
+ if cmd_key not in adapters[iface_name]['data'][context]:
adapters[iface_name]['data'][context][cmd_key] = []
adapters[iface_name]['data'][context][cmd_key].append(cmd)
@@ -383,7 +383,7 @@ def _parse_interfaces():
if word == 'auto':
pass
else:
- if not word in adapters:
+ if word not in adapters:
adapters[word] = {}
adapters[word]['enabled'] = True
@@ -393,7 +393,7 @@ def _parse_interfaces():
if word == 'allow-hotplug':
pass
else:
- if not word in adapters:
+ if word not in adapters:
adapters[word] = {}
adapters[word]['hotplug'] = True
@@ -1003,7 +1003,7 @@ def _parse_network_settings(opts, current):
result = {}
valid = _CONFIG_TRUE + _CONFIG_FALSE
- if not 'enabled' in opts:
+ if 'enabled' not in opts:
try:
opts['networking'] = current['networking']
_log_default_network('networking', current['networking'])
@@ -1020,7 +1020,7 @@ def _parse_network_settings(opts, current):
else:
_raise_error_network('networking', valid)
- if not 'hostname' in opts:
+ if 'hostname' not in opts:
try:
opts['hostname'] = current['hostname']
_log_default_network('hostname', current['hostname'])
@@ -1055,7 +1055,7 @@ def _parse_routes(iface, opts):
# Normalize keys
opts = dict((k.lower(), v) for (k, v) in opts.iteritems())
result = {}
- if not 'routes' in opts:
+ if 'routes' not in opts:
_raise_error_routes(iface, 'routes', 'List of routes')
for opt in opts:
@@ -1442,7 +1442,7 @@ def apply_network_settings(**settings):
salt '*' ip.apply_network_settings
'''
- if not 'require_reboot' in settings:
+ if 'require_reboot' not in settings:
settings['require_reboot'] = False
if settings['require_reboot'] in _CONFIG_TRUE: | Fix PEP8 E<I> - test for membership should be "not in" | saltstack_salt | train |
d51ece7308f9af64fb67e20d173e87da310ed402 | diff --git a/code/DocumentationManifest.php b/code/DocumentationManifest.php
index <HASH>..<HASH> 100644
--- a/code/DocumentationManifest.php
+++ b/code/DocumentationManifest.php
@@ -459,7 +459,12 @@ class DocumentationManifest
{
$fromLink = $this->stripLinkBase($from);
$toLink = $this->stripLinkBase($to);
- $this->redirects[$fromLink] = $toLink;
+
+ // If the redirect "from" is already registered with a "to", don't override it. This ensures
+ // that the first version processed is treated as the canonical version.
+ if (!isset($this->redirects[$fromLink])) {
+ $this->redirects[$fromLink] = $toLink;
+ }
}
/** | FIX Ensure first processed stable version is treated as canonical | silverstripe_silverstripe-docsviewer | train |
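The "first registration wins" guard in the commit above is the same idea as Python's `dict.setdefault` — a hedged sketch of the design choice (function and path names are hypothetical):

```python
redirects = {}

def add_redirect(redirects, from_link, to_link):
    # Keep the first mapping registered for from_link; later calls are no-ops,
    # so the first (canonical) version processed keeps ownership of the URL.
    redirects.setdefault(from_link, to_link)

add_redirect(redirects, "/old", "/en/3/old")
add_redirect(redirects, "/old", "/en/4/old")  # ignored: already registered
assert redirects["/old"] == "/en/3/old"
```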
8e0453d27194ffbe5b5b54f214c0b87290d7b8e3 | diff --git a/src/wiotp/sdk/client.py b/src/wiotp/sdk/client.py
index <HASH>..<HASH> 100644
--- a/src/wiotp/sdk/client.py
+++ b/src/wiotp/sdk/client.py
@@ -167,7 +167,7 @@ class AbstractClient(object):
ca_certs=caFile,
certfile=None,
keyfile=None,
- cert_reqs=ssl.CERT_REQUIRED,
+ cert_reqs=ssl.CERT_NONE,
tls_version=ssl.PROTOCOL_TLSv1_2,
)
self.client.username_pw_set(self.username, self.password) | Changed cert request to NONE
Done to test issue with caFile. | ibm-watson-iot_iot-python | train |
f3ebc39ff8b65ba173f0d9cf06bff550266991ab | diff --git a/lib/epubinfo/models/book.rb b/lib/epubinfo/models/book.rb
index <HASH>..<HASH> 100644
--- a/lib/epubinfo/models/book.rb
+++ b/lib/epubinfo/models/book.rb
@@ -4,36 +4,53 @@ module EPUBInfo
# Titles, array of String instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.1 EPUB2 reference})
# @return [Array]
attr_accessor :titles
+
# Creators, array of Person instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.2 EPUB2 reference})
# @return [Array]
attr_accessor :creators
+ def creators; @creators || []; end
+
# Subjects, array of String instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.3 EPUB2 reference})
# @return [Array]
attr_accessor :subjects
+ def subjects; @subjects || []; end
+
# Description ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.4 EPUB2 reference})
# @return [String]
attr_accessor :description
+
# Publisher ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.5 EPUB2 reference})
# @return [String]
attr_accessor :publisher
+
# Contributors, array of Person instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.6 EPUB2 reference})
# @return [Array]
attr_accessor :contributors
+ def contributors; @contributors || []; end
+
# Dates, array of Date instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.7 EPUB2 reference})
# @return [Array]
attr_accessor :dates
+ def dates; @dates || []; end
+
# Identifiers, array of Identifier instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.10 EPUB2 reference})
# @return [Array]
attr_accessor :identifiers
+ def identifiers; @identifiers || []; end
+
# Source ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.11 EPUB2 reference})
# @return [String]
attr_accessor :source
+
# Languages, array of String instances ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.12 EPUB2 reference})
# @return [Array]
attr_accessor :languages
+ def languages; @languages || []; end
+
# Rights ({http://idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.2.15 EPUB2 reference})
# @return [String]
attr_accessor :rights
+
# DRM protected
# @return [Boolean]
attr_accessor :drm_protected
@@ -58,12 +75,6 @@ module EPUBInfo
self.drm_protected = drm_protected
end
- def creators; @creators || []; end
- def subjects; @subjects || []; end
- def contributors; @contributors || []; end
- def dates; @dates || []; end
- def identifiers; @identifiers || []; end
- def languages; @languages || []; end
# Returns Hash representation of the book
# @return [Hash] | Refactored default values of attributes in book model | chdorner_epubinfo | train |
ea430b7ef437e004afb8826adad36c60588f4ddc | diff --git a/lib/chef/application.rb b/lib/chef/application.rb
index <HASH>..<HASH> 100644
--- a/lib/chef/application.rb
+++ b/lib/chef/application.rb
@@ -215,7 +215,7 @@ class Chef::Application
else
# Unforked interval runs are disabled, so this runs chef-client
# once and then exits. If TERM signal is received, will "ignore"
- # the signal to finish converge, then exit with exitstatus 3.
+ # the signal to finish converge.
run_with_graceful_exit_option
end
@chef_client = nil
diff --git a/lib/chef/application/client.rb b/lib/chef/application/client.rb
index <HASH>..<HASH> 100644
--- a/lib/chef/application/client.rb
+++ b/lib/chef/application/client.rb
@@ -258,7 +258,6 @@ class Chef::Application::Client < Chef::Application
Chef::Config.chef_zero.host = config[:chef_zero_host] if config[:chef_zero_host]
Chef::Config.chef_zero.port = config[:chef_zero_port] if config[:chef_zero_port]
-
if Chef::Config[:daemonize]
Chef::Config[:interval] ||= 1800
end
@@ -319,7 +318,7 @@ class Chef::Application::Client < Chef::Application
if !Chef::Config[:client_fork] || Chef::Config[:once]
begin
# run immediately without interval sleep, or splay
- run_chef_client(Chef::Config[:specific_recipes] || [])
+ run_chef_client(Chef::Config[:specific_recipes])
rescue SystemExit
raise
rescue Exception => e
@@ -346,7 +345,7 @@ class Chef::Application::Client < Chef::Application
end
@signal = nil
- run_chef_client(Chef::Config[:specific_recipes] || [])
+ run_chef_client(Chef::Config[:specific_recipes])
Chef::Application.exit!("Exiting", 0) if !Chef::Config[:interval]
rescue SystemExit => e
diff --git a/spec/unit/application/client_spec.rb b/spec/unit/application/client_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/unit/application/client_spec.rb
+++ b/spec/unit/application/client_spec.rb
@@ -168,6 +168,8 @@ end
describe Chef::Application::Client, "run_application", :unix_only do
before(:each) do
+ Chef::Config[:specific_recipes] = [] # normally gets set in @app.reconfigure
+
@app = Chef::Application::Client.new
@app.setup_signal_handlers
# Default logger doesn't work correctly when logging from a trap handler. | Tidy up some comments, whitespace. | chef_chef | train |
6eb8b9e29ff1c3048aa0daf650a69cf6489d9463 | diff --git a/src/sap.ui.core/src/sap/ui/qunit/utils/ControlIterator.js b/src/sap.ui.core/src/sap/ui/qunit/utils/ControlIterator.js
index <HASH>..<HASH> 100644
--- a/src/sap.ui.core/src/sap/ui/qunit/utils/ControlIterator.js
+++ b/src/sap.ui.core/src/sap/ui/qunit/utils/ControlIterator.js
@@ -87,6 +87,7 @@ sap.ui.define([ 'jquery.sap.global', 'sap/ui/core/Core', 'sap/ui/base/Object', '
"sap.ui.demokit.IndexLayout._Tile",
"sap.ui.layout.BlockLayoutRow",
"sap.ui.richtexteditor.RichTextEditor",
+ "sap.ui.richtexteditor.ToolbarWrapper",
"sap.ui.suite.TaskCircle",
"sap.ui.table.ColumnMenu",
"sap.ui.unified.Menu", | [INTERNAL] ControlIterator: mark ToolbarWrapper as not renderable
sap.ui.richtexteditor.ToolbarWrapper caused issues with ControlIterator
Change-Id: I<I>d<I>cd0a<I>c<I>d9ba6a<I>b7ad0ac6 | SAP_openui5 | train |
631f8ba571dd2096d044910954e885f53fee451b | diff --git a/core/src/main/java/hudson/model/Fingerprint.java b/core/src/main/java/hudson/model/Fingerprint.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/hudson/model/Fingerprint.java
+++ b/core/src/main/java/hudson/model/Fingerprint.java
@@ -1305,6 +1305,19 @@ public class Fingerprint implements ModelObject, Saveable {
}
/**
+ * Returns a facet that blocks the deletion of the fingerprint.
+ * Returns null if no such facet.
+ * @since TODO
+ */
+ public @CheckForNull FingerprintFacet getFacetBlockingDeletion() {
+ for (FingerprintFacet facet : facets) {
+ if (facet.isFingerprintDeletionBlocked())
+ return facet;
+ }
+ return null;
+ }
+
+ /**
* Update references to a renamed job in the fingerprint
*/
public synchronized void rename(String oldName, String newName) throws IOException {
diff --git a/core/src/main/java/hudson/model/FingerprintCleanupThread.java b/core/src/main/java/hudson/model/FingerprintCleanupThread.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/hudson/model/FingerprintCleanupThread.java
+++ b/core/src/main/java/hudson/model/FingerprintCleanupThread.java
@@ -23,9 +23,11 @@
*/
package hudson.model;
+import com.thoughtworks.xstream.converters.basic.DateConverter;
import hudson.Extension;
import hudson.ExtensionList;
import hudson.Functions;
+import jenkins.model.FingerprintFacet;
import jenkins.model.Jenkins;
import org.jenkinsci.Symbol;
import org.kohsuke.accmod.Restricted;
@@ -52,6 +54,8 @@ public class FingerprintCleanupThread extends AsyncPeriodicWork {
static final String FINGERPRINTS_DIR_NAME = "fingerprints";
private static final Pattern FINGERPRINT_FILE_PATTERN = Pattern.compile("[0-9a-f]{28}\\.xml");
+ private static final DateConverter DATE_CONVERTER = new DateConverter();
+
public FingerprintCleanupThread() {
super("Fingerprint cleanup");
}
@@ -107,11 +111,15 @@ public class FingerprintCleanupThread extends AsyncPeriodicWork {
private boolean check(File fingerprintFile, TaskListener listener) {
try {
Fingerprint fp = loadFingerprint(fingerprintFile);
- if (fp == null || !fp.isAlive()) {
+ if (fp == null || (!fp.isAlive() && fp.getFacetBlockingDeletion() == null) ) {
listener.getLogger().println("deleting obsolete " + fingerprintFile);
fingerprintFile.delete();
return true;
} else {
+ if (!fp.isAlive()) {
+ FingerprintFacet deletionBlockerFacet = fp.getFacetBlockingDeletion();
+ listener.getLogger().println(deletionBlockerFacet.getClass().getName() + " created on " + DATE_CONVERTER.toString(deletionBlockerFacet.getTimestamp()) + " blocked deletion of " + fingerprintFile);
+ }
// get the fingerprint in the official map so have the changes visible to Jenkins
// otherwise the mutation made in FingerprintMap can override our trimming.
fp = getFingerprint(fp);
diff --git a/core/src/main/java/jenkins/model/FingerprintFacet.java b/core/src/main/java/jenkins/model/FingerprintFacet.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/jenkins/model/FingerprintFacet.java
+++ b/core/src/main/java/jenkins/model/FingerprintFacet.java
@@ -104,6 +104,15 @@ public abstract class FingerprintFacet implements ExtensionPoint {
}
/**
+ * Returns whether Fingerprint deletion has been blocked by this Facet.
+ * Returns false by default. Override the default to block the deletion of the associated Fingerprint.
+ * @since TODO
+ */
+ public boolean isFingerprintDeletionBlocked() {
+ return false;
+ }
+
+ /**
* Backdoor for {@link Fingerprint} to set itself to its facets.
* Public only because this needs to be accessible to {@link Fingerprint}. Do not call this method directly.
*/ | [JENKINS-<I>] Allow FingerprintFacet to block the deletion of Fingerprint (#<I>)
* [JENKINS-<I>] Allow FingerprintFacet to block the deletion of Fingerprint
* Syntax improvements + CheckForNull annotation
* Changed method name 'facetBlockingDeletion' -> 'getFacetBlockingDeletion' | jenkinsci_jenkins | train |
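A hedged Python sketch of the extension-point pattern in the Jenkins change above: facets may veto deletion, and the cleanup loop skips (and logs) any fingerprint with a vetoing facet. All names are illustrative, not the Jenkins API:

```python
class Facet:
    def is_deletion_blocked(self):
        return False  # default: a facet does not block deletion

class KeepForeverFacet(Facet):
    def is_deletion_blocked(self):
        return True   # override to veto deletion

class Fingerprint:
    def __init__(self, facets, alive):
        self.facets = facets
        self.alive = alive

    def facet_blocking_deletion(self):
        # First facet that vetoes deletion, or None if there is no such facet.
        return next((f for f in self.facets if f.is_deletion_blocked()), None)

def cleanup(fingerprints, log):
    for fp in fingerprints:
        blocker = fp.facet_blocking_deletion()
        if not fp.alive and blocker is None:
            log(f"deleting obsolete {fp}")
        elif not fp.alive:
            log(f"{type(blocker).__name__} blocked deletion of {fp}")

cleanup([Fingerprint([KeepForeverFacet()], alive=False)], print)
```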
d6769f59f7bf56ee895f5c5a4e7c2eb4bb5e644d | diff --git a/command/agent/config_parse.go b/command/agent/config_parse.go
index <HASH>..<HASH> 100644
--- a/command/agent/config_parse.go
+++ b/command/agent/config_parse.go
@@ -624,7 +624,15 @@ func parseConsulConfig(result **config.ConsulConfig, list *ast.ObjectList) error
}
var consulConfig config.ConsulConfig
- if err := mapstructure.WeakDecode(m, &consulConfig); err != nil {
+ dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
+ WeaklyTypedInput: true,
+ Result: &consulConfig,
+ })
+ if err != nil {
+ return err
+ }
+ if err := dec.Decode(m); err != nil {
return err
} | Create a weak decoder to parse time.Duration.
Hat tip to Alex for pointing this out (vs patching mapstructure) | hashicorp_nomad | train |
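The commit wires mapstructure's string-to-duration hook into a weakly typed decoder. A rough Python analogue of the same idea — a decode hook that converts duration strings while passing other values through unchanged (the parsing rules here are simplified assumptions, not Go's time.ParseDuration):

```python
from datetime import timedelta

UNITS = {"ms": "milliseconds", "s": "seconds", "m": "minutes", "h": "hours"}

def duration_hook(value):
    # Convert strings like "30s" or "5m" to timedelta; leave everything else alone.
    if isinstance(value, str):
        for suffix, unit in sorted(UNITS.items(), key=lambda kv: -len(kv[0])):
            if value.endswith(suffix):
                return timedelta(**{unit: float(value[:-len(suffix)])})
    return value

def decode(raw, hooks=(duration_hook,)):
    out = {}
    for key, value in raw.items():
        for hook in hooks:     # each hook gets a chance to coerce the value
            value = hook(value)
        out[key] = value
    return out

cfg = decode({"timeout": "30s", "retries": 3})
assert cfg["timeout"] == timedelta(seconds=30)
assert cfg["retries"] == 3
```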
dd4d88c13b84d5fa206a22bdc955997114e8aa1c | diff --git a/go/zk/zkconn.go b/go/zk/zkconn.go
index <HASH>..<HASH> 100644
--- a/go/zk/zkconn.go
+++ b/go/zk/zkconn.go
@@ -310,10 +310,13 @@ func (conn *ZkConn) Delete(path string, version int) (err error) {
// return ErrConnectionClosed.
func (conn *ZkConn) Close() error {
conn.mu.Lock()
+ defer conn.mu.Unlock()
+ if conn.conn == nil {
+ return nil
+ }
c := conn.conn
conn.conn = nil
go c.Close()
- conn.mu.Unlock()
return nil
} | Allowing multiple Close in zkconn. | vitessio_vitess | train |
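A hedged Python sketch of the guarded-close pattern from the vitess change: hold the lock for the whole check-and-swap and treat a second close as a no-op instead of dereferencing a nil connection:

```python
import io
import threading

class Conn:
    """Connection wrapper whose close() is safe to call more than once."""
    def __init__(self, raw):
        self._lock = threading.Lock()
        self._raw = raw

    def close(self):
        with self._lock:              # held for the whole check-and-swap
            if self._raw is None:     # already closed: no-op, don't crash
                return
            raw, self._raw = self._raw, None
        raw.close()                   # actual teardown outside the lock

c = Conn(io.BytesIO())
c.close()
c.close()  # second call is a harmless no-op
```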
043590f9a3e8d257abc245184d05f8066735029f | diff --git a/test/replica_sets/connect_test.rb b/test/replica_sets/connect_test.rb
index <HASH>..<HASH> 100644
--- a/test/replica_sets/connect_test.rb
+++ b/test/replica_sets/connect_test.rb
@@ -37,6 +37,10 @@ class ConnectTest < Test::Unit::TestCase
assert_equal RS.primary, @conn.primary
assert_equal RS.secondaries.sort, @conn.secondaries.sort
assert_equal RS.arbiters.sort, @conn.arbiters.sort
+
+ @conn = ReplSetConnection.new([RS.host, RS.ports[1]], [RS.host, RS.ports[0]],
+ :name => RS.name)
+ assert @conn.connected?
end
def test_host_port_accessors | minor: test replica set connect with reverse node order | mongodb_mongo-ruby-driver | train |
46a72ef7cb5b11d4cca527f49d8ff26fed6220ab | diff --git a/controller/api/models.py b/controller/api/models.py
index <HASH>..<HASH> 100644
--- a/controller/api/models.py
+++ b/controller/api/models.py
@@ -626,6 +626,16 @@ def _etcd_purge_user(**kwargs):
pass
+def _etcd_create_app(**kwargs):
+ appname = kwargs['instance']
+ _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
+
+
+def _etcd_purge_app(**kwargs):
+ appname = kwargs['instance']
+ _etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
+
+
def _etcd_publish_domains(**kwargs):
app = kwargs['instance'].app
app_domains = app.domain_set.all()
@@ -664,3 +674,5 @@ if _etcd_client:
post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')
post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
post_delete.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
+ post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
+ post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models') | feat(controller): set app dir in etcd
Previously, adding or removing an app in the controller did not modify the
corresponding etcd key. This change ensures that the etcd keyspace is created/removed
when an app is modified in the database. | deis_deis | train |
37f571da84cee66b5817318015cf094a77dfafad | diff --git a/src/com/opencms/workplace/CmsWorkplaceDefault.java b/src/com/opencms/workplace/CmsWorkplaceDefault.java
index <HASH>..<HASH> 100755
--- a/src/com/opencms/workplace/CmsWorkplaceDefault.java
+++ b/src/com/opencms/workplace/CmsWorkplaceDefault.java
@@ -1,7 +1,7 @@
/*
* File : $Source: /alkacon/cvs/opencms/src/com/opencms/workplace/Attic/CmsWorkplaceDefault.java,v $
- * Date : $Date: 2000/03/16 19:26:44 $
- * Version: $Revision: 1.17 $
+ * Date : $Date: 2000/03/22 10:39:21 $
+ * Version: $Revision: 1.18 $
*
* Copyright (C) 2000 The OpenCms Group
*
@@ -45,7 +45,7 @@ import javax.servlet.http.*;
* Most special workplace classes may extend this class.
*
* @author Alexander Lucas
- * @version $Revision: 1.17 $ $Date: 2000/03/16 19:26:44 $
+ * @version $Revision: 1.18 $ $Date: 2000/03/22 10:39:21 $
* @see com.opencms.workplace.CmsXmlWpTemplateFile
*/
public class CmsWorkplaceDefault extends CmsXmlTemplate implements I_CmsWpConstants {
@@ -134,6 +134,21 @@ public class CmsWorkplaceDefault extends CmsXmlTemplate implements I_CmsWpConsta
}
/**
+ * User method to get the name of the user.
+ *
+ * @param cms A_CmsObject Object for accessing system resources.
+ * @param tagcontent Unused in this special case of a user method. Can be ignored.
+ * @param doc Reference to the A_CmsXmlContent object of the initiating XLM document <em>(not used here)</em>.
+ * @param userObj Hashtable with parameters <em>(not used here)</em>.
+ * @return String with the pics URL.
+ * @exception CmsException
+ */
+ public Object userName(A_CmsObject cms, String tagcontent, A_CmsXmlContent doc, Object userObj)
+ throws CmsException {
+ return Utils.getFullName(cms.getRequestContext().currentUser());
+ }
+
+ /**
* User method to generate an URL for the system pics folder.
* <P>
* All pictures should reside in the docroot of the webserver for
diff --git a/src/com/opencms/workplace/CmsWpMain.java b/src/com/opencms/workplace/CmsWpMain.java
index <HASH>..<HASH> 100755
--- a/src/com/opencms/workplace/CmsWpMain.java
+++ b/src/com/opencms/workplace/CmsWpMain.java
@@ -1,7 +1,7 @@
/*
* File : $Source: /alkacon/cvs/opencms/src/com/opencms/workplace/Attic/CmsWpMain.java,v $
- * Date : $Date: 2000/03/20 13:50:13 $
- * Version: $Revision: 1.9 $
+ * Date : $Date: 2000/03/22 10:39:21 $
+ * Version: $Revision: 1.10 $
*
* Copyright (C) 2000 The OpenCms Group
*
@@ -44,7 +44,7 @@ import javax.servlet.http.*;
*
* @author Alexander Lucas
* @author Michael Emmerich
- * @version $Revision: 1.9 $ $Date: 2000/03/20 13:50:13 $
+ * @version $Revision: 1.10 $ $Date: 2000/03/22 10:39:21 $
* @see com.opencms.workplace.CmsXmlWpTemplateFile
*/
public class CmsWpMain extends CmsWorkplaceDefault {
@@ -291,19 +291,4 @@ public class CmsWpMain extends CmsWorkplaceDefault {
}
return new Integer(currentViewIndex);
}
-
- /**
- * User method to get the name of the user.
- *
- * @param cms A_CmsObject Object for accessing system resources.
- * @param tagcontent Unused in this special case of a user method. Can be ignored.
- * @param doc Reference to the A_CmsXmlContent object of the initiating XLM document <em>(not used here)</em>.
- * @param userObj Hashtable with parameters <em>(not used here)</em>.
- * @return String with the pics URL.
- * @exception CmsException
- */
- public Object userName(A_CmsObject cms, String tagcontent, A_CmsXmlContent doc, Object userObj)
- throws CmsException {
- return Utils.getFullName(cms.getRequestContext().currentUser());
- }
} | method userName moved from CmsWpMain to CmsWorkplaceDefault | alkacon_opencms-core | train |
54afb77ed1751daec21cc9d3008fe59b8ee9b6dd | diff --git a/src/Bkwld/Decoy/Controllers/Elements.php b/src/Bkwld/Decoy/Controllers/Elements.php
index <HASH>..<HASH> 100644
--- a/src/Bkwld/Decoy/Controllers/Elements.php
+++ b/src/Bkwld/Decoy/Controllers/Elements.php
@@ -73,17 +73,18 @@ class Elements extends Base {
*/
public static function renderField($el, $key = null) {
if (!$key) $key = $el->inputName();
+ $id = str_replace('|', '-', $key);
switch($el->type) {
- case 'text': return Former::text($key, $el->label)->blockHelp($el->help);
- case 'textarea': return Former::textarea($key, $el->label)->blockHelp($el->help);
- case 'wysiwyg': return Former::wysiwyg($key, $el->label)->blockHelp($el->help);
- case 'image': return Former::image($key, $el->label)->blockHelp($el->help);
- case 'file': return Former::upload($key, $el->label)->blockHelp($el->help);
- case 'boolean': return Former::checkbox($key, false)->checkboxes(array("<b>{$el->label}</b>" => array('name' => $key, 'value' => 1)))->blockHelp($el->help);
- case 'select': return Former::select($key, $el->label)->options($el->options)->blockHelp($el->help);
- case 'radios': return Former::radios($key, $el->label)->radios(FormerUtils::radioArray($el->options))->blockHelp($el->help);
- case 'checkboxes': return Former::checkboxes($key, $el->label)->checkboxes(FormerUtils::checkboxArray($key, $el->options))->blockHelp($el->help);
- case 'video-encoder': return Former::videoEncoder($key, $el->label)->blockHelp($el->help)->setModel($el);
+ case 'text': return Former::text($key, $el->label)->blockHelp($el->help)->id($id);
+ case 'textarea': return Former::textarea($key, $el->label)->blockHelp($el->help)->id($id);
+ case 'wysiwyg': return Former::wysiwyg($key, $el->label)->blockHelp($el->help)->id($id);
+ case 'image': return Former::image($key, $el->label)->blockHelp($el->help)->id($id);
+ case 'file': return Former::upload($key, $el->label)->blockHelp($el->help)->id($id);
+ case 'boolean': return Former::checkbox($key, false)->checkboxes(array("<b>{$el->label}</b>" => array('name' => $key, 'value' => 1)))->blockHelp($el->help)->id($id);
+ case 'select': return Former::select($key, $el->label)->options($el->options)->blockHelp($el->help)->id($id);
+ case 'radios': return Former::radios($key, $el->label)->radios(FormerUtils::radioArray($el->options))->blockHelp($el->help)->id($id);
+ case 'checkboxes': return Former::checkboxes($key, $el->label)->checkboxes(FormerUtils::checkboxArray($key, $el->options))->blockHelp($el->help)->id($id);
+ case 'video-encoder': return Former::videoEncoder($key, $el->label)->blockHelp($el->help)->setModel($el)->id($id);
/**
* Not ported yet from Frags:
 */ | Add usable ids to element fields | BKWLD_decoy | train
ef4ab76c254a3847fd2b6fc644012693cf00d28d | diff --git a/normalize/identity.py b/normalize/identity.py
index <HASH>..<HASH> 100644
--- a/normalize/identity.py
+++ b/normalize/identity.py
@@ -33,7 +33,7 @@ def record_id(object_, type_=None, selector=None, normalize_object_slot=None):
key_vals = list()
pk_cols = type_.primary_key
if selector and pk_cols and not all(
- (x.name,) in selector for x in pk_cols
+ selector[(x.name,)] for x in pk_cols
):
pk_cols = None
@@ -48,7 +48,7 @@ def record_id(object_, type_=None, selector=None, normalize_object_slot=None):
return tuple(
record_id(
v, type_.itemtype, selector[k], normalize_object_slot,
- ) for k, v in gen if (k,) in selector
+ ) for k, v in gen if selector[(k,)]
) if selector else tuple(
record_id(v, type_.itemtype, None, normalize_object_slot) for
k, v in gen
@@ -58,7 +58,7 @@ def record_id(object_, type_=None, selector=None, normalize_object_slot=None):
all_properties = type_._sorted_properties
if selector:
all_properties = tuple(
- x for x in all_properties if (x.name,) in selector
+ x for x in all_properties if selector[(x.name,)]
)
for prop in pk_cols or all_properties:
diff --git a/tests/test_mfs_diff.py b/tests/test_mfs_diff.py
index <HASH>..<HASH> 100644
--- a/tests/test_mfs_diff.py
+++ b/tests/test_mfs_diff.py
@@ -433,8 +433,13 @@ class TestDiffWithMultiFieldSelector(unittest2.TestCase):
self.assertDifferences(
person.diff_iter(
person2,
- compare_filter=no_populated_subfields_mfs,
+ compare_filter=some_populated_subfields_mfs,
ignore_empty_items=True,
),
- set(),
+ {
+ "ADDED .friends[0]",
+ "ADDED .friends[1]",
+ "ADDED .friends[2]",
+ "ADDED .friends[3]",
+ }
) | Fix a regression with diff using compare_filter
A couple of conversions from 'in' to the new MultiFieldSelector
subscript check were missed in the normalize.identity class. This would
lead to many differences not being detected correctly, especially with the new
'ignore_empty_items' feature. | hearsaycorp_normalize | train |
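The subtlety behind the regression above is that a containment test (`in`) and a subscript test (`[...]`) can disagree on a field selector. A toy Python sketch with hypothetical semantics — `__contains__` only matches explicit keys, while `__getitem__` also answers for keys covered by a wildcard:

```python
class Selector:
    """Toy field selector where 'in' and subscripting give different answers."""
    def __init__(self, explicit, wildcard=False):
        self.explicit = set(explicit)
        self.wildcard = wildcard

    def __contains__(self, path):
        return path in self.explicit

    def __getitem__(self, path):
        # Truthy when the path is selected explicitly or via the wildcard.
        return path in self.explicit or self.wildcard

sel = Selector(explicit=set(), wildcard=True)
assert ("name",) not in sel        # old check: the field looks unselected
assert sel[("name",)]              # new check: the wildcard selects it
```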
79d25804d064984466126a1b76b96c8880ccd3b7 | diff --git a/scripts/ec2.rb b/scripts/ec2.rb
index <HASH>..<HASH> 100644
--- a/scripts/ec2.rb
+++ b/scripts/ec2.rb
@@ -39,6 +39,8 @@ Standup.script :node do
[Standup::EC2::SecurityGroup.list[node.id_group]]
puts "waiting until it's up"
inst.wait_until {inst.state != :running}
+ puts "and a bit more to let it really up"
+ sleep 20
end
def configure_elastic_ip | Wait a bit more until instance is up | cloudcastle_standup | train |
1dc0c6d8e1a793cab0db26bb679cc716e7506ccf | diff --git a/lib/neo4j/shared/property.rb b/lib/neo4j/shared/property.rb
index <HASH>..<HASH> 100644
--- a/lib/neo4j/shared/property.rb
+++ b/lib/neo4j/shared/property.rb
@@ -222,7 +222,7 @@ module Neo4j::Shared
end
def type_converter(options)
- converter = options[:type_converter]
+ converter = options[:serializer]
return unless converter
options[:type] = converter.convert_type
options[:typecaster] = ActiveAttr::Typecasting::ObjectTypecaster.new
diff --git a/spec/unit/shared/property_spec.rb b/spec/unit/shared/property_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/unit/shared/property_spec.rb
+++ b/spec/unit/shared/property_spec.rb
@@ -111,7 +111,7 @@ describe Neo4j::Shared::Property do
let(:range) { 1..3 }
before do
- clazz.property :range, type_converter: converter
+ clazz.property :range, serializer: converter
end
it 'sets active_attr typecaster to ObjectTypecaster' do | change type_converter to serializer | neo4jrb_neo4j | train |
dcdc17724ba84fc9f74d9055a6552def70c72c2b | diff --git a/WebPConvertClass.php b/WebPConvertClass.php
index <HASH>..<HASH> 100755
--- a/WebPConvertClass.php
+++ b/WebPConvertClass.php
@@ -180,7 +180,11 @@ class WebPConvert {
foreach (self::$tools_order as $tool_name) {
self::logmsg('<br>trying <b>' . $tool_name . '</b>');
+ $time_start = microtime(true);
$result = call_user_func('webpconvert_' . $tool_name, $source, $destination, $quality, $strip_metadata);
+ $time_end = microtime(true);
+ self::logmsg('execution time: ' . round(($time_end - $time_start) * 1000) . ' ms');
+
if ($result === TRUE) {
self::logmsg('success!');
$success = TRUE; | added measurement of execution time | rosell-dk_webp-convert | train
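The PHP change above times each converter with microtime(); the equivalent pattern in Python uses a monotonic clock (a sketch — the wrapper name is hypothetical):

```python
import time

def timed_call(fn, *args, **kwargs):
    start = time.perf_counter()          # monotonic, unaffected by clock changes
    result = fn(*args, **kwargs)
    elapsed_ms = round((time.perf_counter() - start) * 1000)
    print(f"execution time: {elapsed_ms} ms")
    return result

timed_call(sum, range(1_000_000))
```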
4b99af0a44eabee91ec58f2ec4acd76d50fa8a55 | diff --git a/examples/CTC-TIMIT/train-timit.py b/examples/CTC-TIMIT/train-timit.py
index <HASH>..<HASH> 100755
--- a/examples/CTC-TIMIT/train-timit.py
+++ b/examples/CTC-TIMIT/train-timit.py
@@ -40,8 +40,8 @@ class Model(ModelDesc):
feat, labelidx, labelvalue, labelshape, seqlen = input_vars
label = tf.SparseTensor(labelidx, labelvalue, labelshape)
- cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=HIDDEN)
- cell = tf.nn.rnn_cell.MultiRNNCell([cell] * NLAYER)
+ cell = tf.contrib.rnn.BasicLSTMCell(num_units=HIDDEN)
+ cell = tf.contrib.rnn.MultiRNNCell([cell] * NLAYER)
initial = cell.zero_state(tf.shape(feat)[0], tf.float32)
diff --git a/examples/Char-RNN/char-rnn.py b/examples/Char-RNN/char-rnn.py
index <HASH>..<HASH> 100755
--- a/examples/Char-RNN/char-rnn.py
+++ b/examples/Char-RNN/char-rnn.py
@@ -69,8 +69,8 @@ class Model(ModelDesc):
def _build_graph(self, input_vars):
input, nextinput = input_vars
- cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=param.rnn_size)
- cell = tf.nn.rnn_cell.MultiRNNCell([cell] * param.num_rnn_layer)
+ cell = tf.contrib.rnn.BasicLSTMCell(num_units=param.rnn_size)
+ cell = tf.contrib.rnn.MultiRNNCell([cell] * param.num_rnn_layer)
self.initial = initial = cell.zero_state(tf.shape(input)[0], tf.float32)
@@ -80,7 +80,7 @@ class Model(ModelDesc):
input_list = tf.unstack(input_feature, axis=1) # seqlen x (Bxrnnsize)
# seqlen is 1 in inference. don't need loop_function
- outputs, last_state = tf.nn.rnn(cell, input_list, initial, scope='rnnlm')
+ outputs, last_state = tf.contrib.rnn.static_rnn(cell, input_list, initial, scope='rnnlm')
self.last_state = tf.identity(last_state, 'last_state')
# seqlen x (Bxrnnsize) | Change rnn-cell to fix #<I> (#<I>)
* Change rnn-cell to fix #<I> | tensorpack_tensorpack | train |
9133fdc0e23296429ba5358844a4ecf0e41c0468 | diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index <HASH>..<HASH> 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -1,4 +1,5 @@
require "rubygems"
+require "test/unit"
require "spec"
# gem install redgreen for colored test output | Need test/unit to test assert functionality | brynary_webrat | train |
d65a65806e797e7b8b206243c289509b86590d99 | diff --git a/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/common/states/Http2StateUtil.java b/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/common/states/Http2StateUtil.java
index <HASH>..<HASH> 100644
--- a/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/common/states/Http2StateUtil.java
+++ b/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/common/states/Http2StateUtil.java
@@ -20,10 +20,13 @@ package org.wso2.transport.http.netty.contractimpl.common.states;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http2.Http2CodecUtil;
+import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2ConnectionEncoder;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.HttpConversionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.wso2.transport.http.netty.contractimpl.sender.http2.Http2ClientChannel;
import org.wso2.transport.http.netty.contractimpl.sender.http2.Http2DataEventListener;
import org.wso2.transport.http.netty.contractimpl.sender.http2.OutboundMsgHolder;
@@ -33,6 +36,8 @@ import org.wso2.transport.http.netty.contractimpl.sender.http2.OutboundMsgHolder
*/
public class Http2StateUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(Http2StateUtil.class);
+
public static void writeHttp2Headers(ChannelHandlerContext ctx, OutboundMsgHolder outboundMsgHolder,
Http2ClientChannel http2ClientChannel, Http2ConnectionEncoder encoder,
int streamId, HttpHeaders headers, Http2Headers http2Headers,
@@ -54,4 +59,21 @@ public class Http2StateUtil {
outboundMsgHolder.setRequestWritten(true);
}
}
+
+ public static int initiateStream(ChannelHandlerContext ctx, Http2Connection connection,
+ Http2ClientChannel http2ClientChannel,
+ OutboundMsgHolder outboundMsgHolder) throws Http2Exception {
+ int id = getNextStreamId(connection);
+ http2ClientChannel.putInFlightMessage(id, outboundMsgHolder);
+ http2ClientChannel.getDataEventListeners()
+ .forEach(dataEventListener -> dataEventListener.onStreamInit(ctx, id));
+ return id;
+ }
+
+ private static synchronized int getNextStreamId(Http2Connection connection) throws Http2Exception {
+ int nextStreamId = connection.local().incrementAndGetNextStreamId();
+ connection.local().createStream(nextStreamId, false);
+ LOG.debug("Stream created streamId: {}", nextStreamId);
+ return nextStreamId;
+ }
}
diff --git a/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/sender/states/http2/SendingHeaders.java b/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/sender/states/http2/SendingHeaders.java
index <HASH>..<HASH> 100644
--- a/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/sender/states/http2/SendingHeaders.java
+++ b/components/org.wso2.transport.http.netty/src/main/java/org/wso2/transport/http/netty/contractimpl/sender/states/http2/SendingHeaders.java
@@ -94,7 +94,7 @@ public class SendingHeaders implements SenderState {
private void writeHeaders(ChannelHandlerContext ctx, HttpContent msg) throws Http2Exception {
// Initiate the stream
boolean endStream = false;
- this.streamId = initiateStream(ctx);
+ this.streamId = Http2StateUtil.initiateStream(ctx, connection, http2ClientChannel, outboundMsgHolder);
http2RequestWriter.setStreamId(streamId);
HttpRequest httpRequest = Util.createHttpRequest(httpOutboundRequest);
@@ -119,19 +119,4 @@ public class SendingHeaders implements SenderState {
Http2StateUtil.writeHttp2Headers(ctx, outboundMsgHolder, http2ClientChannel, encoder, streamId,
httpMsg.headers(), http2Headers, endStream);
}
-
- private int initiateStream(ChannelHandlerContext ctx) throws Http2Exception {
- int id = getNextStreamId();
- http2ClientChannel.putInFlightMessage(id, outboundMsgHolder);
- http2ClientChannel.getDataEventListeners()
- .forEach(dataEventListener -> dataEventListener.onStreamInit(ctx, id));
- return id;
- }
-
- private synchronized int getNextStreamId() throws Http2Exception {
- int nextStreamId = connection.local().incrementAndGetNextStreamId();
- connection.local().createStream(nextStreamId, false);
- LOG.debug("Stream created streamId: {}", nextStreamId);
- return nextStreamId;
- }
} | Move common functionality to StateUtils | wso2_transport-http | train |
aa25081296e19b994017cc1ca062eb9e1fbf9b86 | diff --git a/src/org/opencms/db/CmsDriverManager.java b/src/org/opencms/db/CmsDriverManager.java
index <HASH>..<HASH> 100644
--- a/src/org/opencms/db/CmsDriverManager.java
+++ b/src/org/opencms/db/CmsDriverManager.java
@@ -1,7 +1,7 @@
/*
* File : $Source: /alkacon/cvs/opencms/src/org/opencms/db/CmsDriverManager.java,v $
- * Date : $Date: 2007/08/10 12:58:50 $
- * Version: $Revision: 1.588 $
+ * Date : $Date: 2007/08/10 15:32:03 $
+ * Version: $Revision: 1.589 $
*
* This library is part of OpenCms -
* the Open Source Content Mananagement System
@@ -4736,18 +4736,18 @@ public final class CmsDriverManager implements I_CmsEventListener {
/**
* Moves a resource.<p>
*
- * You must ensure that the destination path is an absolute, valid and
- * existing VFS path. Relative paths from the source are currently not supported.<p>
+ * You must ensure that the parent of the destination path is an absolute, valid and
+ * existing VFS path. Relative paths from the source are not supported.<p>
*
* The moved resource will always be locked to the current user
* after the move operation.<p>
*
- * In case the target resource already exists, it is overwritten with the
- * source resource.<p>
+ * In case the target resource already exists, it will be overwritten with the
+ * source resource if possible.<p>
*
* @param dbc the current database context
- * @param source the resource to copy
- * @param destination the name of the copy destination with complete path
+ * @param source the resource to move
+ * @param destination the name of the move destination with complete path
* @param internal if set nothing more than the path is modified
*
* @throws CmsException if something goes wrong
@@ -4777,7 +4777,13 @@ public final class CmsDriverManager implements I_CmsEventListener {
m_lockManager.moveResource(source.getRootPath(), destination);
if (!internal) {
- source.setState(source.getState().isNew() ? CmsResource.STATE_NEW : CmsResource.STATE_CHANGED);
+ CmsResourceState newState = CmsResource.STATE_CHANGED;
+ if (source.getState().isNew()) {
+ newState = CmsResource.STATE_NEW;
+ } else if (source.getState().isDeleted()) {
+ newState = CmsResource.STATE_DELETED;
+ }
+ source.setState(newState);
// safe since this operation always uses the ids instead of the resource path
m_vfsDriver.writeResourceState(
dbc, | fixed issue while moving a folder with deleted subresources | alkacon_opencms-core | train |
ac410a3ef1f0b86fed0d25d5d127d189ad9e0d46 | diff --git a/ratpack-zipkin/src/main/java/ratpack/zipkin/ServerTracingModule.java b/ratpack-zipkin/src/main/java/ratpack/zipkin/ServerTracingModule.java
index <HASH>..<HASH> 100644
--- a/ratpack-zipkin/src/main/java/ratpack/zipkin/ServerTracingModule.java
+++ b/ratpack-zipkin/src/main/java/ratpack/zipkin/ServerTracingModule.java
@@ -24,7 +24,6 @@ import com.google.inject.multibindings.Multibinder;
import ratpack.guice.ConfigurableModule;
import ratpack.handling.HandlerDecorator;
import ratpack.zipkin.internal.DefaultServerTracingHandler;
-import ratpack.zipkin.internal.RatpackServerClientLocalSpanState;
import ratpack.zipkin.internal.ServerRequestAdapterFactory;
import ratpack.zipkin.internal.ServerResponseAdapterFactory; | Oops. Removes reference to uncommited file | hyleung_ratpack-zipkin | train |
3879a875ccc79aee5f0effca4e030aece61c3a41 | diff --git a/plugins/shared/cmd/launcher/command/device.go b/plugins/shared/cmd/launcher/command/device.go
index <HASH>..<HASH> 100644
--- a/plugins/shared/cmd/launcher/command/device.go
+++ b/plugins/shared/cmd/launcher/command/device.go
@@ -260,14 +260,27 @@ func (c *Device) startRepl() error {
c.Ui.Output("> Availabile commands are: exit(), fingerprint(), stop_fingerprint(), stats(), stop_stats(), reserve(id1, id2, ...)")
var fingerprintCtx, statsCtx context.Context
var fingerprintCancel, statsCancel context.CancelFunc
+
for {
in, err := c.Ui.Ask("> ")
if err != nil {
+ if fingerprintCancel != nil {
+ fingerprintCancel()
+ }
+ if statsCancel != nil {
+ statsCancel()
+ }
return err
}
switch {
case in == "exit()":
+ if fingerprintCancel != nil {
+ fingerprintCancel()
+ }
+ if statsCancel != nil {
+ statsCancel()
+ }
return nil
case in == "fingerprint()":
if fingerprintCtx != nil {
@@ -301,8 +314,6 @@ func (c *Device) startRepl() error {
c.Ui.Error(fmt.Sprintf("> Unknown command %q", in))
}
}
-
- return nil
}
func (c *Device) replOutput(ctx context.Context, startFingerprint, startStats <-chan context.Context, reserve <-chan []string) { | Fix device launcher ctx cleanup | hashicorp_nomad | train |
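The Go fix above cancels both contexts on every way out of the REPL loop. In Python the same "cleanup on all exit paths" concern is usually expressed with try/finally — a hedged sketch with hypothetical helpers:

```python
def repl(read_line, start_task):
    tasks = []  # cancel handles for background fingerprint/stats loops
    try:
        while True:
            line = read_line()          # may raise on EOF or I/O error
            if line == "exit()":
                return                  # finally still runs on return
            tasks.append(start_task(line))
    finally:
        for cancel in tasks:            # every exit path lands here
            cancel()

lines = iter(["fingerprint()", "exit()"])
repl(lambda: next(lines), lambda cmd: (lambda: print(f"cancelled {cmd}")))
```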
acf87811003b8f21f442142dc5c44e86dec93402 | diff --git a/structr-ui/src/main/java/org/structr/websocket/StructrWebSocket.java b/structr-ui/src/main/java/org/structr/websocket/StructrWebSocket.java
index <HASH>..<HASH> 100644
--- a/structr-ui/src/main/java/org/structr/websocket/StructrWebSocket.java
+++ b/structr-ui/src/main/java/org/structr/websocket/StructrWebSocket.java
@@ -53,42 +53,35 @@ import org.structr.websocket.command.PingCommand;
import org.structr.websocket.message.MessageBuilder;
import org.structr.websocket.message.WebSocketMessage;
-//~--- classes ----------------------------------------------------------------
/**
- *
- *
- *
*/
public class StructrWebSocket implements WebSocketListener {
private static final Logger logger = LoggerFactory.getLogger(StructrWebSocket.class.getName());
private static final Map<String, Class> commandSet = new LinkedHashMap<>();
- //~--- fields ---------------------------------------------------------
- private Session session = null;
- private Gson gson = null;
- private HttpServletRequest request = null;
- private SecurityContext securityContext = null;
- private WebsocketController syncController = null;
+ private Session session = null;
+ private Gson gson = null;
+ private HttpServletRequest request = null;
+ private SecurityContext securityContext = null;
+ private WebsocketController syncController = null;
private Map<String, FileUploadHandler> uploads = null;
- private Authenticator authenticator = null;
- private String pagePath = null;
- private Console console = null;
- private Boolean timedOut = false;
+ private Authenticator authenticator = null;
+ private String pagePath = null;
+ private Console console = null;
+ private Boolean timedOut = false;
- //~--- constructors ---------------------------------------------------
public StructrWebSocket() {}
public StructrWebSocket(final WebsocketController syncController, final Gson gson, final Authenticator authenticator) {
- this.uploads = new LinkedHashMap<>();
+ this.uploads = new LinkedHashMap<>();
this.syncController = syncController;
- this.gson = gson;
- this.authenticator = authenticator;
+ this.gson = gson;
+ this.authenticator = authenticator;
}
- //~--- methods --------------------------------------------------------
public void setRequest(final HttpServletRequest request) {
this.request = request;
}
@@ -560,7 +553,6 @@ public class StructrWebSocket implements WebSocketListener {
return null;
}
- //~--- set methods ----------------------------------------------------
public void setAuthenticated(final String sessionId, final Principal user) {
securityContext = SecurityContext.getInstance(user, AccessMode.Backend);
diff --git a/structr-ui/src/main/java/org/structr/websocket/command/SearchCommand.java b/structr-ui/src/main/java/org/structr/websocket/command/SearchCommand.java
index <HASH>..<HASH> 100644
--- a/structr-ui/src/main/java/org/structr/websocket/command/SearchCommand.java
+++ b/structr-ui/src/main/java/org/structr/websocket/command/SearchCommand.java
@@ -20,6 +20,7 @@ package org.structr.websocket.command;
import com.google.gson.Gson;
import com.google.gson.JsonSyntaxException;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
@@ -103,7 +104,7 @@ public class SearchCommand extends AbstractCommand {
}
- final List<GraphObject> result = Iterables.toList(StructrApp.getInstance(securityContext).query(cypherQuery, obj));
+ final List<GraphObject> result = flatten(Iterables.toList(StructrApp.getInstance(securityContext).query(cypherQuery, obj)));
int resultCountBeforePaging = result.size();
webSocketData.setRawResultCount(resultCountBeforePaging);
@@ -184,7 +185,6 @@ public class SearchCommand extends AbstractCommand {
}
- //~--- get methods ----------------------------------------------------
@Override
public String getCommand() {
@@ -192,4 +192,32 @@ public class SearchCommand extends AbstractCommand {
}
+ // ----- private methods -----
+ private List<GraphObject> flatten(final List src) {
+
+ final List<GraphObject> list = new LinkedList<>();
+
+ flatten(list, src);
+
+ return list;
+ }
+
+ private void flatten(final List<GraphObject> list, final Object o) {
+
+ if (o instanceof Iterable) {
+
+ for (final Object obj : (Iterable)o) {
+
+ flatten(list, obj);
+ }
+
+ } else if (o instanceof GraphObject) {
+
+ list.add((GraphObject)o);
+
+ } else {
+
+ logger.warn("Unable to handle object of type {}, ignoring.", o.getClass().getName());
+ }
+ }
} | Fixes graph browser problem caused by modified cypher query result structure. | structr_structr | train |
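The Java change above flattens an arbitrarily nested Cypher result into a flat list of graph objects. A minimal Python version of the same recursion (the leaf test is an assumption; the Java code instead logs and ignores unknown types):

```python
def flatten(obj, is_leaf=lambda o: not hasattr(o, "__iter__")
                                   or isinstance(o, (str, bytes))):
    out = []
    def walk(o):
        if is_leaf(o):
            out.append(o)       # keep graph-object-like leaves
        else:
            for item in o:      # descend into nested collections
                walk(item)
    walk(obj)
    return out

assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]
```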
e142cfeaf5518249c967daf807369c72e67acfdd | diff --git a/lib/rufus/scheduler/cronline.rb b/lib/rufus/scheduler/cronline.rb
index <HASH>..<HASH> 100644
--- a/lib/rufus/scheduler/cronline.rb
+++ b/lib/rufus/scheduler/cronline.rb
@@ -32,7 +32,11 @@ class Rufus::Scheduler
# (man 5 crontab) file line.
#
class CronLine
- NEXT_TIME_YEAR_LIMIT = Date.today.year + 10
+
+ # The max number of years in the future or the past before giving up
+ # searching for #next_time or #previous_time respectively
+ #
+ NEXT_TIME_MAX_YEARS = 14
# The string used for creating this cronline instance.
#
@@ -138,14 +142,16 @@ class Rufus::Scheduler
nt = nil
zt = ZoTime.new(from.to_i + 1, @timezone)
+ maxy = from.year + NEXT_TIME_MAX_YEARS
loop do
nt = zt.dup
- fail ArgumentError.new(
- "failed to calculate next time for '#{original}'"
- ) if nt.year > NEXT_TIME_YEAR_LIMIT
+ fail RangeError.new(
+ "failed to reach occurrence within " +
+ "#{NEXT_TIME_MAX_YEARS} years for '#{original}'"
+ ) if nt.year > maxy
unless date_match?(nt)
zt.add((24 - nt.hour) * 3600 - nt.min * 60 - nt.sec)
@@ -177,6 +183,7 @@ class Rufus::Scheduler
pt = nil
zt = ZoTime.new(from.to_i - 1, @timezone)
+ miny = from.year - NEXT_TIME_MAX_YEARS
loop do
@@ -185,6 +192,10 @@ class Rufus::Scheduler
fail ArgumentError.new(
"failed to calculate previous time for '#{original}'"
) if pt.year < 1965
+ fail RangeError.new(
+ "failed to reach occurrence within " +
+ "#{NEXT_TIME_MAX_YEARS} years for '#{original}'"
+ ) if pt.year < miny
unless date_match?(pt)
zt.substract(pt.hour * 3600 + pt.min * 60 + pt.sec + 1)
@@ -499,11 +510,15 @@ class Rufus::Scheduler
return false unless sub_match?(zt, :day, @days)
return false unless sub_match?(zt, :month, @months)
- if @weekdays && @monthdays
- return true if sub_match?(zt, :wday, @weekdays) || sub_match?(zt, :monthdays, @monthdays)
- end
+
+ return true if (
+ (@weekdays && @monthdays) &&
+ (sub_match?(zt, :wday, @weekdays) ||
+ sub_match?(zt, :monthdays, @monthdays)))
+
return false unless sub_match?(zt, :wday, @weekdays)
return false unless sub_match?(zt, :monthdays, @monthdays)
+
true
end
end
diff --git a/spec/cronline_spec.rb b/spec/cronline_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/cronline_spec.rb
+++ b/spec/cronline_spec.rb
@@ -519,11 +519,12 @@ describe Rufus::Scheduler::CronLine do
)
end
- it 'is not stuck in an infinite loop when the calculation fails' do
+ it "raises a RangeError if it doesn't reach the next time within x years" do
cronline = cl('0 0 * * mon#2,tue')
- allow(cronline).to receive(:date_match?).and_return(false)
- expect { cronline.next_time }.to raise_error(ArgumentError)
+ class << cronline; def date_match?(x=nil); false; end; end
+
+ expect { cronline.next_time }.to raise_error(RangeError)
end
end
@@ -659,11 +660,12 @@ describe Rufus::Scheduler::CronLine do
zlo(1999, 12, 31, 20, 59, 00))
end
- it 'is not stuck in an infinite loop when the calculation fails' do
+ it "raises a RangeError if it doesn't reach the previous time within x years" do
cronline = cl('0 0 * * mon#2,tue')
- allow(cronline).to receive(:date_match?).and_return(false)
- expect { cronline.previous_time }.to raise_error(ArgumentError)
+ class << cronline; def date_match?(x=nil); false; end; end
+
+ expect { cronline.previous_time }.to raise_error(RangeError)
end
end | Polish gh-<I> | jmettraux_rufus-scheduler | train |
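The rufus-scheduler fix bounds the next/previous-occurrence search to a window of years and raises RangeError instead of looping forever. A hedged Python sketch of the same guard (Python has no RangeError, so RuntimeError stands in; the year-at-a-time scan is a simplification):

```python
MAX_YEARS = 14

def next_match(start_year, date_match):
    # Scan forward one year at a time; give up after MAX_YEARS.
    year = start_year
    while not date_match(year):
        year += 1
        if year > start_year + MAX_YEARS:
            raise RuntimeError(
                f"failed to reach occurrence within {MAX_YEARS} years")
    return year

assert next_match(2015, lambda y: y % 4 == 0) == 2016
try:
    next_match(2015, lambda y: False)   # never matches: bounded failure
except RuntimeError as e:
    print(e)
```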
5a114cc8bda3cee64800e85bcd925d564233bd01 | diff --git a/src/Engine.php b/src/Engine.php
index <HASH>..<HASH> 100644
--- a/src/Engine.php
+++ b/src/Engine.php
@@ -8,23 +8,27 @@ namespace Amostajo\LightweightMVC;
* @author Alejandro Mostajo
* @license MIT
* @package Amostajo\LightweightMVC
+ * @version 1.0.2
*/
class Engine
{
/**
* Path to where controllers are.
+ * @since 1.0.0
* @var string
*/
protected $controllers_path;
/**
* Plugin namespace.
+ * @since 1.0.0
* @var string
*/
protected $namespace;
/**
* View class object.
+ * @since 1.0.0
* @var string
*/
protected $view;
@@ -32,6 +36,7 @@ class Engine
/**
* Default engine constructor.
+ * @since 1.0.0
*
* @param string $controllers_path
* @param string $namespace
@@ -46,6 +51,7 @@ class Engine
/**
* Calls controller and function.
* Echos return.
+ * @since 1.0.0
*
* @param string $controller_name Controller name and method. i.e. DealController@show
*/
@@ -59,9 +65,25 @@ class Engine
}
/**
+ * Calls controller and function, with arguments passed through.
+ * Echos return.
+ * @since 1.0.2
+ *
+ * @param string $controller_name Controller name and method. i.e. DealController@show
+ * @param array $args Function args passed by. Arguments ready for call_user_func_array call.
+ */
+ public function call_args( $controller_name, $args )
+ {
+ echo $this->exec( $controller_name, $args );
+ }
+
+ /**
* Returns controller results.
+ * @since 1.0.0
*
* @param string $controller_name Controller name and method. i.e. DealController@show
+ *
+ * @return mixed
*/
public function action( $controller_name )
{
@@ -73,8 +95,23 @@ class Engine
}
/**
+ * Returns controller results, with arguments passed through.
+ * @since 1.0.2
+ *
+ * @param string $controller_name Controller name and method. i.e. DealController@show
+ * @param array $args Function args passed by. Arguments ready for call_user_func_array call.
+ *
+ * @return mixed
+ */
+ public function action_args( $controller_name, $args )
+ {
+ return $this->exec( $controller_name, $args );
+ }
+
+ /**
* Executes controller.
* Returns result.
+ * @since 1.0.0
*
* @param string $controller_name Controller name and method. i.e. DealController@show
* @param array $args Controller parameters.
@@ -106,6 +143,7 @@ class Engine
/**
* Getter function.
+ * @since 1.0.1
*
* @param string $property
* | Optional controller calls.
Added additional controller calls for when function args are already
processed. | amostajo_lightweight-mvc | train |
314e285c74d75eca3510bc449c4312d06173af72 | diff --git a/a10_neutronclient/a10_certificate.py b/a10_neutronclient/a10_certificate.py
index <HASH>..<HASH> 100644
--- a/a10_neutronclient/a10_certificate.py
+++ b/a10_neutronclient/a10_certificate.py
@@ -36,6 +36,20 @@ class CertificateCreate(client_extension.Create, CertificateExtension):
list_columns = ['id', 'name', 'description', 'cert_data', 'key_data', 'intermediate_data', 'password']
def add_known_arguments(self, parser):
+ parser.add_argument(
+ '--cert-file',
+ action=client_extension.ReadFileAction,
+ dest='cert_data')
+
+ parser.add_argument(
+ '--key-file',
+ action=client_extension.ReadFileAction,
+ dest='key_data')
+
+ parser.add_argument(
+ '--intermediate-file',
+ action=client_extension.ReadFileAction,
+ dest='intermediate_data')
self._add_known_arguments(parser, ['name'])
diff --git a/a10_neutronclient/client_extension.py b/a10_neutronclient/client_extension.py
index <HASH>..<HASH> 100644
--- a/a10_neutronclient/client_extension.py
+++ b/a10_neutronclient/client_extension.py
@@ -13,6 +13,8 @@
# under the License.
+import argparse
+
from neutronclient.common import extension
from neutronclient.neutron import v2_0 as neutronV20
@@ -124,3 +126,10 @@ class Delete(extension.ClientExtensionDelete):
class Show(extension.ClientExtensionShow):
pass
+
+
+class ReadFileAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ with open(values, 'r') as file:
+ data = file.read()
+ setattr(namespace, self.dest, data) | Woot, adding cert data from files works | a10networks_a10-neutronclient | train |
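For readers unfamiliar with custom argparse actions, here is how the ReadFileAction from the diff above gets used — a standalone sketch that repeats the action so it runs on its own (the temp-file setup is purely for demonstration):

```python
import argparse
import tempfile

class ReadFileAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        with open(values, "r") as f:          # value is a path; store its contents
            setattr(namespace, self.dest, f.read())

parser = argparse.ArgumentParser()
parser.add_argument("--cert-file", action=ReadFileAction, dest="cert_data")

with tempfile.NamedTemporaryFile("w", suffix=".pem", delete=False) as tmp:
    tmp.write("-----BEGIN CERTIFICATE-----")
args = parser.parse_args(["--cert-file", tmp.name])
assert args.cert_data.startswith("-----BEGIN")
```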
46f07d71a7b7fa887459f1df054c41b5a8dfe480 | diff --git a/lib/sbsm/admin_server.rb b/lib/sbsm/admin_server.rb
index <HASH>..<HASH> 100644
--- a/lib/sbsm/admin_server.rb
+++ b/lib/sbsm/admin_server.rb
@@ -37,8 +37,8 @@ require 'sbsm/session_store'
module SBSM
# AdminClass must be tied to an Rack app
class AdminServer
- def initialize(app:)
- @session = SBSM::SessionStore.new(app: app)
+ def initialize(app:, multi_threaded: false)
+ @session = SBSM::SessionStore.new(app: app, multi_threaded: multi_threaded)
@admin_threads = ThreadGroup.new
end
def _admin(src, result, priority=0) | Admin server is now multi_threaded by default | zdavatz_sbsm | train |
087a9779035f082b17ba281e2b74aef5310a8298 | diff --git a/conf/webpack.common.js b/conf/webpack.common.js
index <HASH>..<HASH> 100644
--- a/conf/webpack.common.js
+++ b/conf/webpack.common.js
@@ -8,7 +8,7 @@ module.exports = {
path: path.join(process.cwd(), 'dist'),
filename: '[name].[hash].js',
sourceMapFilename: '[name].map',
- library: '@makerdao/makerdao-integration-poc',
+ library: '@makerdao/makerdao-exchange-integration',
libraryTarget: 'umd',
},
resolve: { | Update webpack config with new name | makerdao_dai.js | train |
91f60d916a574eae067e8dac80938654d6415d39 | diff --git a/contrib/externs/chrome_extensions.js b/contrib/externs/chrome_extensions.js
index <HASH>..<HASH> 100644
--- a/contrib/externs/chrome_extensions.js
+++ b/contrib/externs/chrome_extensions.js
@@ -2956,7 +2956,7 @@ chrome.pushMessaging.PushMessageEvent.prototype.hasListeners = function() {};
/**
- * @see http://code.google.com/chrome/extensions/extension.html#type-Port
+ * @see https://developer.chrome.com/apps/runtime.html#type-Port
* @constructor
*/
function Port() {} | Fix broken link to the 'Port' type.
-------------
Created by MOE: <URL> | google_closure-compiler | train |
c940ccc72bdf2fded5f0e98213a06fa1ca337996 | diff --git a/probes/src/main/java/com/hazelcast/simulator/probes/impl/ResultImpl.java b/probes/src/main/java/com/hazelcast/simulator/probes/impl/ResultImpl.java
index <HASH>..<HASH> 100644
--- a/probes/src/main/java/com/hazelcast/simulator/probes/impl/ResultImpl.java
+++ b/probes/src/main/java/com/hazelcast/simulator/probes/impl/ResultImpl.java
@@ -97,12 +97,12 @@ public class ResultImpl implements Result {
PrintStream stream = null;
try {
outputStream = new ByteArrayOutputStream();
- stream = new PrintStream(outputStream);
+ stream = new PrintStream(outputStream, true, "UTF-8");
histogram.outputPercentileDistribution(stream, 1.0);
- stream.flush();
-
- return new String(outputStream.toByteArray());
+ return new String(outputStream.toByteArray(), "UTF-8");
+ } catch (Exception e) {
+ return null;
} finally {
closeQuietly(stream);
closeQuietly(outputStream);
diff --git a/probes/src/test/java/com/hazelcast/simulator/probes/impl/ResultImplTest.java b/probes/src/test/java/com/hazelcast/simulator/probes/impl/ResultImplTest.java
index <HASH>..<HASH> 100644
--- a/probes/src/test/java/com/hazelcast/simulator/probes/impl/ResultImplTest.java
+++ b/probes/src/test/java/com/hazelcast/simulator/probes/impl/ResultImplTest.java
@@ -4,12 +4,18 @@ import com.hazelcast.simulator.probes.Result;
import org.HdrHistogram.Histogram;
import org.junit.Test;
+import java.io.PrintStream;
+
import static com.hazelcast.simulator.probes.ProbeTestUtils.createProbeResult;
import static com.hazelcast.simulator.probes.ProbeTestUtils.createRandomHistogram;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyDouble;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
public class ResultImplTest {
@@ -61,4 +67,16 @@ public class ResultImplTest {
Result result = createProbeResult(0);
assertNull(result.toHumanString("probe1"));
}
+
+ @Test
+ public void testToHumanString_withException() {
+ Histogram histogram = mock(Histogram.class);
+ doThrow(new RuntimeException("expected exception"))
+ .when(histogram).outputPercentileDistribution(any(PrintStream.class), anyDouble());
+
+ Result result = createProbeResult(0);
+ result.addHistogram("probe1", histogram);
+
+ assertNull(result.toHumanString("probe1"));
+ }
} | Fixed "reliance on default encoding" issue from SonarQube. | hazelcast_hazelcast-simulator | train |
e3238b0446ae800ef0b34a1c8dfc9925da14a6ab | diff --git a/great_expectations/data_asset/base.py b/great_expectations/data_asset/base.py
index <HASH>..<HASH> 100644
--- a/great_expectations/data_asset/base.py
+++ b/great_expectations/data_asset/base.py
@@ -45,8 +45,9 @@ class DataAsset(object):
batch_kwargs = kwargs.pop("batch_kwargs", None)
super(DataAsset, self).__init__(*args, **kwargs)
self._interactive_evaluation = interactive_evaluation
- self._initialize_expectations(config=expectations_config, data_asset_name=data_asset_name, batch_kwargs=batch_kwargs)
+ self._initialize_expectations(config=expectations_config, data_asset_name=data_asset_name)
self._data_context = data_context
+ self._batch_kwargs = batch_kwargs
if autoinspect_func is not None:
autoinspect_func(self)
@@ -219,7 +220,7 @@ class DataAsset(object):
return outer_wrapper
- def _initialize_expectations(self, config=None, data_asset_name=None, batch_kwargs=None):
+ def _initialize_expectations(self, config=None, data_asset_name=None):
"""Instantiates `_expectations_config` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: False,
@@ -266,9 +267,6 @@ class DataAsset(object):
"expectations": []
})
- if batch_kwargs is not None:
- self._expectations_config["meta"].update({"batch_kwargs": batch_kwargs})
-
# Pandas incorrectly interprets this as an attempt to create a column and throws up a warning. Suppress it
# since we are subclassing.
with warnings.catch_warnings():
@@ -552,6 +550,9 @@ class DataAsset(object):
else:
return expectation
+ def get_batch_kwargs(self):
+ return self._batch_kwargs
+
def discard_failing_expectations(self):
res = self.validate(only_return_failures=True).get('results')
if any(res):
@@ -973,6 +974,9 @@ If you wish to change this behavior, please set discard_failed_expectations, dis
else:
result["meta"].update({"run_id": str(uuid.uuid4())})
+ if self._batch_kwargs is not None:
+ result["meta"].update({"batch_kwargs": self._batch_kwargs})
+
if save_dataset_on_failure is not None and result["success"] == False:
##### WARNING: HACKED FOR DEMO #######
bucket = save_dataset_on_failure.bucket_name
diff --git a/great_expectations/dataset/dataset.py b/great_expectations/dataset/dataset.py
index <HASH>..<HASH> 100644
--- a/great_expectations/dataset/dataset.py
+++ b/great_expectations/dataset/dataset.py
@@ -202,10 +202,10 @@ class Dataset(MetaDataset):
"""Returns: int"""
raise NotImplementedError
- def _initialize_expectations(self, config=None, data_asset_name=None, batch_kwargs=None):
+ def _initialize_expectations(self, config=None, data_asset_name=None):
"""Override data_asset_type with "Dataset"
"""
- super(Dataset, self)._initialize_expectations(config=config, data_asset_name=data_asset_name, batch_kwargs=batch_kwargs)
+ super(Dataset, self)._initialize_expectations(config=config, data_asset_name=data_asset_name)
self._expectations_config["data_asset_type"] = "Dataset"
@classmethod | Move batch_kwargs storage | great-expectations_great_expectations | train |
45db555a6abb31b2644f82bed8041117a4e05fa2 | diff --git a/library/src/main/java/trikita/anvil/SimpleAttrNode.java b/library/src/main/java/trikita/anvil/SimpleAttrNode.java
index <HASH>..<HASH> 100644
--- a/library/src/main/java/trikita/anvil/SimpleAttrNode.java
+++ b/library/src/main/java/trikita/anvil/SimpleAttrNode.java
@@ -18,7 +18,7 @@ public abstract class SimpleAttrNode<T> implements Nodes.AttrNode {
@Override
public boolean equals(Object obj) {
- return getClass() == obj.getClass() &&
+ return obj != null && getClass() == obj.getClass() &&
(this.value == null
&& ((SimpleAttrNode) obj).value == null
|| this.value.equals(((SimpleAttrNode) obj).value)); | checking for null in SimpleAttrNode, fixes crashes in some very stupid cases | zserge_anvil | train |
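The one-line guard above is the standard null-safe equals shape. A hedged Python analogue of the same pattern (Python's `==` against None does not throw, so the isinstance check plays the role of the Java null/class guard; the class name is invented):

```python
class SimpleAttr:
    """Value holder whose equality check refuses foreign types up front."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        # Reject None and unrelated types before touching attributes --
        # the equivalent of the `obj != null && getClass() == obj.getClass()` guard.
        if not isinstance(other, SimpleAttr):
            return NotImplemented
        if self.value is None and other.value is None:
            return True
        return self.value == other.value

# SimpleAttr(1) == None  -> False, with no attribute access on None
```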
01f43632ca1410e8195835382468d8404c6924e3 | diff --git a/graphql/subscription/map_async_iterator.py b/graphql/subscription/map_async_iterator.py
index <HASH>..<HASH> 100644
--- a/graphql/subscription/map_async_iterator.py
+++ b/graphql/subscription/map_async_iterator.py
@@ -6,6 +6,7 @@ from typing import AsyncIterable, Callable
__all__ = ['MapAsyncIterator']
+# noinspection PyAttributeOutsideInit
class MapAsyncIterator:
"""Map an AsyncIterable over a callback function.
@@ -23,75 +24,72 @@ class MapAsyncIterator:
self.reject_callback = reject_callback
self._close_event = Event()
- @property
- def closed(self) -> bool:
- return self._close_event.is_set()
-
- @closed.setter
- def closed(self, value: bool) -> None:
- if value:
- self._close_event.set()
- else:
- self._close_event.clear()
-
def __aiter__(self):
return self
async def __anext__(self):
- if self.closed:
+ if self.is_closed:
if not isasyncgen(self.iterator):
raise StopAsyncIteration
- result = await self.iterator.__anext__()
- return self.callback(result)
+ value = await self.iterator.__anext__()
+ result = self.callback(value)
- _close = ensure_future(self._close_event.wait())
- _next = ensure_future(self.iterator.__anext__())
- done, pending = await wait(
- [_close, _next],
- return_when=FIRST_COMPLETED,
- )
+ else:
+ aclose = ensure_future(self._close_event.wait())
+ anext = ensure_future(self.iterator.__anext__())
- for task in pending:
- task.cancel()
+ done, pending = await wait(
+ [aclose, anext], return_when=FIRST_COMPLETED)
+ for task in pending:
+ task.cancel()
- if _close.done():
- raise StopAsyncIteration
+ if aclose.done():
+ raise StopAsyncIteration
- if _next.done():
- error = _next.exception()
+ error = anext.exception()
if error:
if not self.reject_callback or isinstance(error, (
StopAsyncIteration, GeneratorExit)):
raise error
result = self.reject_callback(error)
else:
- result = self.callback(_next.result())
+ value = anext.result()
+ result = self.callback(value)
- return (await result) if isawaitable(result) else result
+ return await result if isawaitable(result) else result
async def athrow(self, type_, value=None, traceback=None):
- if self.closed:
- return
- athrow = getattr(self.iterator, 'athrow', None)
- if athrow:
- await athrow(type_, value, traceback)
- else:
- self.closed = True
- if value is None:
- if traceback is None:
- raise type_
- value = type_()
- if traceback is not None:
- value = value.with_traceback(traceback)
- raise value
+ if not self.is_closed:
+ athrow = getattr(self.iterator, 'athrow', None)
+ if athrow:
+ await athrow(type_, value, traceback)
+ else:
+ self.is_closed = True
+ if value is None:
+ if traceback is None:
+ raise type_
+ value = type_()
+ if traceback is not None:
+ value = value.with_traceback(traceback)
+ raise value
async def aclose(self):
- if self.closed:
- return
- aclose = getattr(self.iterator, 'aclose', None)
- if aclose:
- try:
- await aclose()
- except RuntimeError:
- pass
- self.closed = True
+ if not self.is_closed:
+ aclose = getattr(self.iterator, 'aclose', None)
+ if aclose:
+ try:
+ await aclose()
+ except RuntimeError:
+ pass
+ self.is_closed = True
+
+ @property
+ def is_closed(self) -> bool:
+ return self._close_event.is_set()
+
+ @is_closed.setter
+ def is_closed(self, value: bool) -> None:
+ if value:
+ self._close_event.set()
+ else:
+ self._close_event.clear()
diff --git a/tests/subscription/test_map_async_iterator.py b/tests/subscription/test_map_async_iterator.py
index <HASH>..<HASH> 100644
--- a/tests/subscription/test_map_async_iterator.py
+++ b/tests/subscription/test_map_async_iterator.py
@@ -257,18 +257,22 @@ def describe_map_async_iterator():
yield 2
yield 3
- doubles = MapAsyncIterator(source(), lambda x: x * 2)
+ singles = source()
+ doubles = MapAsyncIterator(singles, lambda x: x * 2)
result = await anext(doubles)
assert result == 2
- # Block at event.wait()
- fut = ensure_future(anext(doubles))
- await sleep(.01)
- assert not fut.done()
+ # Make sure it is blocked
+ doubles_future = ensure_future(anext(doubles))
+ await sleep(.05)
+ assert not doubles_future.done()
- # Trigger cancellation and watch StopAsyncIteration propogate
+ # Unblock and watch StopAsyncIteration propagate
await doubles.aclose()
- await sleep(.01)
- assert fut.done()
- assert isinstance(fut.exception(), StopAsyncIteration)
+ await sleep(.05)
+ assert doubles_future.done()
+ assert isinstance(doubles_future.exception(), StopAsyncIteration)
+
+ with raises(StopAsyncIteration):
+ await anext(singles) | Minor changes in MapAsyncIterator and its test | graphql-python_graphql-core-next | train |
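The refactor above keeps the core trick of MapAsyncIterator: race the source's `__anext__` against a close Event so `aclose()` can unblock a parked consumer. A stripped-down sketch of that shape — the class and method names here are illustrative, not the library's API:

```python
import asyncio

class ClosableIterator:
    """Async-iterator wrapper whose __anext__ also wakes when closed."""

    def __init__(self, source):
        self._source = source
        self._closed = asyncio.Event()

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._closed.is_set():
            raise StopAsyncIteration
        aclose = asyncio.ensure_future(self._closed.wait())
        anext_ = asyncio.ensure_future(self._source.__anext__())
        done, pending = await asyncio.wait(
            {aclose, anext_}, return_when=asyncio.FIRST_COMPLETED)
        for task in pending:
            task.cancel()
        if aclose in done:            # closing wins, as in the commit
            raise StopAsyncIteration
        return anext_.result()        # re-raises StopAsyncIteration from the source

    async def aclose(self):
        self._closed.set()            # unblocks any consumer parked in __anext__
```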
c80dd86adc27835b9d36b700d4aebd58501612ec | diff --git a/ncclient/operations/rpc.py b/ncclient/operations/rpc.py
index <HASH>..<HASH> 100644
--- a/ncclient/operations/rpc.py
+++ b/ncclient/operations/rpc.py
@@ -208,26 +208,24 @@ class RPCReplyListener(SessionListener): # internal use
if self._device_handler.perform_qualify_check():
if tag != qualify("rpc-reply"):
return
+ if "message-id" not in attrs:
+ # required attribute so raise OperationError
+ raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
for key in attrs: # in the <rpc-reply> attributes
- message_id = attrs.get("message-id")
- if message_id is None:
- # required attribute so raise OperationError
- raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
- else:
- if key == "message-id": # if we found msgid attr
- id = attrs[key] # get the msgid
- with self._lock:
- try:
- rpc = self._id2rpc[id] # the corresponding rpc
- logger.debug("Delivering to %r" % rpc)
- rpc.deliver_reply(raw)
- except KeyError:
- raise OperationError("Unknown 'message-id': %s" % id)
- # no catching other exceptions, fail loudly if must
- else:
- # if no error delivering, can del the reference to the RPC
- del self._id2rpc[id]
- break
+ if key == "message-id": # if we found msgid attr
+ id = attrs[key] # get the msgid
+ with self._lock:
+ try:
+ rpc = self._id2rpc[id] # the corresponding rpc
+ logger.debug("Delivering to %r" % rpc)
+ rpc.deliver_reply(raw)
+ except KeyError:
+ raise OperationError("Unknown 'message-id': %s" % id)
+ # no catching other exceptions, fail loudly if must
+ else:
+ # if no error delivering, can del the reference to the RPC
+ del self._id2rpc[id]
+ break
def errback(self, err):
try: | Moved check out of for loop and changed to simpler if | ncclient_ncclient | train |
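The restructuring above hoists a loop-invariant required-attribute check so it runs once, before iteration. The same shape in a short Python sketch — the id2rpc registry and deliver_reply() are assumed stand-ins, not ncclient's API:

```python
def dispatch_reply(attrs, id2rpc, raw):
    """Deliver a reply to the RPC matching its message-id."""
    if "message-id" not in attrs:          # invariant: check once, not per key
        raise ValueError("Could not find 'message-id' attribute in <rpc-reply>")
    msg_id = attrs["message-id"]
    try:
        rpc = id2rpc.pop(msg_id)           # unknown ids fail loudly below
    except KeyError:
        raise ValueError("Unknown 'message-id': %s" % msg_id)
    rpc.deliver_reply(raw)
```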
d83e93475e70797311c4072b7ccb23293379469b | diff --git a/src/agile.js b/src/agile.js
index <HASH>..<HASH> 100644
--- a/src/agile.js
+++ b/src/agile.js
@@ -23,7 +23,8 @@ var AGILE_METHODS = {
OBJECT: [
{ name: 'keys', action: objKeys },
{ name: 'toArray', action: toArray },
- { name: 'extend', action: extend }],
+ { name: 'extend', action: extend },
+ { name: 'forEach', action: forEach }],
STRING: [
{ name: 'startsWith', action: startsWith },
{ name: 'endsWith', action: endsWith },
@@ -65,7 +66,8 @@ var AGILE_METHODS = {
{ name: 'sum', action: sum },
{ name: 'pluck', action: map },
{ name: 'pick', action: filter },
- { name:'some', action: contains }]
+ { name: 'some', action: contains },
+ { name: 'forEach', action: forEach }] // DRY, remove common collection's function to owned array
};
/**
@@ -219,7 +221,6 @@ agile.noop = noop;
agile.uppercase = uppercase;
agile.lowercase = lowercase;
agile.toJson = toJson;
-agile.forEach = forEach;
//@static parse method
agile.parse = $parse;
diff --git a/test/spec/agile/agile.js b/test/spec/agile/agile.js
index <HASH>..<HASH> 100644
--- a/test/spec/agile/agile.js
+++ b/test/spec/agile/agile.js
@@ -211,6 +211,14 @@ describe('agile.js', function() {
.extend({b:2}, {c:3})
.keys()
.value()).toEqual(['a', 'b', 'c']);
+
+ expect(_({a:1})
+ .forEach(function(){}, this)
+ .value()).toEqual({a:1});
+
+ expect(_([1,2,3])
+ .forEach(function(){}, this)
+ .value()).toEqual([1,2,3]);
});
it('should return a value and not wrapper object', function() { | fix(agile): Array/ObjectWrappers add forEach as a chaining func | a8m_agile | train |
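The change above makes forEach part of the chainable wrapper API for both arrays and objects. A minimal Python sketch of the fluent-wrapper idea (names invented for illustration): the side-effecting step returns the wrapper itself so calls keep chaining.

```python
class Chain:
    """Tiny fluent wrapper: side-effecting steps return the wrapper itself."""

    def __init__(self, items):
        self._items = items

    def for_each(self, fn):
        for item in self._items:
            fn(item)
        return self              # returning self keeps the chain alive

    def value(self):
        return self._items

# Chain([1, 2, 3]).for_each(print).value()  -> [1, 2, 3]
```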
b19d57e1c35fceeace1a53fd87431fd100bf665d | diff --git a/clientv3/cluster.go b/clientv3/cluster.go
index <HASH>..<HASH> 100644
--- a/clientv3/cluster.go
+++ b/clientv3/cluster.go
@@ -31,10 +31,10 @@ type (
)
type Cluster interface {
- // List lists the current cluster membership.
+ // MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
- // Leader returns the current leader member.
+ // MemberLeader returns the current leader member.
MemberLeader(ctx context.Context) (*Member, error)
// MemberAdd adds a new member into the cluster. | clientv3: fix godoc for member apis | etcd-io_etcd | train |
b90cbbff8cd31a4a49414bd203f24e742ee1a67b | diff --git a/coursera/coursera_dl.py b/coursera/coursera_dl.py
index <HASH>..<HASH> 100755
--- a/coursera/coursera_dl.py
+++ b/coursera/coursera_dl.py
@@ -466,17 +466,17 @@ def download_file_nowget(url, fn, cookies_file):
bw = BandwidthCalc()
chunk_sz = 1048576
bytesread = 0
- f = open(fn, 'wb')
- while True:
- data = urlfile.read(chunk_sz)
- if not data:
- print '.'
- break
- bw.received(len(data))
- f.write(data)
- bytesread += len(data)
- print '\r%d bytes read%s' % (bytesread, bw),
- sys.stdout.flush()
+ with open(fn, 'wb') as f:
+ while True:
+ data = urlfile.read(chunk_sz)
+ if not data:
+ print '.'
+ break
+ bw.received(len(data))
+ f.write(data)
+ bytesread += len(data)
+ print '\r%d bytes read%s' % (bytesread, bw),
+ sys.stdout.flush()
urlfile.close() | Add another avoidance point of resource leaks with open files. | coursera-dl_coursera-dl | train |
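The with-statement introduced above guarantees the output file is closed even when a chunk read or write raises. The same idea extends to the URL handle as well; a small sketch covering both resources at once:

```python
from urllib.request import urlopen

def download(url, fn, chunk_sz=1 << 20):
    """Stream url to fn; both handles close on success or error."""
    with urlopen(url) as src, open(fn, "wb") as dst:
        while True:
            chunk = src.read(chunk_sz)
            if not chunk:
                break
            dst.write(chunk)
```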
b4fc68d72db92ee75d190c4dbc0306bb634cba63 | diff --git a/safe/gui/tools/minimum_needs/needs_calculator_dialog.py b/safe/gui/tools/minimum_needs/needs_calculator_dialog.py
index <HASH>..<HASH> 100644
--- a/safe/gui/tools/minimum_needs/needs_calculator_dialog.py
+++ b/safe/gui/tools/minimum_needs/needs_calculator_dialog.py
@@ -119,7 +119,7 @@ class NeedsCalculatorDialog(QtGui.QDialog, FORM_CLASS):
displaced = 0
else:
if type(population) is basestring:
- population = str(population).deplace(',', '')
+ population = str(population).replace(',', '')
try:
displaced = int(population) | Fix <I> - fix typo introduced in previous commit | inasafe_inasafe | train |
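The corrected line strips thousands separators before the int() conversion. A compact sketch of that parse path — the zero fallback for unparseable input is assumed for illustration:

```python
def parse_population(value):
    """Accept ints or comma-grouped strings such as "12,345"."""
    if isinstance(value, str):
        value = value.replace(",", "")   # the fixed call (was the 'deplace' typo)
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0                         # assumed fallback for unparseable input
```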
b3c46b79bf07cf6dceba4a73d428a9adee7d0081 | diff --git a/libexec/sendmailservice.py b/libexec/sendmailservice.py
index <HASH>..<HASH> 100755
--- a/libexec/sendmailservice.py
+++ b/libexec/sendmailservice.py
@@ -73,7 +73,13 @@ msg['To'] = args.to
#
# According to RFC 2046, the last part of a multipart message, in this
# case the HTML message, is best and preferred.
+#
+# :fixme: need to encode the body if not ascii, see
+# http://mg.pov.lt/blog/unicode-emails-in-python.html for a nice
+# solution.
+#
msg.attach(MIMEText(TEXT_template % vars(args), 'plain'))
+# :fixme: need to html-escape all values and encode the body
msg.attach(MIMEText(HTML_template % vars(args), 'html'))
# Send the message via local SMTP server. | Add comments about work still to do (encoding, escaping). | Alignak-monitoring_alignak | train |
70ee6be14e31aa1bfd433f1bddca778f71929afb | diff --git a/ast.go b/ast.go
index <HASH>..<HASH> 100644
--- a/ast.go
+++ b/ast.go
@@ -1057,11 +1057,15 @@ type Show struct {
// Format formats the node.
func (node *Show) Format(buf *TrackedBuffer) {
buf.Myprintf("show %s", node.Type)
- if node.OnTable.Name.v != "" {
+ if node.HasOnTable() {
buf.Myprintf(" on %v", node.OnTable)
}
}
+func (node *Show) HasOnTable() bool {
+ return node.OnTable.Name.v != ""
+}
+
// WalkSubtree walks the nodes of the subtree.
func (node *Show) WalkSubtree(visit Visit) error {
return nil | add support for show vindexes | xwb1989_sqlparser | train |
ad4bbbaa8e060309f3d16ac9884b392b444717c4 | diff --git a/py3status/core.py b/py3status/core.py
index <HASH>..<HASH> 100644
--- a/py3status/core.py
+++ b/py3status/core.py
@@ -163,7 +163,7 @@ class Common:
self.py3_wrapper.notify_user(msg, level=level)
-class Py3statusWrapper():
+class Py3statusWrapper:
"""
This is the py3status wrapper.
"""
@@ -182,6 +182,7 @@ class Py3statusWrapper():
self.py3_modules = []
self.py3_modules_initialized = False
self.queue = deque()
+ self.update_request = Event()
def get_config(self):
"""
@@ -673,6 +674,10 @@ class Py3statusWrapper():
# we don't know so just update.
container_module['module'].force_update()
+ # we need to update the output
+ if self.queue:
+ self.update_request.set()
+
def log(self, msg, level='info'):
"""
log this information to syslog or user provided logfile.
@@ -839,12 +844,12 @@ class Py3statusWrapper():
print_line(dumps(header))
print_line('[[]')
+ update_due = None
# main loop
while True:
- # sleep a bit to avoid killing the CPU
- # by doing this at the begining rather than the end
- # of the loop we ensure a smoother first render of the i3bar
- time.sleep(0.1)
+            # wait until an update is requested
+ self.update_request.wait(timeout=update_due)
+ update_due = None
while not self.i3bar_running:
time.sleep(0.1)
@@ -872,7 +877,7 @@ class Py3statusWrapper():
# update i3status time/tztime items
if interval == 0 or sec % interval == 0:
- i3status_thread.update_times()
+ update_due = i3status_thread.update_times()
# check if an update is needed
if self.queue:
@@ -889,6 +894,12 @@ class Py3statusWrapper():
# dump the line to stdout
print_line(',[{}]'.format(out))
+ # we've done our work here so we can clear the request
+ self.update_request.clear()
+ # just in case check if we have an update and request if needed
+ if self.queue:
+ self.update_request.set()
+
def handle_cli_command(self, config):
"""Handle a command from the CLI.
"""
diff --git a/py3status/i3status.py b/py3status/i3status.py
index <HASH>..<HASH> 100644
--- a/py3status/i3status.py
+++ b/py3status/i3status.py
@@ -195,19 +195,29 @@ class I3status(Thread):
self.i3status_pipe = None
self.time_modules = []
self.tmpfile_path = None
+ self.update_due = 0
def update_times(self):
"""
Update time for any i3status time/tztime items.
+ Returns the time till next update needed.
"""
- updated = []
- for module in self.i3modules.values():
- if module.is_time_module:
- if module.update_time_value():
- updated.append(module.module_name)
- if updated:
- # trigger the update so new time is shown
- self.py3_wrapper.notify_update(updated)
+ now = time()
+ if now > self.update_due:
+ updated = []
+ for module in self.i3modules.values():
+ if module.is_time_module:
+ if module.update_time_value():
+ updated.append(module.module_name)
+ if updated:
+ # trigger the update so new time is shown
+ self.py3_wrapper.notify_update(updated)
+ # time we next need to do an update
+ self.update_due = int(now) + 1
+
+ # return time till next update wanted
+ # currently once a second
+ return 1 - (now % 1)
def valid_config_param(self, param_name, cleanup=False):
"""
@@ -238,7 +248,8 @@ class I3status(Thread):
self.py3_wrapper)
if self.i3modules[conf_name].update_from_item(item):
updates.append(conf_name)
- self.py3_wrapper.notify_update(updates)
+ if updates:
+ self.py3_wrapper.notify_update(updates)
def update_json_list(self):
""" | use wait() instead of sleep() in main loop | ultrabug_py3status | train |
ec64fc9e14e41104e2ae5e9a078f64e6e5566f19 | diff --git a/ninja-core/src/main/java/ninja/params/ControllerMethodInvoker.java b/ninja-core/src/main/java/ninja/params/ControllerMethodInvoker.java
index <HASH>..<HASH> 100644
--- a/ninja-core/src/main/java/ninja/params/ControllerMethodInvoker.java
+++ b/ninja-core/src/main/java/ninja/params/ControllerMethodInvoker.java
@@ -121,17 +121,7 @@ public class ControllerMethodInvoker {
ArgumentExtractor<?> extractor = ArgumentExtractors.getExtractorForType(paramType);
if (extractor == null) {
- // See if we have a WithArgumentExtractor annotated annotation
- for (Annotation annotation : annotations) {
- WithArgumentExtractor withArgumentExtractor = annotation.annotationType()
- .getAnnotation(WithArgumentExtractor.class);
- if (withArgumentExtractor != null) {
- extractor = instantiateComponent(withArgumentExtractor.value(), annotation,
- paramType, injector);
- return extractor;
- }
- }
- // See if we have a WithArgumentExtractors annotated annotation
+ // See if we have a WithArgumentExtractors annotated annotation for specialized extractors
for (Annotation annotation : annotations) {
WithArgumentExtractors withArgumentExtractors = annotation.annotationType()
.getAnnotation(WithArgumentExtractors.class);
@@ -141,12 +131,23 @@ public class ControllerMethodInvoker {
if (paramType.isAssignableFrom(extractedType)) {
extractor = instantiateComponent(argumentExtractor, annotation,
paramType, injector);
- return extractor;
}
}
}
}
}
+
+ if (extractor == null) {
+ // See if we have a WithArgumentExtractor annotated annotation
+ for (Annotation annotation : annotations) {
+ WithArgumentExtractor withArgumentExtractor = annotation.annotationType()
+ .getAnnotation(WithArgumentExtractor.class);
+ if (withArgumentExtractor != null) {
+ extractor = instantiateComponent(withArgumentExtractor.value(), annotation,
+ paramType, injector);
+ }
+ }
+ }
return extractor;
}
diff --git a/ninja-core/src/main/java/ninja/params/Param.java b/ninja-core/src/main/java/ninja/params/Param.java
index <HASH>..<HASH> 100644
--- a/ninja-core/src/main/java/ninja/params/Param.java
+++ b/ninja-core/src/main/java/ninja/params/Param.java
@@ -30,11 +30,11 @@ import java.lang.annotation.Target;
*
*/
@WithArgumentExtractors({
- ArgumentExtractors.ParamExtractor.class,
ArgumentExtractors.FileItemParamExtractor.class,
ArgumentExtractors.FileParamExtractor.class,
ArgumentExtractors.InputStreamParamExtractor.class
})
+@WithArgumentExtractor(ArgumentExtractors.ParamExtractor.class)
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.PARAMETER})
public @interface Param {
diff --git a/ninja-core/src/main/java/ninja/params/Params.java b/ninja-core/src/main/java/ninja/params/Params.java
index <HASH>..<HASH> 100644
--- a/ninja-core/src/main/java/ninja/params/Params.java
+++ b/ninja-core/src/main/java/ninja/params/Params.java
@@ -30,11 +30,11 @@ import java.lang.annotation.Target;
*
*/
@WithArgumentExtractors({
- ArgumentExtractors.ParamsExtractor.class,
ArgumentExtractors.FileItemParamsExtractor.class,
ArgumentExtractors.FileParamsExtractor.class,
ArgumentExtractors.InputStreamParamsExtractor.class
})
+@WithArgumentExtractor(ArgumentExtractors.ParamExtractor.class)
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.PARAMETER})
public @interface Params { | fix argument extractors to only deal with specialized extractors, and fall back to the original argument extractor | ninjaframework_ninja | train
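The reordering above means the specialized extractor set is consulted first and the single general extractor only as a fallback. A schematic Python version of that lookup order — the annotation attributes here are invented for illustration:

```python
def find_extractor(annotations, param_type):
    """Pick a specialized extractor when one matches, else the general one."""
    for ann in annotations:
        for extractor in getattr(ann, "specialized_extractors", ()):
            if issubclass(extractor.extracted_type, param_type):
                return extractor                 # specialized match wins
    for ann in annotations:
        general = getattr(ann, "general_extractor", None)
        if general is not None:
            return general                       # fallback, as in the commit
    return None
```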
b97d63fc5a3db440be565d4581a9e13064e72439 | diff --git a/public/assets/libs/appCore.js b/public/assets/libs/appCore.js
index <HASH>..<HASH> 100644
--- a/public/assets/libs/appCore.js
+++ b/public/assets/libs/appCore.js
@@ -610,8 +610,15 @@ $(function ($) {
await dbLocal.clear(cacheName);
if(!isEmpty(dados)) {
if(typeof dados === "object" && dados !== null && dados.constructor === Array) {
- for(let d of dados)
+ let count = 1;
+ let useIdCount = typeof dados[0].id === "undefined" || !isNumberPositive(dados[0].id);
+ for(let d of dados) {
+ if(useIdCount)
+ d.id = count;
+
dbLocal.exeCreate(cacheName, d);
+ count++;
+ }
} else {
dados.typeIsObject = !1;
dbLocal.exeCreate(cacheName, dados);
@@ -3200,8 +3207,15 @@ const sse = {
await dbLocal.clear(cacheName);
if (!isEmpty(dados)) {
if (typeof dados === "object" && dados !== null && dados.constructor === Array) {
- for (let d of dados)
+ let count = 1;
+ let useIdCount = typeof dados[0].id === "undefined" || !isNumberPositive(dados[0].id);
+ for(let d of dados) {
+ if(useIdCount)
+ d.id = count;
+
dbLocal.exeCreate(cacheName, d);
+ count++;
+ }
} else {
dados.typeIsObject = !1;
dbLocal.exeCreate(cacheName, dados); | cache get request check for ID on results | edineibauer_uebConfig | train |
8b9e3a20961eb714fcaa2268c98e17d81038074b | diff --git a/lib/yard/code_objects/proxy.rb b/lib/yard/code_objects/proxy.rb
index <HASH>..<HASH> 100644
--- a/lib/yard/code_objects/proxy.rb
+++ b/lib/yard/code_objects/proxy.rb
@@ -96,9 +96,10 @@ module YARD
if obj = to_obj
obj.type
else
- :proxy
+ @type || :proxy
end
end
+ def type=(type) @type = type end
def instance_of?(klass)
self.class == klass
diff --git a/spec/code_objects/proxy_spec.rb b/spec/code_objects/proxy_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/code_objects/proxy_spec.rb
+++ b/spec/code_objects/proxy_spec.rb
@@ -47,4 +47,11 @@ describe YARD::CodeObjects::Proxy do
obj.name.should == :test
obj.path.should == "A::B#test"
end
+
+ it "should allow type to be changed" do
+ obj = P("InvalidClass")
+ obj.type.should == :proxy
+ obj.type = :class
+ obj.type.should == :class
+ end
end
\ No newline at end of file | Allow type to be set on a proxy object | lsegal_yard | train |
5d7f8803155d2eb8865cce9a60dd677c2400261c | diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
index <HASH>..<HASH> 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
@@ -36,6 +36,7 @@ import org.mockito.stubbing.Answer;
import scala.concurrent.Future;
import java.io.Serializable;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -1646,7 +1647,94 @@ public class CheckpointCoordinatorTest {
fail(e.getMessage());
}
}
-
+
+ /**
+ * Tests that the savepoints can be triggered concurrently.
+ */
+ @Test
+ public void testConcurrentSavepoints() throws Exception {
+ JobID jobId = new JobID();
+
+ final ExecutionAttemptID attemptID1 = new ExecutionAttemptID();
+ ExecutionVertex vertex1 = mockExecutionVertex(attemptID1);
+
+ StandaloneCheckpointIDCounter checkpointIDCounter = new StandaloneCheckpointIDCounter();
+
+ CheckpointCoordinator coord = new CheckpointCoordinator(
+ jobId,
+ 100000,
+ 200000,
+ 0L,
+ 1, // max one checkpoint at a time => should not affect savepoints
+ 42,
+ new ExecutionVertex[] { vertex1 },
+ new ExecutionVertex[] { vertex1 },
+ new ExecutionVertex[] { vertex1 },
+ cl,
+ checkpointIDCounter,
+ new StandaloneCompletedCheckpointStore(2, cl),
+ new HeapSavepointStore(),
+ new DisabledCheckpointStatsTracker());
+
+ List<Future<String>> savepointFutures = new ArrayList<>();
+
+ int numSavepoints = 5;
+
+ // Trigger savepoints
+ for (int i = 0; i < numSavepoints; i++) {
+ savepointFutures.add(coord.triggerSavepoint(i));
+ }
+
+        // After triggering multiple savepoints, all should be in progress
+ for (Future<String> savepointFuture : savepointFutures) {
+ assertFalse(savepointFuture.isCompleted());
+ }
+
+ // ACK all savepoints
+ long checkpointId = checkpointIDCounter.getLast();
+ for (int i = 0; i < numSavepoints; i++, checkpointId--) {
+ coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jobId, attemptID1, checkpointId));
+ }
+
+ // After ACKs, all should be completed
+ for (Future<String> savepointFuture : savepointFutures) {
+ assertTrue(savepointFuture.isCompleted());
+ }
+ }
+
+ /**
+ * Tests that no minimum delay between savepoints is enforced.
+ */
+ @Test
+ public void testMinDelayBetweenSavepoints() throws Exception {
+ JobID jobId = new JobID();
+
+ final ExecutionAttemptID attemptID1 = new ExecutionAttemptID();
+ ExecutionVertex vertex1 = mockExecutionVertex(attemptID1);
+
+ CheckpointCoordinator coord = new CheckpointCoordinator(
+ jobId,
+ 100000,
+ 200000,
+ 100000000L, // very long min delay => should not affect savepoints
+ 1,
+ 42,
+ new ExecutionVertex[] { vertex1 },
+ new ExecutionVertex[] { vertex1 },
+ new ExecutionVertex[] { vertex1 },
+ cl,
+ new StandaloneCheckpointIDCounter(),
+ new StandaloneCompletedCheckpointStore(2, cl),
+ new HeapSavepointStore(),
+ new DisabledCheckpointStatsTracker());
+
+ Future<String> savepoint0 = coord.triggerSavepoint(0);
+ assertFalse("Did not trigger savepoint", savepoint0.isCompleted());
+
+ Future<String> savepoint1 = coord.triggerSavepoint(1);
+ assertFalse("Did not trigger savepoint", savepoint1.isCompleted());
+ }
+
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------ | [FLINK-<I>] [checkpointing] Extend CheckpointCoordinatorTest
The added tests check that savepoints ignore the maximum number
of concurrent checkpoints and minimum delay between checkpoints.
This closes #<I>. | apache_flink | train |
6e6eaa1cab19bd2de0882c76853b9eaa91a0a94e | diff --git a/pkg/build/apis/build/types.go b/pkg/build/apis/build/types.go
index <HASH>..<HASH> 100644
--- a/pkg/build/apis/build/types.go
+++ b/pkg/build/apis/build/types.go
@@ -1125,6 +1125,9 @@ type ImageChangeTrigger struct {
// will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
// a build configuration.
From *kapi.ObjectReference
+
+ // Paused is true if this trigger is temporarily disabled. Optional.
+ Paused bool
}
// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
diff --git a/pkg/image/trigger/buildconfigs/buildconfigs.go b/pkg/image/trigger/buildconfigs/buildconfigs.go
index <HASH>..<HASH> 100644
--- a/pkg/image/trigger/buildconfigs/buildconfigs.go
+++ b/pkg/image/trigger/buildconfigs/buildconfigs.go
@@ -143,6 +143,10 @@ func (r *buildConfigReactor) ImageChanged(obj runtime.Object, tagRetriever trigg
if p == nil || (p.From != nil && p.From.Kind != "ImageStreamTag") {
continue
}
+ if p.Paused {
+ glog.V(5).Infof("Skipping paused build on bc: %s/%s for trigger: %s", bc.Namespace, bc.Name, t)
+ continue
+ }
var from *kapi.ObjectReference
if p.From != nil {
from = p.From | Skip paused ICT on a BC | openshift_origin | train |
184179b3663b9401b1e83d72a71d3d1bfb1bd6ab | diff --git a/tests/keo.test.js b/tests/keo.test.js
index <HASH>..<HASH> 100644
--- a/tests/keo.test.js
+++ b/tests/keo.test.js
@@ -34,7 +34,7 @@ test('is able to pass in properties to the component;', t => {
});
test('is able to remove `getInitialState` function;', t => {
- const text= starwars();
+ const text = starwars();
const getInitialStateSpy = spy(() => ({ text }));
const Component = stitch({ getInitialState: getInitialStateSpy, render: () => <span /> });
const wrapper = mount(<Component />);
@@ -64,7 +64,32 @@ test('is not able to setState or access the state object;', t => {
t.true(props.setState === undefined);
});
+test.only('is only re-rendering when the component-specific properties change;', t => {
+
+ const renderSpy = spy(({ props }) => <div><h1>{props.quote}</h1><datetime>{props.dateTime}</datetime></div>);
+ const Component = stitch({ propTypes: { quote: PropTypes.string.isRequired }, render: renderSpy });
+
+ const date = Date.now();
+ const initialText = starwars();
+ const wrapper = mount(<Component quote={initialText} dateTime={date} />);
+
+ t.is(renderSpy.callCount, 1);
+ t.is(wrapper.find('h1').text(), initialText);
+ t.is(wrapper.find('datetime').text(), String(date));
+
+ const changedText = starwars();
+ wrapper.setProps({ quote: changedText });
+ t.is(renderSpy.callCount, 2);
+ t.is(wrapper.find('h1').text(), changedText);
+ t.is(wrapper.find('datetime').text(), String(date));
+
+ wrapper.setProps({ dateTime: 'Invalid Date' });
+ t.is(renderSpy.callCount, 2);
+ t.is(wrapper.find('h1').text(), changedText);
+ t.is(wrapper.find('datetime').text(), String(date));
+
+});
+
// Skipped...
-test.skip('is only re-rendering when the component-specific properties change;', t => {});
test.skip('is able to override the default `shouldComponentUpdate` behaviour;', t => {}); | Added check for shouldComponentUpdate | Wildhoney_Keo | train |
58a11985342e4e261381916e6b43210b58ae0908 | diff --git a/command/install.py b/command/install.py
index <HASH>..<HASH> 100644
--- a/command/install.py
+++ b/command/install.py
@@ -77,6 +77,11 @@ class install (Command):
('install-data=', None,
"installation directory for data files"),
+ # For lazy debuggers who just want to test the install
+ # commands without rerunning "build" all the time
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
@@ -129,6 +134,8 @@ class install (Command):
self.extra_path = None
self.install_path_file = 0
+ self.skip_build = 0
+
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
@@ -270,7 +277,10 @@ class install (Command):
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
- opt_name = string.translate (opt[0][0:-1], longopt_xlate)
+ opt_name = opt[0]
+ if opt_name[-1] == "=":
+ opt_name = opt_name[0:-1]
+ opt_name = string.translate (opt_name, longopt_xlate)
val = getattr (self, opt_name)
print " %s: %s" % (opt_name, val)
@@ -409,7 +419,8 @@ class install (Command):
def run (self):
# Obviously have to build before we can install
- self.run_peer ('build')
+ if not self.skip_build:
+ self.run_peer ('build')
# Run all sub-commands: currently this just means install all
# Python modules using 'install_lib'.
diff --git a/command/install_lib.py b/command/install_lib.py
index <HASH>..<HASH> 100644
--- a/command/install_lib.py
+++ b/command/install_lib.py
@@ -15,6 +15,7 @@ class install_lib (Command):
('build-dir=','b', "build directory (where to install from)"),
('compile', 'c', "compile .py to .pyc"),
('optimize', 'o', "compile .py to .pyo (optimized)"),
+ ('skip-build', None, "skip the build steps"),
]
@@ -24,6 +25,7 @@ class install_lib (Command):
self.build_dir = None
self.compile = 1
self.optimize = 1
+ self.skip_build = None
def finalize_options (self):
@@ -34,16 +36,19 @@ class install_lib (Command):
('build_lib', 'build_dir'),
('install_lib', 'install_dir'),
('compile_py', 'compile'),
- ('optimize_py', 'optimize'))
+ ('optimize_py', 'optimize'),
+ ('skip_build', 'skip_build'),
+ )
def run (self):
# Make sure we have built everything we need first
- if self.distribution.has_pure_modules():
- self.run_peer ('build_py')
- if self.distribution.has_ext_modules():
- self.run_peer ('build_ext')
+ if not self.skip_build:
+ if self.distribution.has_pure_modules():
+ self.run_peer ('build_py')
+ if self.distribution.has_ext_modules():
+ self.run_peer ('build_ext')
# Install everything: simply dump the entire contents of the build
# directory to the installation directory (that's the beauty of | Added --skip-build option, so lazy debuggers/testers (mainly me) don't
have to wade through all the 'build' output when testing installation. | pypa_setuptools | train |
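The commit adds a boolean option that gates the expensive build step for faster install debugging. A tiny argparse sketch of the same flag wiring — the build/install hooks are placeholders, not distutils internals:

```python
import argparse

def run_build():
    print("building...")

def run_install():
    print("installing...")

def main(argv=None):
    parser = argparse.ArgumentParser(prog="install")
    parser.add_argument("--skip-build", action="store_true",
                        help="skip rebuilding everything (for testing/debugging)")
    args = parser.parse_args(argv)
    if not args.skip_build:       # obviously have to build before installing...
        run_build()               # ...unless the debugger opts out
    run_install()

if __name__ == "__main__":
    main()
```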
117621cd38164d4837e0e24a5b65a6950cfd19fb | diff --git a/pkg/endpoint/policy.go b/pkg/endpoint/policy.go
index <HASH>..<HASH> 100644
--- a/pkg/endpoint/policy.go
+++ b/pkg/endpoint/policy.go
@@ -324,11 +324,10 @@ func (e *Endpoint) resolveL4Policy(owner Owner, repo *policy.Repository, c *poli
// and a map matching which rules were successfully removed.
// Must be called with Consumable mutex held.
func (e *Endpoint) regenerateConsumable(owner Owner, labelsMap *identityPkg.IdentityCache,
- repo *policy.Repository, c *policy.Consumable) (changed bool, rulesAdd policy.SecurityIDContexts, rulesRm policy.SecurityIDContexts) {
+ repo *policy.Repository, c *policy.Consumable) (changed bool, rulesAdd, rulesRm policy.SecurityIDContexts, err error) {
var (
l4Rm policy.SecurityIDContexts
- err error
)
// Mark all entries unused by denying them
@@ -362,7 +361,8 @@ func (e *Endpoint) regenerateConsumable(owner Owner, labelsMap *identityPkg.Iden
rulesAdd, l4Rm, err = e.applyL4PolicyLocked(owner, labelsMap, e.L4Policy, c.L4Policy)
if err != nil {
// This should not happen, and we can't fail at this stage anyway.
- e.getLogger().WithError(err).Fatal("L4 Policy application failed")
+ e.getLogger().WithError(err).Error("L4 Policy application failed")
+ return
}
}
// Reuse the common policy, will be used in lxc_config.h (CFG_L4_INGRESS and CFG_L4_EGRESS)
@@ -524,7 +524,7 @@ func (e *Endpoint) regenerateConsumable(owner Owner, labelsMap *identityPkg.Iden
"l4Rm": l4Rm,
"rulesRm": rulesRm,
}).Debug("consumable regenerated")
- return changed, rulesAdd, rulesRm
+ return changed, rulesAdd, rulesRm, nil
}
// Must be called with global repo.Mutrex, e.Mutex, and c.Mutex held
@@ -756,7 +756,10 @@ func (e *Endpoint) regeneratePolicy(owner Owner, opts models.ConfigurationMap) (
optsChanged := e.applyOptsLocked(opts)
// Determines all security-identity based policy.
- policyChanged2, consumersAdd, consumersRm := e.regenerateConsumable(owner, labelsMap, repo, c)
+ policyChanged2, consumersAdd, consumersRm, err := e.regenerateConsumable(owner, labelsMap, repo, c)
+ if err != nil {
+ return false, nil, nil, err
+ }
if policyChanged2 {
policyChanged = true
} | endpoint: Don't fail with fatal on l4 policy application
A better fix should be put in place via #<I>
Fixes: #<I> | cilium_cilium | train |
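The change demotes a process-killing Fatal to an Error plus an error return, letting the caller decide how to recover. The same shape translated into Python — log, then raise to the caller instead of exiting; names and the PolicyError type are illustrative:

```python
import logging

log = logging.getLogger(__name__)

class PolicyError(Exception):
    pass

def regenerate_consumable(endpoint, apply_l4_policy):
    """Apply L4 policy; failures propagate instead of aborting the process."""
    try:
        rules_add, rules_rm = apply_l4_policy(endpoint)
    except PolicyError:
        log.error("L4 Policy application failed")
        raise                     # was the moral equivalent of Fatal/sys.exit
    return rules_add, rules_rm
```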
faf4b4a8edc934990f6f4c9632b66a944ca6e510 | diff --git a/aiohttp_devtools/logs.py b/aiohttp_devtools/logs.py
index <HASH>..<HASH> 100644
--- a/aiohttp_devtools/logs.py
+++ b/aiohttp_devtools/logs.py
@@ -1,6 +1,7 @@
import json
import logging
import logging.config
+import platform
import re
import traceback
from io import StringIO
@@ -30,7 +31,7 @@ split_log = re.compile(r'^(\[.*?\])')
class HighlightStreamHandler(logging.StreamHandler):
def setFormatter(self, fmt):
self.formatter = fmt
- self.formatter.stream_is_tty = isatty(self.stream)
+ self.formatter.stream_is_tty = isatty(self.stream) and platform.system().lower() != 'windows'
class DefaultFormatter(logging.Formatter): | disable colors on windows, fix #<I> (#<I>) | aio-libs_aiohttp-devtools | train |
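The guard above enables ANSI colors only for real TTYs and never on Windows consoles. A self-contained version of that predicate:

```python
import platform
import sys

def use_color(stream=sys.stdout):
    """True only for interactive streams on non-Windows platforms."""
    isatty = getattr(stream, "isatty", None)
    return bool(isatty and isatty()) and platform.system().lower() != "windows"
```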
9e0c199ce59ba5cb0c84c7bb041730be1afee82f | diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -195,6 +195,7 @@ module.exports = function (content) {
var chunkHash = filename.indexOf('[chunkhash]') !== -1
? hashFiles(generatorOptions.files, options.hashLength) : '';
+ filename = fontConfig.dest.concat(filename);
filename = filename
.replace('[chunkhash]', chunkHash)
.replace('[fontname]', generatorOptions.fontName) | dest has been ignored, now its will be added to path! | jeerbl_webfonts-loader | train |
e0faaebc4dd047ae08a38de9b20721d06f64c7ba | diff --git a/UnitySDK/Assets/ML-Agents/Scripts/Academy.cs b/UnitySDK/Assets/ML-Agents/Scripts/Academy.cs
index <HASH>..<HASH> 100755
--- a/UnitySDK/Assets/ML-Agents/Scripts/Academy.cs
+++ b/UnitySDK/Assets/ML-Agents/Scripts/Academy.cs
@@ -95,7 +95,7 @@ namespace MLAgents
[SerializeField]
public BroadcastHub broadcastHub = new BroadcastHub();
- private const string kApiVersion = "API-6";
+ private const string kApiVersion = "API-7";
/// Temporary storage for global gravity value
/// Used to restore oringal value when deriving Academy modifies it
diff --git a/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/BarracudaModelParamLoader.cs b/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/BarracudaModelParamLoader.cs
index <HASH>..<HASH> 100644
--- a/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/BarracudaModelParamLoader.cs
+++ b/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/BarracudaModelParamLoader.cs
@@ -23,7 +23,7 @@ namespace MLAgents.InferenceBrain
Discrete,
Continuous
}
- private const long ApiVersion = 1;
+ private const long ApiVersion = 2;
private IWorker _engine;
private Model _model;
private BrainParameters _brainParameters;
diff --git a/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs b/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs
index <HASH>..<HASH> 100644
--- a/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs
+++ b/UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs
@@ -17,7 +17,7 @@ namespace MLAgents.InferenceBrain
Discrete,
Continuous
}
- private const long ApiVersion = 1;
+ private const long ApiVersion = 2;
private TFSharpInferenceEngine _engine;
private BrainParameters _brainParameters;
private List<string> _failedModelChecks = new List<string>();
diff --git a/gym-unity/setup.py b/gym-unity/setup.py
index <HASH>..<HASH> 100755
--- a/gym-unity/setup.py
+++ b/gym-unity/setup.py
@@ -3,7 +3,7 @@
from setuptools import setup, find_packages
setup(name='gym_unity',
- version='0.2.0',
+ version='0.3.0',
description='Unity Machine Learning Agents Gym Interface',
license='Apache License 2.0',
author='Unity Technologies',
diff --git a/ml-agents/mlagents/envs/environment.py b/ml-agents/mlagents/envs/environment.py
index <HASH>..<HASH> 100644
--- a/ml-agents/mlagents/envs/environment.py
+++ b/ml-agents/mlagents/envs/environment.py
@@ -42,7 +42,7 @@ class UnityEnvironment(object):
atexit.register(self._close)
self.port = base_port + worker_id
self._buffer_size = 12000
- self._version_ = "API-6"
+ self._version_ = "API-7"
self._loaded = False # If true, this means the environment was successfully loaded
self.proc1 = None # The process that is started. If None, no process was started
self.communicator = self.get_communicator(worker_id, base_port)
diff --git a/ml-agents/mlagents/trainers/models.py b/ml-agents/mlagents/trainers/models.py
index <HASH>..<HASH> 100644
--- a/ml-agents/mlagents/trainers/models.py
+++ b/ml-agents/mlagents/trainers/models.py
@@ -8,7 +8,7 @@ logger = logging.getLogger("mlagents.envs")
class LearningModel(object):
- _version_number_ = 1
+ _version_number_ = 2
def __init__(self, m_size, normalize, use_recurrent, brain, seed):
tf.set_random_seed(seed)
diff --git a/ml-agents/setup.py b/ml-agents/setup.py
index <HASH>..<HASH> 100644
--- a/ml-agents/setup.py
+++ b/ml-agents/setup.py
@@ -10,7 +10,7 @@ with open(path.join(here, 'README.md'), encoding='utf-8') as f:
setup(
name='mlagents',
- version='0.6.0',
+ version='0.7.0',
description='Unity Machine Learning Agents',
long_description=long_description,
long_description_content_type='text/markdown',
diff --git a/ml-agents/tests/mock_communicator.py b/ml-agents/tests/mock_communicator.py
index <HASH>..<HASH> 100755
--- a/ml-agents/tests/mock_communicator.py
+++ b/ml-agents/tests/mock_communicator.py
@@ -42,7 +42,7 @@ class MockCommunicator(Communicator):
)
rl_init = UnityRLInitializationOutput(
name="RealFakeAcademy",
- version="API-6",
+ version="API-7",
log_path="",
brain_parameters=[bp]
) | Ticked API: (#<I>)
* Ticked API:
- Ticked API for pypi for mlagents
- Ticked API for pypi for unity-gym
- Ticked Communication number for API
- Ticked Model Loader number for API
* Ticked the API for pytest | Unity-Technologies_ml-agents | train
a5eba6f66ae25cd5a4c3196f35bd8dbe8eb43417 | diff --git a/dask_kubernetes/helm.py b/dask_kubernetes/helm.py
index <HASH>..<HASH> 100644
--- a/dask_kubernetes/helm.py
+++ b/dask_kubernetes/helm.py
@@ -166,11 +166,16 @@ class HelmCluster(Cluster):
async def _wait_for_workers(self):
while True:
n_workers = len(self.scheduler_info["workers"])
- deployment = await self.apps_api.read_namespaced_deployment(
- name=f"{self.release_name}-{self.chart_name}{self.worker_name}",
- namespace=self.namespace,
+ deployments = await self.apps_api.list_namespaced_deployment(
+ namespace=self.namespace
)
- deployment_replicas = deployment.spec.replicas
+ deployment_replicas = 0
+ for deployment in deployments.items:
+ if (
+ f"{self.release_name}-{self.chart_name}{self.worker_name}"
+ in deployment.metadata.name
+ ):
+ deployment_replicas += deployment.spec.replicas
if n_workers == deployment_replicas:
return
else:
@@ -236,10 +241,11 @@ class HelmCluster(Cluster):
return _().__await__()
- def scale(self, n_workers):
+ def scale(self, n_workers, worker_group=None):
"""Scale cluster to n workers.
This sets the Dask worker deployment size to the requested number.
+ It also allows you to set the worker deployment size of another worker group.
Workers will not be terminated gracefull so be sure to only scale down
when all futures have been retrieved by the client and the cluster is idle.
@@ -247,24 +253,34 @@ class HelmCluster(Cluster):
--------
>>> cluster
- HelmCluster('tcp://localhost:8786', workers=3, threads=18, memory=18.72 GB)
+ HelmCluster(my-dask.default, 'tcp://localhost:51481', workers=4, threads=241, memory=2.95 TiB)
>>> cluster.scale(4)
>>> cluster
- HelmCluster('tcp://localhost:8786', workers=4, threads=24, memory=24.96 GB)
-
+ HelmCluster(my-dask.default, 'tcp://localhost:51481', workers=5, threads=321, memory=3.94 TiB)
+ >>> cluster.scale(5, worker_group="high-mem-workers")
+ >>> cluster
+ HelmCluster(my-dask.default, 'tcp://localhost:51481', workers=9, threads=325, memory=3.94 TiB)
"""
- return self.sync(self._scale, n_workers)
+ return self.sync(self._scale, n_workers, worker_group=worker_group)
- async def _scale(self, n_workers):
- await self.apps_api.patch_namespaced_deployment(
- name=f"{self.release_name}-{self.chart_name}{self.worker_name}",
- namespace=self.namespace,
- body={
- "spec": {
- "replicas": n_workers,
- }
- },
- )
+ async def _scale(self, n_workers, worker_group=None):
+ deployment = f"{self.release_name}-{self.chart_name}{self.worker_name}"
+ if worker_group:
+ deployment += f"-{worker_group}"
+ try:
+ await self.apps_api.patch_namespaced_deployment(
+ name=deployment,
+ namespace=self.namespace,
+ body={
+ "spec": {
+ "replicas": n_workers,
+ }
+ },
+ )
+ except kubernetes.client.exceptions.ApiException as e:
+ if worker_group:
+ raise ValueError(f"No such worker group {worker_group}") from e
+ raise e
def adapt(self, *args, **kwargs):
"""Turn on adaptivity (Not recommended)."""
diff --git a/dask_kubernetes/tests/helm/values.yaml b/dask_kubernetes/tests/helm/values.yaml
index <HASH>..<HASH> 100644
--- a/dask_kubernetes/tests/helm/values.yaml
+++ b/dask_kubernetes/tests/helm/values.yaml
@@ -13,4 +13,8 @@ scheduler:
worker:
image:
repository: "dask-kubernetes" # Container image repository.
- tag: "dev" # Container image tag.
\ No newline at end of file
+ tag: "dev" # Container image tag.
+
+additional_worker_groups:
+ - name: foo
+ replicas: 1
diff --git a/dask_kubernetes/tests/test_helm.py b/dask_kubernetes/tests/test_helm.py
index <HASH>..<HASH> 100644
--- a/dask_kubernetes/tests/test_helm.py
+++ b/dask_kubernetes/tests/test_helm.py
@@ -63,7 +63,18 @@ def release(k8s_cluster, chart_name, test_namespace, release_name, config_path):
config_path,
]
)
- # time.sleep(10) # Wait for scheduler to start. TODO Replace with more robust check.
+ # Scale back the additional workers group for now
+ subprocess.check_output(
+ [
+ "kubectl",
+ "scale",
+ "-n",
+ test_namespace,
+ "deployment",
+ f"{release_name}-dask-worker-foo",
+ "--replicas=0",
+ ]
+ )
yield release_name
subprocess.check_output(["helm", "delete", "-n", test_namespace, release_name])
@@ -144,6 +155,22 @@ async def test_scale_cluster(cluster):
await cluster # Wait for workers
assert len(cluster.scheduler_info["workers"]) == 3
+ # Scale up an additional worker group 'foo'
+ await cluster.scale(2, worker_group="foo")
+ await cluster # Wait for workers
+ assert len(cluster.scheduler_info["workers"]) == 5
+
+ # Scale down an additional worker group 'foo'
+ await cluster.scale(0, worker_group="foo")
+ await cluster # Wait for workers
+ assert len(cluster.scheduler_info["workers"]) == 3
+
+    # Scaling a non-existent worker group 'bar' raises a ValueError
+ import kubernetes_asyncio as kubernetes
+
+ with pytest.raises((ValueError, kubernetes.client.exceptions.ApiException)):
+ await cluster.scale(2, worker_group="bar")
+
@pytest.mark.asyncio
async def test_logs(cluster): | Allow scaling of additional worker groups with HelmCluster (#<I>) (#<I>)
* Allow scaling of additional worker groups with Helm Cluster (#<I>)
* Change worker_name to worker_group, Add error checking
* Add testing for changes
* Update scaling example
* Fix scaling non-existent worker group test. Remove scaling additional worker group tests
* Scale down additional group in fixture and wait for all groups | dask_dask-kubernetes | train |
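With several worker groups, readiness now means the observed worker count equals the replicas summed across every matching deployment. A small sketch of that aggregation — deployments here are plain dicts for illustration, not Kubernetes API objects:

```python
def expected_workers(deployments, prefix):
    """Sum replicas over all worker deployments carrying the release prefix."""
    return sum(d["replicas"] for d in deployments if prefix in d["name"])

deployments = [
    {"name": "my-dask-worker", "replicas": 3},
    {"name": "my-dask-worker-foo", "replicas": 2},
    {"name": "my-dask-scheduler", "replicas": 1},
]
assert expected_workers(deployments, "my-dask-worker") == 5
```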
673d732d341eef6cadaaf1fdeef11aac5bdca3eb | diff --git a/google-http-client/src/main/java/com/google/api/client/http/HttpTransport.java b/google-http-client/src/main/java/com/google/api/client/http/HttpTransport.java
index <HASH>..<HASH> 100644
--- a/google-http-client/src/main/java/com/google/api/client/http/HttpTransport.java
+++ b/google-http-client/src/main/java/com/google/api/client/http/HttpTransport.java
@@ -52,8 +52,10 @@ import java.util.logging.Logger;
* </li>
* <li>Other Java environments
* <ul>
+ * <li>{@code com.google.api.client.googleapis.javanet.GoogleNetHttpTransport} is included in
+ * google-api-client 1.22.0, so it is easy to include.</li>
* <li>{@code com.google.api.client.javanet.NetHttpTransport} is based on the HttpURLConnection
- * built into the Java SDK, so it is normally the preferred choice.</li>
+ * built into the Java SDK, so it used to be the preferred choice.</li>
* <li>{@code com.google.api.client.apache.ApacheHttpTransport} is a good choice for users of the
* Apache HTTP Client, especially if you need some of the configuration options available in that
* library.</li> | Updated comments which looked outdated to me.
The example here:
<URL>
It doesn't show where the transport or jsonFactory come from.
It was easy to find the right JSON library from the left-nav here:
<URL> | googleapis_google-http-java-client | train |
8f50cdcece5fc4dfb5fff21a243d74ff3afb9aa5 | diff --git a/integration/v7/isolated/scale_command_test.go b/integration/v7/isolated/scale_command_test.go
index <HASH>..<HASH> 100644
--- a/integration/v7/isolated/scale_command_test.go
+++ b/integration/v7/isolated/scale_command_test.go
@@ -178,26 +178,21 @@ var _ = Describe("scale command", func() {
_, err := buffer.Write([]byte("y\n"))
Expect(err).ToNot(HaveOccurred())
session := helpers.CFWithStdin(buffer, "scale", appName, "-m", "64M")
- Eventually(session).Should(Say(`Scaling app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
- Eventually(session).Should(Say(`This will cause the app to restart\. Are you sure you want to scale %s\? \[yN\]:`, appName))
- Eventually(session).Should(Say(`Stopping app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
- Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Exit(0))
- helpers.WaitForAppMemoryToTakeEffect(appName, 0, 0, true)
+ Expect(session).Should(Say(`Scaling app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
+ Expect(session).Should(Say(`This will cause the app to restart\. Are you sure you want to scale %s\? \[yN\]:`, appName))
+ Expect(session).Should(Say(`Stopping app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
+ Expect(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
- session = helpers.CF("app", appName)
- Eventually(session).Should(Exit(0))
-
- updatedAppTable := helpers.ParseV3AppProcessTable(session.Out.Contents())
- Expect(updatedAppTable.Processes).To(HaveLen(2))
+ helpers.WaitForAppMemoryToTakeEffect(appName, 0, 0, true)
- processSummary := updatedAppTable.Processes[0]
- instanceSummary := processSummary.Instances[0]
- Expect(processSummary.Type).To(Equal("web"))
- Expect(processSummary.InstanceCount).To(MatchRegexp(`\d/1`))
- Expect(instanceSummary.Memory).To(MatchRegexp(`\d+(\.\d+)?[KMG]? of 64M`))
- Expect(instanceSummary.Disk).To(MatchRegexp(`\d+(\.\d+)?[KMG]? of \d+[KMG]`))
+ Eventually(func() string {
+ session := helpers.CF("app", appName)
+ Eventually(session).Should(Exit(0))
+ appTable := helpers.ParseV3AppProcessTable(session.Out.Contents())
+ return appTable.Processes[0].Instances[0].Memory
+ }).Should(MatchRegexp(`\d+(\.\d+)?[KMG]? of 64M`))
})
When("-f flag provided", func() { | Update scale int test to wait for scale to take effect | cloudfoundry_cli | train |
af3cd5a133e4100765dcd5e71d3dd0dd5dcd7be7 | diff --git a/src/utils.js b/src/utils.js
index <HASH>..<HASH> 100644
--- a/src/utils.js
+++ b/src/utils.js
@@ -1,4 +1,6 @@
-import { SHA256, RIPEMD160, enc } from 'crypto-js'
+import hexEncoding from 'crypto-js/enc-hex'
+import SHA256 from 'crypto-js/sha256'
+import RIPEMD160 from 'crypto-js/ripemd160'
import BN from 'bignumber.js'
import util from 'util'
@@ -296,7 +298,7 @@ export class StringStream {
export const hash160 = (hex) => {
if (typeof hex !== 'string') throw new Error('reverseHex expects a string')
if (hex.length % 2 !== 0) throw new Error(`Incorrect Length: ${hex}`)
- let hexEncoded = enc.Hex.parse(hex)
+ let hexEncoded = hexEncoding.parse(hex)
let ProgramSha256 = SHA256(hexEncoded)
return RIPEMD160(ProgramSha256).toString()
}
@@ -309,7 +311,7 @@ export const hash160 = (hex) => {
export const hash256 = (hex) => {
if (typeof hex !== 'string') throw new Error('reverseHex expects a string')
if (hex.length % 2 !== 0) throw new Error(`Incorrect Length: ${hex}`)
- let hexEncoded = enc.Hex.parse(hex)
+ let hexEncoded = hexEncoding.parse(hex)
let ProgramSha256 = SHA256(hexEncoded)
return SHA256(ProgramSha256).toString()
}
@@ -322,7 +324,7 @@ export const hash256 = (hex) => {
export const sha256 = (hex) => {
if (typeof hex !== 'string') throw new Error('reverseHex expects a string')
if (hex.length % 2 !== 0) throw new Error(`Incorrect Length: ${hex}`)
- let hexEncoded = enc.Hex.parse(hex)
+ let hexEncoded = hexEncoding.parse(hex)
return SHA256(hexEncoded).toString()
} | refactor(utils): reduce crypto-js imports | CityOfZion_neon-js | train |
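Beyond the import slimming, the functions touched above implement the usual double-SHA-256 and SHA-256-then-RIPEMD-160 constructions. The same two digests with Python's hashlib — note these return digest bytes (call `.hex()` for hex strings like the library), and ripemd160 availability depends on the local OpenSSL build:

```python
import hashlib

def hash256(data: bytes) -> bytes:
    """SHA-256 applied twice, as in the hash256 helper above."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def hash160(data: bytes) -> bytes:
    """RIPEMD-160 of SHA-256, the script-hash construction hash160 uses."""
    return hashlib.new("ripemd160", hashlib.sha256(data).digest()).digest()
```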
316491b029732281b5a00cbe6ebcc1c1887caf4c | diff --git a/config/deploy.rb b/config/deploy.rb
index <HASH>..<HASH> 100644
--- a/config/deploy.rb
+++ b/config/deploy.rb
@@ -8,7 +8,7 @@ set :scm, :git
# set :pty, true
# set :linked_files, %w{config/database.yml}
-set :linked_dirs, %w{log tmp/pids tmp/cache tmp/sockets public/system public/assets}
+# set :linked_dirs, %w{log tmp/pids tmp/cache tmp/sockets public/system public/assets}
set :rvm_type, :system
@@ -45,8 +45,17 @@ namespace :deploy do
# deploy:restart not firing, so I added this hack for now
after 'deploy:symlink:release', :restart_hack do
on roles(:app) do
- execute "ln -s #{shared_path}/restart.txt #{release_path}/tmp/restart.txt"
- execute :touch, release_path.join('tmp','restart.txt')
+ execute "ln -s #{shared_path}/restart.txt #{release_path}/spec/dummy/tmp/restart.txt"
+ execute :touch, release_path.join('spec', 'dummy', 'tmp','restart.txt')
+ end
+ end
+
+ after 'deploy:symlink:release', :symlink_dummy_files do
+ on roles(:app) do
+ %w(log tmp/pids tmp/cache tmp/sockets public/system public/assets).each do |path|
+ execute "rm -rf #{release_path}/spec/dummy/#{path}"
+ execute "ln -s #{release_path}/spec/dummy/log #{shared_path}/#{path}"
+ end
end
end | update deploy to symlink dummy directories | wearefine_fae | train |
55cc7e5d17094e666ebc40a5a0a2f6f68892880a | diff --git a/lib/deliver/deliver_process.rb b/lib/deliver/deliver_process.rb
index <HASH>..<HASH> 100644
--- a/lib/deliver/deliver_process.rb
+++ b/lib/deliver/deliver_process.rb
@@ -108,6 +108,7 @@ module Deliver
@app_version ||= @deploy_information[Deliverer::ValKey::APP_VERSION]
@app_version ||= (FastlaneCore::ItunesSearchApi.fetch_by_identifier(app_identifier)['version'] rescue nil)
+ @app_version ||= (app.get_live_version rescue nil)
end
##################################################### | Added automatic detection of live version using the iTC JSON API | fastlane_fastlane | train |
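The new line extends a lazy fallback chain: each `||=` probe runs only if everything before it produced nothing, and each remote probe fails soft via `rescue nil`. A Python rendering of that chain — the three sources are illustrative callables, not fastlane's API:

```python
from contextlib import suppress

def resolve_app_version(explicit, search_api, live_lookup):
    """First non-None source wins; remote probes swallow their own errors."""
    version = explicit
    if version is None:
        with suppress(Exception):     # mirrors Ruby's `rescue nil`
            version = search_api()
    if version is None:
        with suppress(Exception):     # the newly added live-version probe
            version = live_lookup()
    return version
```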
846eaa7e9969a6cf4586e080afde36a306a85d1f | diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -55,8 +55,7 @@ else:
extensions = [Extension(
name='neal.src.simulated_annealing',
sources=['./neal/src/simulated_annealing' + ext,
- './neal/src/cpu_sa.cpp'
- ],
+ './neal/src/cpu_sa.cpp'],
include_dirs=['./neal/src/'],
language='c++',
)]
@@ -65,16 +64,26 @@ if USE_CYTHON:
extensions = cythonize(extensions, language='c++')
packages = ['neal',
- 'neal/src'
- ]
+ 'neal/src']
install_requires = ['dimod>=0.6.11,<0.7.0',
'numpy>=1.14.0,<1.15.0',
- 'six>=1.11.0,<2.0.0'
- ]
+ 'six>=1.11.0,<2.0.0']
-setup_requires = ['numpy>=1.14.0,<1.15.0'
- ]
+setup_requires = ['numpy>=1.14.0,<1.15.0']
+
+classifiers = [
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7'
+]
+
+python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
_PY2 = sys.version_info.major == 2
@@ -94,10 +103,12 @@ setup(
long_description=open('README.rst').read(),
url='https://github.com/dwavesystems/dwave-neal',
license='Apache 2.0',
+ classifiers=classifiers,
packages=packages,
install_requires=install_requires,
ext_modules=extensions,
cmdclass={'build_ext': build_ext_compiler_check},
setup_requires=setup_requires,
+ python_requires=python_requires,
zip_safe=False
) | Restrict package to python <I>,<I>+ (due to dimod dep) | dwavesystems_dwave-neal | train |
7e6689da39f00f68c1ee37730eb5d3a7b9f2e985 | diff --git a/lib/do_snapshot.rb b/lib/do_snapshot.rb
index <HASH>..<HASH> 100644
--- a/lib/do_snapshot.rb
+++ b/lib/do_snapshot.rb
@@ -1,5 +1,5 @@
# -*- encoding : utf-8 -*-
-require 'do_snapshot/version'
+require_relative 'do_snapshot/version'
# Used primary for creating snapshot's as backups for DigitalOcean
#
diff --git a/lib/do_snapshot/cli.rb b/lib/do_snapshot/cli.rb
index <HASH>..<HASH> 100644
--- a/lib/do_snapshot/cli.rb
+++ b/lib/do_snapshot/cli.rb
@@ -1,9 +1,9 @@
# -*- encoding : utf-8 -*-
require 'thor'
require 'do_snapshot'
-require 'do_snapshot/command'
-require 'do_snapshot/mail'
-require 'do_snapshot/log'
+require_relative 'command'
+require_relative 'mail'
+require_relative 'log'
module DoSnapshot
# CLI is here
diff --git a/lib/do_snapshot/command.rb b/lib/do_snapshot/command.rb
index <HASH>..<HASH> 100644
--- a/lib/do_snapshot/command.rb
+++ b/lib/do_snapshot/command.rb
@@ -1,5 +1,5 @@
# -*- encoding : utf-8 -*-
-require 'do_snapshot/api'
+require_relative 'api'
module DoSnapshot
# Our commands live here :)
diff --git a/lib/do_snapshot/mail.rb b/lib/do_snapshot/mail.rb
index <HASH>..<HASH> 100644
--- a/lib/do_snapshot/mail.rb
+++ b/lib/do_snapshot/mail.rb
@@ -1,7 +1,7 @@
# -*- encoding : utf-8 -*-
require 'date'
require 'pony'
-require 'do_snapshot/core_ext/hash'
+require_relative 'core_ext/hash'
module DoSnapshot
# Shared mailer.
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index <HASH>..<HASH> 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -8,10 +8,9 @@ end
require 'do_snapshot/cli'
require 'webmock/rspec'
require 'digitalocean'
-require 'shared/api_helpers'
-require 'shared/uri_helpers'
-require 'shared/environment'
-require 'shared/uri_helpers'
+require_relative 'shared/api_helpers'
+require_relative 'shared/uri_helpers'
+require_relative 'shared/environment'
require 'do_snapshot/core_ext/hash'
WebMock.disable_net_connect!(allow_localhost: true) | Change some require calls to require_relative. | merqlove_do_snapshot | train
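The switch matters because `require` resolves against $LOAD_PATH while `require_relative` resolves against the directory of the file doing the requiring, so intra-gem requires keep working even when lib/ is not on the load path. A minimal before/after, using a path taken from the diff:

# lib/do_snapshot/cli.rb
require 'do_snapshot/command'   # before: depends on lib/ being on $LOAD_PATH
require_relative 'command'      # after: resolved relative to this file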
fb5d9dbec7e6ea60a1dc60163d5afed210e5c8b8 | diff --git a/JSAT/src/jsat/regression/RegressionDataSet.java b/JSAT/src/jsat/regression/RegressionDataSet.java
index <HASH>..<HASH> 100644
--- a/JSAT/src/jsat/regression/RegressionDataSet.java
+++ b/JSAT/src/jsat/regression/RegressionDataSet.java
@@ -68,7 +68,7 @@ public class RegressionDataSet extends DataSet
for(int i = 0; i < origVec.length()-1; i++)
{
if(i >= predicting)
- newVec.set(i+1, origVec.get(i+1));
+ newVec.set(i, origVec.get(i+1));
else
newVec.set(i, origVec.get(i));
} | Fixed off-by-one bug in one of the constructors.
git-svn-id: <URL> | EdwardRaff_JSAT | train |
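A hedged Ruby sketch of the corrected copy loop (the original is Java; Ruby is used here for consistency with the other examples in this section). The loop builds a vector that is one element shorter — as the loop bound suggests — by dropping the column at index predicting; before the fix, the first branch wrote to i+1 instead of i, leaving a gap in the new vector:

new_vec = Array.new(orig_vec.length - 1)
(0...(orig_vec.length - 1)).each do |i|
  if i >= predicting
    new_vec[i] = orig_vec[i + 1]  # read past the dropped column, write in place
  else
    new_vec[i] = orig_vec[i]
  end
end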
c9bc5913fd93f40e4d1d51a1b82051df5e68c2ed | diff --git a/internal/driver/graphics.go b/internal/driver/graphics.go
index <HASH>..<HASH> 100644
--- a/internal/driver/graphics.go
+++ b/internal/driver/graphics.go
@@ -28,7 +28,6 @@ type Graphics interface {
SetWindow(window unsafe.Pointer)
SetTransparent(transparent bool)
SetVertices(vertices []float32, indices []uint16)
- Flush()
NewImage(width, height int) (Image, error)
NewScreenFramebufferImage(width, height int) (Image, error)
Reset() error
diff --git a/internal/graphicscommand/command.go b/internal/graphicscommand/command.go
index <HASH>..<HASH> 100644
--- a/internal/graphicscommand/command.go
+++ b/internal/graphicscommand/command.go
@@ -274,10 +274,6 @@ func (q *commandQueue) Flush() {
// introduced than drawTrianglesCommand.
indexOffset += c.NumIndices()
}
- if 0 < nc {
- // Call glFlush to prevent black flicking (especially on Android (#226) and iOS).
- theGraphicsDriver.Flush()
- }
cs = cs[nc:]
}
theGraphicsDriver.End()
diff --git a/internal/graphicsdriver/metal/driver.go b/internal/graphicsdriver/metal/driver.go
index <HASH>..<HASH> 100644
--- a/internal/graphicsdriver/metal/driver.go
+++ b/internal/graphicsdriver/metal/driver.go
@@ -364,10 +364,6 @@ func (d *Driver) SetVertices(vertices []float32, indices []uint16) {
})
}
-func (d *Driver) Flush() {
- // On Metal, flushing command buffers only once is enough except for manipulating pixels. Do not call flush.
-}
-
func (d *Driver) flush(wait bool, present bool) {
d.t.Call(func() error {
if d.cb == (mtl.CommandBuffer{}) {
diff --git a/internal/graphicsdriver/opengl/driver.go b/internal/graphicsdriver/opengl/driver.go
index <HASH>..<HASH> 100644
--- a/internal/graphicsdriver/opengl/driver.go
+++ b/internal/graphicsdriver/opengl/driver.go
@@ -47,7 +47,9 @@ func (d *Driver) Begin() {
}
func (d *Driver) End() {
- // Do nothing.
+ // Call glFlush to prevent black flicking (especially on Android (#226) and iOS).
+ // TODO: examples/sprites worked without this. Is this really needed?
+ d.context.flush()
}
func (d *Driver) SetWindow(window unsafe.Pointer) {
@@ -128,10 +130,6 @@ func (d *Driver) Draw(indexLen int, indexOffset int, mode driver.CompositeMode,
return nil
}
-func (d *Driver) Flush() {
- d.context.flush()
-}
-
func (d *Driver) SetVsyncEnabled(enabled bool) {
// Do nothing
} | driver: Refactoring: Remove Graphics.Flush
Updates #<I> | hajimehoshi_ebiten | train |