prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---|
<|file_name|>jquery.fileupload.js<|end_file_name|><|fim▁begin|>/*
* jQuery File Upload Plugin 5.32.0
* https://github.com/blueimp/jQuery-File-Upload
*
* Copyright 2010, Sebastian Tschan
* https://blueimp.net
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/MIT
*/
/*jslint nomen: true, unparam: true, regexp: true */
/*global define, window, document, location, File, Blob, FormData */
(function (factory) {
'use strict';
if (typeof define === 'function' && define.amd) {
// Register as an anonymous AMD module:
define([
'jquery',
'jquery.ui.widget'
], factory);
} else {
// Browser globals:
factory(window.jQuery);
}
}(function ($) {
'use strict';
// Detect file input support, based on
// http://viljamis.com/blog/2012/file-upload-support-on-mobile/
$.support.fileInput = !(new RegExp(
// Handle devices which give false positives for the feature detection:
'(Android (1\\.[0156]|2\\.[01]))' +
'|(Windows Phone (OS 7|8\\.0))|(XBLWP)|(ZuneWP)' +
'|(w(eb)?OSBrowser)|(webOS)' +
'|(Kindle/(1\\.0|2\\.[05]|3\\.0))'
).test(window.navigator.userAgent) ||
// Feature detection for all other devices:
$('<input type="file">').prop('disabled'));
// The FileReader API is not actually used, but works as feature detection,
// as e.g. Safari supports XHR file uploads via the FormData API,
// but not non-multipart XHR file uploads:
$.support.xhrFileUpload = !!(window.XMLHttpRequestUpload && window.FileReader);
$.support.xhrFormDataFileUpload = !!window.FormData;
// Detect support for Blob slicing (required for chunked uploads):
$.support.blobSlice = window.Blob && (Blob.prototype.slice ||
Blob.prototype.webkitSlice || Blob.prototype.mozSlice);
// The fileupload widget listens for change events on file input fields defined
// via fileInput setting and paste or drop events of the given dropZone.
// In addition to the default jQuery Widget methods, the fileupload widget
// exposes the "add" and "send" methods, to add or directly send files using
// the fileupload API.
// By default, files added via file input selection, paste, drag & drop or
// "add" method are uploaded immediately, but it is possible to override
// the "add" callback option to queue file uploads.
$.widget('blueimp.fileupload', {
options: {
// The drop target element(s); by default the complete document.
// Set to null to disable drag & drop support:
dropZone: $(document),
// The paste target element(s); by default the complete document.
// Set to null to disable paste support:
pasteZone: $(document),
// The file input field(s) that are listened to for change events.
// If undefined, it is set to the file input fields inside
// of the widget element on plugin initialization.
// Set to null to disable the change listener.
fileInput: undefined,
// By default, the file input field is replaced with a clone after
// each input field change event. This is required for iframe transport
// queues and allows change events to be fired for the same file
// selection, but can be disabled by setting the following option to false:
replaceFileInput: true,
// The parameter name for the file form data (the request argument name).
// If undefined or empty, the name property of the file input field is
// used, or "files[]" if the file input name property is also empty.
// It can be a string or an array of strings:
paramName: undefined,
// By default, each file of a selection is uploaded using an individual
// request for XHR type uploads. Set to false to upload all files
// of a selection in a single request:
singleFileUploads: true,
// To limit the number of files uploaded with one XHR request,
// set the following option to an integer greater than 0:
limitMultiFileUploads: undefined,
// Set the following option to true to issue all file upload requests
// in a sequential order:
sequentialUploads: false,
// To limit the number of concurrent uploads,
// set the following option to an integer greater than 0:
limitConcurrentUploads: undefined,
// Set the following option to true to force iframe transport uploads:
forceIframeTransport: false,
// Set the following option to the location of a redirect url on the
// origin server, for cross-domain iframe transport uploads:
redirect: undefined,
// The parameter name for the redirect url, sent as part of the form
// data and set to 'redirect' if this option is empty:
redirectParamName: undefined,
// Set the following option to the location of a postMessage window,
// to enable postMessage transport uploads:
postMessage: undefined,
// By default, XHR file uploads are sent as multipart/form-data.
// The iframe transport always uses multipart/form-data.
// Set to false to enable non-multipart XHR uploads:
multipart: true,
// To upload large files in smaller chunks, set the following option
// to a preferred maximum chunk size. If set to 0, null or undefined,
// or the browser does not support the required Blob API, files will
// be uploaded as a whole.
maxChunkSize: undefined,
// When a non-multipart upload or a chunked multipart upload has been
// aborted, this option can be used to resume the upload by setting
// it to the size of the already uploaded bytes. This option is most
// useful when modifying the options object inside of the "add" or
// "send" callbacks, as the options are cloned for each file upload.
uploadedBytes: undefined,
// By default, failed (abort or error) file uploads are removed from the
// global progress calculation. Set the following option to false to
// prevent recalculating the global progress data:
recalculateProgress: true,
// Interval in milliseconds to calculate and trigger progress events:
progressInterval: 100,
// Interval in milliseconds to calculate progress bitrate:
bitrateInterval: 500,
// By default, uploads are started automatically when adding files:
autoUpload: true,
// Error and info messages:
messages: {
uploadedBytes: 'Uploaded bytes exceed file size'
},
// Translation function, gets the message key to be translated
// and an object with context specific data as arguments:
i18n: function (message, context) {
message = this.messages[message] || message.toString();
if (context) {
$.each(context, function (key, value) {
message = message.replace('{' + key + '}', value);
});
}
return message;
},
// Additional form data to be sent along with the file uploads can be set
// using this option, which accepts an array of objects with name and
// value properties, a function returning such an array, a FormData
// object (for XHR file uploads), or a simple object.
// The form of the first fileInput is given as a parameter to the function:
formData: function (form) {
return form.serializeArray();
},
// The add callback is invoked as soon as files are added to the fileupload
// widget (via file input selection, drag & drop, paste or add API call).
// If the singleFileUploads option is enabled, this callback will be
// called once for each file in the selection for XHR file uploads, else
// once for each file selection.
//
// The upload starts when the submit method is invoked on the data parameter.
// The data object contains a files property holding the added files
// and allows you to override plugin options as well as define ajax settings.
//
// Listeners for this callback can also be bound the following way:
// .bind('fileuploadadd', func);
//
// data.submit() returns a Promise object and allows attaching additional
// handlers using jQuery's Deferred callbacks:
// data.submit().done(func).fail(func).always(func);
add: function (e, data) {
if (data.autoUpload || (data.autoUpload !== false &&
$(this).fileupload('option', 'autoUpload'))) {
data.process().done(function () {
data.submit();
});
}
},
// Other callbacks:
// Callback for the submit event of each file upload:
// submit: function (e, data) {}, // .bind('fileuploadsubmit', func);
// Callback for the start of each file upload request:
// send: function (e, data) {}, // .bind('fileuploadsend', func);
// Callback for successful uploads:
// done: function (e, data) {}, // .bind('fileuploaddone', func);
// Callback for failed (abort or error) uploads:
// fail: function (e, data) {}, // .bind('fileuploadfail', func);
// Callback for completed (success, abort or error) requests:
// always: function (e, data) {}, // .bind('fileuploadalways', func);
// Callback for upload progress events:
// progress: function (e, data) {}, // .bind('fileuploadprogress', func);
// Callback for global upload progress events:
// progressall: function (e, data) {}, // .bind('fileuploadprogressall', func);
// Callback for uploads start, equivalent to the global ajaxStart event:
// start: function (e) {}, // .bind('fileuploadstart', func);
// Callback for uploads stop, equivalent to the global ajaxStop event:
// stop: function (e) {}, // .bind('fileuploadstop', func);
// Callback for change events of the fileInput(s):
// change: function (e, data) {}, // .bind('fileuploadchange', func);
// Callback for paste events to the pasteZone(s):
// paste: function (e, data) {}, // .bind('fileuploadpaste', func);
// Callback for drop events of the dropZone(s):
// drop: function (e, data) {}, // .bind('fileuploaddrop', func);
// Callback for dragover events of the dropZone(s):
// dragover: function (e) {}, // .bind('fileuploaddragover', func);
// Callback for the start of each chunk upload request:
// chunksend: function (e, data) {}, // .bind('fileuploadchunksend', func);
// Callback for successful chunk uploads:
// chunkdone: function (e, data) {}, // .bind('fileuploadchunkdone', func);
// Callback for failed (abort or error) chunk uploads:
// chunkfail: function (e, data) {}, // .bind('fileuploadchunkfail', func);
// Callback for completed (success, abort or error) chunk upload requests:
// chunkalways: function (e, data) {}, // .bind('fileuploadchunkalways', func);
// The plugin options are used as settings object for the ajax calls.
// The following are jQuery ajax settings required for the file uploads:
processData: false,
contentType: false,
cache: false
},
// A list of options that require reinitializing event listeners and/or
// special initialization code:
_specialOptions: [
'fileInput',
'dropZone',
'pasteZone',
'multipart',
'forceIframeTransport'
],
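// Normalizes the vendor-prefixed Blob slicing methods (slice, webkitSlice, mozSlice) into a single helper: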
_blobSlice: $.support.blobSlice && function () {
var slice = this.slice || this.webkitSlice || this.mozSlice;
return slice.apply(this, arguments);
},
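// Helper object that tracks transferred bytes over time to calculate the upload bitrate in bits per second: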
_BitrateTimer: function () {
this.timestamp = ((Date.now) ? Date.now() : (new Date()).getTime());
this.loaded = 0;
this.bitrate = 0;
this.getBitrate = function (now, loaded, interval) {
var timeDiff = now - this.timestamp;
if (!this.bitrate || !interval || timeDiff > interval) {
this.bitrate = (loaded - this.loaded) * (1000 / timeDiff) * 8;
this.loaded = loaded;
this.timestamp = now;
}
return this.bitrate;
};
},
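// Determines whether the upload can be performed via XHR instead of the iframe transport: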
_isXHRUpload: function (options) {
return !options.forceIframeTransport &&
((!options.multipart && $.support.xhrFileUpload) ||
$.support.xhrFormDataFileUpload);
},
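// Normalizes the formData option (callback function, array or plain object) into an array of name/value objects: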
_getFormData: function (options) {
var formData;
if (typeof options.formData === 'function') {
return options.formData(options.form);
}
if ($.isArray(options.formData)) {
return options.formData;
}
if ($.type(options.formData) === 'object') {
formData = [];
$.each(options.formData, function (name, value) {
formData.push({name: name, value: value});
});
return formData;
}
return [];
},
_getTotal: function (files) {
var total = 0;
$.each(files, function (index, file) {
total += file.size || 1;
});
return total;
},
_initProgressObject: function (obj) {
var progress = {
loaded: 0,
total: 0,
bitrate: 0
};
if (obj._progress) {
$.extend(obj._progress, progress);
} else {
obj._progress = progress;
}
},
_initResponseObject: function (obj) {
var prop;
if (obj._response) {
for (prop in obj._response) {
if (obj._response.hasOwnProperty(prop)) {
delete obj._response[prop];
}
}
} else {
obj._response = {};
}
},
_onProgress: function (e, data) {
if (e.lengthComputable) {
var now = ((Date.now) ? Date.now() : (new Date()).getTime()),
loaded;
if (data._time && data.progressInterval &&
(now - data._time < data.progressInterval) &&
e.loaded !== e.total) {
return;
}
data._time = now;
loaded = Math.floor(
e.loaded / e.total * (data.chunkSize || data._progress.total)
) + (data.uploadedBytes || 0);
// Add the difference from the previously loaded state
// to the global loaded counter:
this._progress.loaded += (loaded - data._progress.loaded);
this._progress.bitrate = this._bitrateTimer.getBitrate(
now,
this._progress.loaded,
data.bitrateInterval
);
data._progress.loaded = data.loaded = loaded;
data._progress.bitrate = data.bitrate = data._bitrateTimer.getBitrate(
now,
loaded,
data.bitrateInterval
);
// Trigger a custom progress event with a total data property set
// to the file size(s) of the current upload and a loaded data
// property calculated accordingly:
this._trigger('progress', e, data);
// Trigger a global progress event for all current file uploads,
// including ajax calls queued for sequential file uploads:
this._trigger('progressall', e, this._progress);
}
},
_initProgressListener: function (options) {
var that = this,
xhr = options.xhr ? options.xhr() : $.ajaxSettings.xhr();
// Access to the native XHR object is required to add event listeners
// for the upload progress event:
if (xhr.upload) {
$(xhr.upload).bind('progress', function (e) {
var oe = e.originalEvent;
// Make sure the progress event properties get copied over:
e.lengthComputable = oe.lengthComputable;
e.loaded = oe.loaded;
e.total = oe.total;
that._onProgress(e, options);
});
options.xhr = function () {
return xhr;
};
}
},
_isInstanceOf: function (type, obj) {
// Cross-frame instanceof check
return Object.prototype.toString.call(obj) === '[object ' + type + ']';
},
_initXHRData: function (options) {
var that = this,
formData,
file = options.files[0],
// Ignore non-multipart setting if not supported:
multipart = options.multipart || !$.support.xhrFileUpload,
paramName = options.paramName[0];
options.headers = options.headers || {};
if (options.contentRange) {
options.headers['Content-Range'] = options.contentRange;
}
if (!multipart || options.blob || !this._isInstanceOf('File', file)) {
options.headers['Content-Disposition'] = 'attachment; filename="' +
encodeURI(file.name) + '"';
}
if (!multipart) {
options.contentType = file.type;
options.data = options.blob || file;
} else if ($.support.xhrFormDataFileUpload) {
if (options.postMessage) {
// window.postMessage does not allow sending FormData
// objects, so we just add the File/Blob objects to
// the formData array and let the postMessage window
// create the FormData object out of this array:
formData = this._getFormData(options);
if (options.blob) {
formData.push({
name: paramName,
value: options.blob
});
} else {
$.each(options.files, function (index, file) {
formData.push({
name: options.paramName[index] || paramName,
value: file
});
});
}
} else {
if (that._isInstanceOf('FormData', options.formData)) {
formData = options.formData;
} else {
formData = new FormData();
$.each(this._getFormData(options), function (index, field) {
formData.append(field.name, field.value);
});
}
if (options.blob) {
formData.append(paramName, options.blob, file.name);
} else {
$.each(options.files, function (index, file) {
// This check allows the tests to run with
// dummy objects:
if (that._isInstanceOf('File', file) ||
that._isInstanceOf('Blob', file)) {
formData.append(
options.paramName[index] || paramName,
file,
file.name
);
}
});
}
}
options.data = formData;
}
// Blob reference is not needed anymore, free memory:
options.blob = null;
},
_initIframeSettings: function (options) {
var targetHost = $('<a></a>').prop('href', options.url).prop('host');
// Setting the dataType to iframe enables the iframe transport:
options.dataType = 'iframe ' + (options.dataType || '');
// The iframe transport accepts a serialized array as form data:
options.formData = this._getFormData(options);
// Add redirect url to form data on cross-domain uploads:
if (options.redirect && targetHost && targetHost !== location.host) {
options.formData.push({
name: options.redirectParamName || 'redirect',
value: options.redirect
});
}
},
_initDataSettings: function (options) {
if (this._isXHRUpload(options)) {
if (!this._chunkedUpload(options, true)) {
if (!options.data) {
this._initXHRData(options);
}
this._initProgressListener(options);
}
if (options.postMessage) {
// Setting the dataType to postmessage enables the
// postMessage transport:
options.dataType = 'postmessage ' + (options.dataType || '');
}
} else {
this._initIframeSettings(options);
}
},
_getParamName: function (options) {
var fileInput = $(options.fileInput),
paramName = options.paramName;
if (!paramName) {
paramName = [];
fileInput.each(function () {
var input = $(this),
name = input.prop('name') || 'files[]',
i = (input.prop('files') || [1]).length;
while (i) {
paramName.push(name);
i -= 1;
}
});
if (!paramName.length) {
paramName = [fileInput.prop('name') || 'files[]'];
}
} else if (!$.isArray(paramName)) {
paramName = [paramName];
}
return paramName;
},
_initFormSettings: function (options) {
// Retrieve missing options from the input field and the
// associated form, if available:
if (!options.form || !options.form.length) {
options.form = $(options.fileInput.prop('form'));
// If the given file input doesn't have an associated form,
// use the default widget file input's form:
if (!options.form.length) {
options.form = $(this.options.fileInput.prop('form'));
}
}
options.paramName = this._getParamName(options);
if (!options.url) {
options.url = options.form.prop('action') || location.href;
}
// The HTTP request method must be "POST", "PUT" or "PATCH":
options.type = (options.type || options.form.prop('method') || '')
.toUpperCase();
if (options.type !== 'POST' && options.type !== 'PUT' &&
options.type !== 'PATCH') {
options.type = 'POST';
}
if (!options.formAcceptCharset) {
options.formAcceptCharset = options.form.attr('accept-charset');
}
},
_getAJAXSettings: function (data) {
var options = $.extend({}, this.options, data);
this._initFormSettings(options);
this._initDataSettings(options);
return options;
},
// jQuery 1.6 doesn't provide .state(),
// while jQuery 1.8+ removed .isRejected() and .isResolved():
_getDeferredState: function (deferred) {
if (deferred.state) {
return deferred.state();
}
if (deferred.isResolved()) {
return 'resolved';
}
if (deferred.isRejected()) {
return 'rejected';
}
return 'pending';
},
// Maps jqXHR callbacks to the equivalent
// methods of the given Promise object:
_enhancePromise: function (promise) {
promise.success = promise.done;
promise.error = promise.fail;
promise.complete = promise.always;
return promise;
},
// Creates and returns a Promise object enhanced with
// the jqXHR methods abort, success, error and complete:
_getXHRPromise: function (resolveOrReject, context, args) {
var dfd = $.Deferred(),
promise = dfd.promise();
context = context || this.options.context || promise;
if (resolveOrReject === true) {
dfd.resolveWith(context, args);
} else if (resolveOrReject === false) {
dfd.rejectWith(context, args);
}
promise.abort = dfd.promise;
return this._enhancePromise(promise);
},
// Adds convenience methods to the data callback argument:
_addConvenienceMethods: function (e, data) {
var that = this,
getPromise = function (data) {
return $.Deferred().resolveWith(that, [data]).promise();
};
data.process = function (resolveFunc, rejectFunc) {
if (resolveFunc || rejectFunc) {
data._processQueue = this._processQueue =
(this._processQueue || getPromise(this))
.pipe(resolveFunc, rejectFunc);
}
return this._processQueue || getPromise(this);
};
data.submit = function () {
if (this.state() !== 'pending') {
data.jqXHR = this.jqXHR =
(that._trigger('submit', e, this) !== false) &&
that._onSend(e, this);
}
return this.jqXHR || that._getXHRPromise();
};
data.abort = function () {
if (this.jqXHR) {
return this.jqXHR.abort();
}
return that._getXHRPromise();
};
data.state = function () {
if (this.jqXHR) {
return that._getDeferredState(this.jqXHR);
}
if (this._processQueue) {
return that._getDeferredState(this._processQueue);
}
};
data.progress = function () {
return this._progress;
};
data.response = function () {
return this._response;<|fim▁hole|>
// Parses the Range header from the server response
// and returns the uploaded bytes:
_getUploadedBytes: function (jqXHR) {
var range = jqXHR.getResponseHeader('Range'),
parts = range && range.split('-'),
upperBytesPos = parts && parts.length > 1 &&
parseInt(parts[1], 10);
return upperBytesPos && upperBytesPos + 1;
},
// Uploads a file in multiple, sequential requests
// by splitting the file up into multiple blob chunks.
// If the second parameter is true, only tests if the file
// should be uploaded in chunks, but does not invoke any
// upload requests:
_chunkedUpload: function (options, testOnly) {
options.uploadedBytes = options.uploadedBytes || 0;
var that = this,
file = options.files[0],
fs = file.size,
ub = options.uploadedBytes,
mcs = options.maxChunkSize || fs,
slice = this._blobSlice,
dfd = $.Deferred(),
promise = dfd.promise(),
jqXHR,
upload;
if (!(this._isXHRUpload(options) && slice && (ub || mcs < fs)) ||
options.data) {
return false;
}
if (testOnly) {
return true;
}
if (ub >= fs) {
file.error = options.i18n('uploadedBytes');
return this._getXHRPromise(
false,
options.context,
[null, 'error', file.error]
);
}
// The chunk upload method:
upload = function () {
// Clone the options object for each chunk upload:
var o = $.extend({}, options),
currentLoaded = o._progress.loaded;
o.blob = slice.call(
file,
ub,
ub + mcs,
file.type
);
// Store the current chunk size, as the blob itself
// will be dereferenced after data processing:
o.chunkSize = o.blob.size;
// Expose the chunk bytes position range:
o.contentRange = 'bytes ' + ub + '-' +
(ub + o.chunkSize - 1) + '/' + fs;
// Process the upload data (the blob and potential form data):
that._initXHRData(o);
// Add progress listeners for this chunk upload:
that._initProgressListener(o);
jqXHR = ((that._trigger('chunksend', null, o) !== false && $.ajax(o)) ||
that._getXHRPromise(false, o.context))
.done(function (result, textStatus, jqXHR) {
ub = that._getUploadedBytes(jqXHR) ||
(ub + o.chunkSize);
// Create a progress event if no final progress event
// with loaded equaling total has been triggered
// for this chunk:
if (currentLoaded + o.chunkSize - o._progress.loaded) {
that._onProgress($.Event('progress', {
lengthComputable: true,
loaded: ub - o.uploadedBytes,
total: ub - o.uploadedBytes
}), o);
}
options.uploadedBytes = o.uploadedBytes = ub;
o.result = result;
o.textStatus = textStatus;
o.jqXHR = jqXHR;
that._trigger('chunkdone', null, o);
that._trigger('chunkalways', null, o);
if (ub < fs) {
// File upload not yet complete,
// continue with the next chunk:
upload();
} else {
dfd.resolveWith(
o.context,
[result, textStatus, jqXHR]
);
}
})
.fail(function (jqXHR, textStatus, errorThrown) {
o.jqXHR = jqXHR;
o.textStatus = textStatus;
o.errorThrown = errorThrown;
that._trigger('chunkfail', null, o);
that._trigger('chunkalways', null, o);
dfd.rejectWith(
o.context,
[jqXHR, textStatus, errorThrown]
);
});
};
this._enhancePromise(promise);
promise.abort = function () {
return jqXHR.abort();
};
upload();
return promise;
},
_beforeSend: function (e, data) {
if (this._active === 0) {
// the start callback is triggered when an upload starts
// and no other uploads are currently running,
// equivalent to the global ajaxStart event:
this._trigger('start');
// Set timer for global bitrate progress calculation:
this._bitrateTimer = new this._BitrateTimer();
// Reset the global progress values:
this._progress.loaded = this._progress.total = 0;
this._progress.bitrate = 0;
}
// Make sure the container objects for the .response() and
// .progress() methods on the data object are available
// and reset to their initial state:
this._initResponseObject(data);
this._initProgressObject(data);
data._progress.loaded = data.loaded = data.uploadedBytes || 0;
data._progress.total = data.total = this._getTotal(data.files) || 1;
data._progress.bitrate = data.bitrate = 0;
this._active += 1;
// Initialize the global progress values:
this._progress.loaded += data.loaded;
this._progress.total += data.total;
},
_onDone: function (result, textStatus, jqXHR, options) {
var total = options._progress.total,
response = options._response;
if (options._progress.loaded < total) {
// Create a progress event if no final progress event
// with loaded equaling total has been triggered:
this._onProgress($.Event('progress', {
lengthComputable: true,
loaded: total,
total: total
}), options);
}
response.result = options.result = result;
response.textStatus = options.textStatus = textStatus;
response.jqXHR = options.jqXHR = jqXHR;
this._trigger('done', null, options);
},
_onFail: function (jqXHR, textStatus, errorThrown, options) {
var response = options._response;
if (options.recalculateProgress) {
// Remove the failed (error or abort) file upload from
// the global progress calculation:
this._progress.loaded -= options._progress.loaded;
this._progress.total -= options._progress.total;
}
response.jqXHR = options.jqXHR = jqXHR;
response.textStatus = options.textStatus = textStatus;
response.errorThrown = options.errorThrown = errorThrown;
this._trigger('fail', null, options);
},
_onAlways: function (jqXHRorResult, textStatus, jqXHRorError, options) {
// jqXHRorResult, textStatus and jqXHRorError are added to the
// options object via done and fail callbacks
this._trigger('always', null, options);
},
_onSend: function (e, data) {
if (!data.submit) {
this._addConvenienceMethods(e, data);
}
var that = this,
jqXHR,
aborted,
slot,
pipe,
options = that._getAJAXSettings(data),
send = function () {
that._sending += 1;
// Set timer for bitrate progress calculation:
options._bitrateTimer = new that._BitrateTimer();
jqXHR = jqXHR || (
((aborted || that._trigger('send', e, options) === false) &&
that._getXHRPromise(false, options.context, aborted)) ||
that._chunkedUpload(options) || $.ajax(options)
).done(function (result, textStatus, jqXHR) {
that._onDone(result, textStatus, jqXHR, options);
}).fail(function (jqXHR, textStatus, errorThrown) {
that._onFail(jqXHR, textStatus, errorThrown, options);
}).always(function (jqXHRorResult, textStatus, jqXHRorError) {
that._onAlways(
jqXHRorResult,
textStatus,
jqXHRorError,
options
);
that._sending -= 1;
that._active -= 1;
if (options.limitConcurrentUploads &&
options.limitConcurrentUploads > that._sending) {
// Start the next queued upload,
// that has not been aborted:
var nextSlot = that._slots.shift();
while (nextSlot) {
if (that._getDeferredState(nextSlot) === 'pending') {
nextSlot.resolve();
break;
}
nextSlot = that._slots.shift();
}
}
if (that._active === 0) {
// The stop callback is triggered when all uploads have
// been completed, equivalent to the global ajaxStop event:
that._trigger('stop');
}
});
return jqXHR;
};
this._beforeSend(e, options);
if (this.options.sequentialUploads ||
(this.options.limitConcurrentUploads &&
this.options.limitConcurrentUploads <= this._sending)) {
if (this.options.limitConcurrentUploads > 1) {
slot = $.Deferred();
this._slots.push(slot);
pipe = slot.pipe(send);
} else {
this._sequence = this._sequence.pipe(send, send);
pipe = this._sequence;
}
// Return the piped Promise object, enhanced with an abort method,
// which is delegated to the jqXHR object of the current upload,
// and jqXHR callbacks mapped to the equivalent Promise methods:
pipe.abort = function () {
aborted = [undefined, 'abort', 'abort'];
if (!jqXHR) {
if (slot) {
slot.rejectWith(options.context, aborted);
}
return send();
}
return jqXHR.abort();
};
return this._enhancePromise(pipe);
}
return send();
},
_onAdd: function (e, data) {
var that = this,
result = true,
options = $.extend({}, this.options, data),
limit = options.limitMultiFileUploads,
paramName = this._getParamName(options),
paramNameSet,
paramNameSlice,
fileSet,
i;
if (!(options.singleFileUploads || limit) ||
!this._isXHRUpload(options)) {
fileSet = [data.files];
paramNameSet = [paramName];
} else if (!options.singleFileUploads && limit) {
fileSet = [];
paramNameSet = [];
for (i = 0; i < data.files.length; i += limit) {
fileSet.push(data.files.slice(i, i + limit));
paramNameSlice = paramName.slice(i, i + limit);
if (!paramNameSlice.length) {
paramNameSlice = paramName;
}
paramNameSet.push(paramNameSlice);
}
} else {
paramNameSet = paramName;
}
data.originalFiles = data.files;
$.each(fileSet || data.files, function (index, element) {
var newData = $.extend({}, data);
newData.files = fileSet ? element : [element];
newData.paramName = paramNameSet[index];
that._initResponseObject(newData);
that._initProgressObject(newData);
that._addConvenienceMethods(e, newData);
result = that._trigger('add', e, newData);
return result;
});
return result;
},
_replaceFileInput: function (input) {
var inputClone = input.clone(true);
$('<form></form>').append(inputClone)[0].reset();
// Detaching allows inserting the fileInput into another form
// without losing the file input value:
input.after(inputClone).detach();
// Avoid memory leaks with the detached file input:
$.cleanData(input.unbind('remove'));
// Replace the original file input element in the fileInput
// elements set with the clone, which has been copied including
// event handlers:
this.options.fileInput = this.options.fileInput.map(function (i, el) {
if (el === input[0]) {
return inputClone[0];
}
return el;
});
// If the widget has been initialized on the file input itself,
// override this.element with the file input clone:
if (input[0] === this.element[0]) {
this.element = inputClone;
}
},
_handleFileTreeEntry: function (entry, path) {
var that = this,
dfd = $.Deferred(),
errorHandler = function (e) {
if (e && !e.entry) {
e.entry = entry;
}
// Since $.when returns immediately if one
// Deferred is rejected, we use resolve instead.
// This allows valid files and invalid items
// to be returned together in one set:
dfd.resolve([e]);
},
dirReader;
path = path || '';
if (entry.isFile) {
if (entry._file) {
// Workaround for Chrome bug #149735
entry._file.relativePath = path;
dfd.resolve(entry._file);
} else {
entry.file(function (file) {
file.relativePath = path;
dfd.resolve(file);
}, errorHandler);
}
} else if (entry.isDirectory) {
dirReader = entry.createReader();
dirReader.readEntries(function (entries) {
that._handleFileTreeEntries(
entries,
path + entry.name + '/'
).done(function (files) {
dfd.resolve(files);
}).fail(errorHandler);
}, errorHandler);
} else {
// Return an empty list for file system items
// other than files or directories:
dfd.resolve([]);
}
return dfd.promise();
},
_handleFileTreeEntries: function (entries, path) {
var that = this;
return $.when.apply(
$,
$.map(entries, function (entry) {
return that._handleFileTreeEntry(entry, path);
})
).pipe(function () {
return Array.prototype.concat.apply(
[],
arguments
);
});
},
_getDroppedFiles: function (dataTransfer) {
dataTransfer = dataTransfer || {};
var items = dataTransfer.items;
if (items && items.length && (items[0].webkitGetAsEntry ||
items[0].getAsEntry)) {
return this._handleFileTreeEntries(
$.map(items, function (item) {
var entry;
if (item.webkitGetAsEntry) {
entry = item.webkitGetAsEntry();
if (entry) {
// Workaround for Chrome bug #149735:
entry._file = item.getAsFile();
}
return entry;
}
return item.getAsEntry();
})
);
}
return $.Deferred().resolve(
$.makeArray(dataTransfer.files)
).promise();
},
_getSingleFileInputFiles: function (fileInput) {
fileInput = $(fileInput);
var entries = fileInput.prop('webkitEntries') ||
fileInput.prop('entries'),
files,
value;
if (entries && entries.length) {
return this._handleFileTreeEntries(entries);
}
files = $.makeArray(fileInput.prop('files'));
if (!files.length) {
value = fileInput.prop('value');
if (!value) {
return $.Deferred().resolve([]).promise();
}
// If the files property is not available, the browser does not
// support the File API and we add a pseudo File object with
// the input value as name with path information removed:
files = [{name: value.replace(/^.*\\/, '')}];
} else if (files[0].name === undefined && files[0].fileName) {
// File normalization for Safari 4 and Firefox 3:
$.each(files, function (index, file) {
file.name = file.fileName;
file.size = file.fileSize;
});
}
return $.Deferred().resolve(files).promise();
},
_getFileInputFiles: function (fileInput) {
if (!(fileInput instanceof $) || fileInput.length === 1) {
return this._getSingleFileInputFiles(fileInput);
}
return $.when.apply(
$,
$.map(fileInput, this._getSingleFileInputFiles)
).pipe(function () {
return Array.prototype.concat.apply(
[],
arguments
);
});
},
_onChange: function (e) {
var that = this,
data = {
fileInput: $(e.target),
form: $(e.target.form)
};
this._getFileInputFiles(data.fileInput).always(function (files) {
data.files = files;
if (that.options.replaceFileInput) {
that._replaceFileInput(data.fileInput);
}
if (that._trigger('change', e, data) !== false) {
that._onAdd(e, data);
}
});
},
_onPaste: function (e) {
var items = e.originalEvent && e.originalEvent.clipboardData &&
e.originalEvent.clipboardData.items,
data = {files: []};
if (items && items.length) {
$.each(items, function (index, item) {
var file = item.getAsFile && item.getAsFile();
if (file) {
data.files.push(file);
}
});
if (this._trigger('paste', e, data) === false ||
this._onAdd(e, data) === false) {
return false;
}
}
},
_onDrop: function (e) {
e.dataTransfer = e.originalEvent && e.originalEvent.dataTransfer;
var that = this,
dataTransfer = e.dataTransfer,
data = {};
if (dataTransfer && dataTransfer.files && dataTransfer.files.length) {
e.preventDefault();
this._getDroppedFiles(dataTransfer).always(function (files) {
data.files = files;
if (that._trigger('drop', e, data) !== false) {
that._onAdd(e, data);
}
});
}
},
_onDragOver: function (e) {
e.dataTransfer = e.originalEvent && e.originalEvent.dataTransfer;
var dataTransfer = e.dataTransfer;
if (dataTransfer) {
if (this._trigger('dragover', e) === false) {
return false;
}
if ($.inArray('Files', dataTransfer.types) !== -1) {
dataTransfer.dropEffect = 'copy';
e.preventDefault();
}
}
},
_initEventHandlers: function () {
if (this._isXHRUpload(this.options)) {
this._on(this.options.dropZone, {
dragover: this._onDragOver,
drop: this._onDrop
});
this._on(this.options.pasteZone, {
paste: this._onPaste
});
}
if ($.support.fileInput) {
this._on(this.options.fileInput, {
change: this._onChange
});
}
},
_destroyEventHandlers: function () {
this._off(this.options.dropZone, 'dragover drop');
this._off(this.options.pasteZone, 'paste');
this._off(this.options.fileInput, 'change');
},
_setOption: function (key, value) {
var reinit = $.inArray(key, this._specialOptions) !== -1;
if (reinit) {
this._destroyEventHandlers();
}
this._super(key, value);
if (reinit) {
this._initSpecialOptions();
this._initEventHandlers();
}
},
_initSpecialOptions: function () {
var options = this.options;
if (options.fileInput === undefined) {
options.fileInput = this.element.is('input[type="file"]') ?
this.element : this.element.find('input[type="file"]');
} else if (!(options.fileInput instanceof $)) {
options.fileInput = $(options.fileInput);
}
if (!(options.dropZone instanceof $)) {
options.dropZone = $(options.dropZone);
}
if (!(options.pasteZone instanceof $)) {
options.pasteZone = $(options.pasteZone);
}
},
_getRegExp: function (str) {
var parts = str.split('/'),
modifiers = parts.pop();
parts.shift();
return new RegExp(parts.join('/'), modifiers);
},
_isRegExpOption: function (key, value) {
return key !== 'url' && $.type(value) === 'string' &&
/^\/.*\/[igm]{0,3}$/.test(value);
},
_initDataAttributes: function () {
var that = this,
options = this.options;
// Initialize options set via HTML5 data-attributes:
$.each(
$(this.element[0].cloneNode(false)).data(),
function (key, value) {
if (that._isRegExpOption(key, value)) {
value = that._getRegExp(value);
}
options[key] = value;
}
);
},
_create: function () {
this._initDataAttributes();
this._initSpecialOptions();
this._slots = [];
this._sequence = this._getXHRPromise(true);
this._sending = this._active = 0;
this._initProgressObject(this);
this._initEventHandlers();
},
// This method is exposed to the widget API and allows querying
// the number of active uploads:
active: function () {
return this._active;
},
// This method is exposed to the widget API and allows querying
// the widget upload progress.
// It returns an object with loaded, total and bitrate properties
// for the running uploads:
progress: function () {
return this._progress;
},
// This method is exposed to the widget API and allows adding files
// using the fileupload API. The data parameter accepts an object which
// must have a files property and can contain additional options:
// .fileupload('add', {files: filesList});
add: function (data) {
var that = this;
if (!data || this.options.disabled) {
return;
}
if (data.fileInput && !data.files) {
this._getFileInputFiles(data.fileInput).always(function (files) {
data.files = files;
that._onAdd(null, data);
});
} else {
data.files = $.makeArray(data.files);
this._onAdd(null, data);
}
},
// This method is exposed to the widget API and allows sending files
// using the fileupload API. The data parameter accepts an object which
// must have a files or fileInput property and can contain additional options:
// .fileupload('send', {files: filesList});
// The method returns a Promise object for the file upload call.
send: function (data) {
if (data && !this.options.disabled) {
if (data.fileInput && !data.files) {
var that = this,
dfd = $.Deferred(),
promise = dfd.promise(),
jqXHR,
aborted;
promise.abort = function () {
aborted = true;
if (jqXHR) {
return jqXHR.abort();
}
dfd.reject(null, 'abort', 'abort');
return promise;
};
this._getFileInputFiles(data.fileInput).always(
function (files) {
if (aborted) {
return;
}
data.files = files;
jqXHR = that._onSend(null, data).then(
function (result, textStatus, jqXHR) {
dfd.resolve(result, textStatus, jqXHR);
},
function (jqXHR, textStatus, errorThrown) {
dfd.reject(jqXHR, textStatus, errorThrown);
}
);
}
);
return this._enhancePromise(promise);
}
data.files = $.makeArray(data.files);
if (data.files.length) {
return this._onSend(null, data);
}
}
return this._getXHRPromise(false, data && data.context);
}
});
}));<|fim▁end|>
|
};
},
|
<|file_name|>keys_test.py<|end_file_name|><|fim▁begin|># coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import<|fim▁hole|>
import unittest
import mock
from apache_beam.examples.snippets.transforms.element_wise.keys import *
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
@mock.patch('apache_beam.Pipeline', TestPipeline)
# pylint: disable=line-too-long
@mock.patch('apache_beam.examples.snippets.transforms.element_wise.keys.print', lambda elem: elem)
# pylint: enable=line-too-long
class KeysTest(unittest.TestCase):
def __init__(self, methodName):
super(KeysTest, self).__init__(methodName)
# [START icons]
icons = [
'🍓',
'🥕',
'🍆',
'🍅',
'🥔',
]
# [END icons]
self.icons_test = lambda actual: assert_that(actual, equal_to(icons))
def test_keys(self):
keys(self.icons_test)
if __name__ == '__main__':
unittest.main()<|fim▁end|>
|
from __future__ import print_function
|
<|file_name|>Pattern.ts<|end_file_name|><|fim▁begin|>import { Loop, LoopOptions } from "./Loop";
import { PatternGenerator, PatternName } from "./PatternGenerator";
import { ToneEventCallback } from "./ToneEvent";
import { optionsFromArguments } from "../core/util/Defaults";
import { Seconds } from "../core/type/Units";
import { noOp } from "../core/util/Interface";
export interface PatternOptions<ValueType> extends LoopOptions {
pattern: PatternName;
values: ValueType[];
callback: (time: Seconds, value?: ValueType) => void;
}
/**
* Pattern arpeggiates between the given notes
* in a number of patterns.
* @example
* const pattern = new Tone.Pattern((time, note) => {
* // the order of the notes passed in depends on the pattern
* }, ["C2", "D4", "E5", "A6"], "upDown");
* @category Event
*/
export class Pattern<ValueType> extends Loop<PatternOptions<ValueType>> {
readonly name: string = "Pattern";
/**
* The pattern generator function
*/
private _pattern: Iterator<ValueType>;
/**<|fim▁hole|>
/**
* Hold the pattern type
*/
private _type: PatternName;
/**
* Hold the values
*/
private _values: ValueType[];
/**
* The callback to be invoked at a regular interval
*/
callback: (time: Seconds, value?: ValueType) => void;
/**
* @param callback The callback to invoke with the event.
* @param values The values to arpeggiate over.
* @param pattern The name of the pattern
*/
constructor(
callback?: ToneEventCallback<ValueType>,
values?: ValueType[],
pattern?: PatternName,
);
constructor(options?: Partial<PatternOptions<ValueType>>);
constructor() {
super(optionsFromArguments(Pattern.getDefaults(), arguments, ["callback", "values", "pattern"]));
const options = optionsFromArguments(Pattern.getDefaults(), arguments, ["callback", "values", "pattern"]);
this.callback = options.callback;
this._values = options.values;
this._pattern = PatternGenerator(options.values, options.pattern);
this._type = options.pattern;
}
static getDefaults(): PatternOptions<any> {
return Object.assign(Loop.getDefaults(), {
pattern: "up" as "up",
values: [],
callback: noOp,
});
}
/**
* Internal function called when the notes should be called
*/
protected _tick(time: Seconds): void {
const value = this._pattern.next() as IteratorResult<ValueType>;
this._value = value.value;
this.callback(time, this._value);
}
/**
* The array of events.
*/
get values(): ValueType[] {
return this._values;
}
set values(val) {
this._values = val;
// reset the pattern
this.pattern = this._type;
}
/**
* The current value of the pattern.
*/
get value(): ValueType | undefined {
return this._value;
}
/**
* The pattern type. See Tone.CtrlPattern for the full list of patterns.
*/
get pattern(): PatternName {
return this._type;
}
set pattern(pattern) {
this._type = pattern;
this._pattern = PatternGenerator(this._values, this._type);
}
}<|fim▁end|>
|
* The current value
*/
private _value?: ValueType;
|
<|file_name|>SqlViewView.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> * limitations under the License.
*/
package org.apache.ignite.spi.systemview.view;
import org.apache.ignite.internal.managers.systemview.walker.Order;
import org.apache.ignite.internal.processors.query.QueryUtils;
import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemView;
/**
* Sql view representation for a {@link SystemView}.
*/
public class SqlViewView {
/** Sql system view. */
private final SqlSystemView view;
/** @param view Sql system view. */
public SqlViewView(SqlSystemView view) {
this.view = view;
}
/** View name. */
@Order
public String name() {
return view.getTableName();
}
/** View description. */
@Order(2)
public String description() {
return view.getDescription();
}
/** View schema. */
@Order(1)
public String schema() {
return QueryUtils.SCHEMA_SYS;
}
}<|fim▁end|>
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
|
<|file_name|>separator.py<|end_file_name|><|fim▁begin|>from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import wx
from .common import update_class
class Separator(wx.StaticLine):
def __init__(self, parent):
<|fim▁hole|> wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
update_class(Separator)<|fim▁end|>
|
wx.StaticLine.__init__(self, parent.get_container(), -1,
|
<|file_name|>simpleStreaming.js<|end_file_name|><|fim▁begin|>//=====================================================================
// This sample demonstrates using TeslaJS
//
// https://github.com/mseminatore/TeslaJS
//
// Copyright (c) 2016 Mark Seminatore
//
// Refer to included LICENSE file for usage rights and restrictions
//=====================================================================
"use strict";
require('colors');
var program = require('commander');
var framework = require('./sampleFramework.js');
//
//
//
program
.usage('[options]')<|fim▁hole|> .option('-U, --uri [string]', 'URI of test server (e.g. http://127.0.0.1:3000)')
.parse(process.argv);
//
var sample = new framework.SampleFramework(program, sampleMain);
sample.run();
//
//
//
function sampleMain(tjs, options) {
var streamingOptions = {
vehicle_id: options.vehicle_id,
authToken: options.authToken
};
console.log("\nNote: " + "Inactive vehicle streaming responses can take up to several minutes.".green);
console.log("\nStreaming starting...".cyan);
console.log("Columns: timestamp," + tjs.streamingColumns.toString());
tjs.startStreaming(streamingOptions, function (error, response, body) {
if (error) {
console.log(error);
return;
}
// display the streaming results
console.log(body);
console.log("...Streaming ended.".cyan);
});
}<|fim▁end|>
|
.option('-i, --index <n>', 'vehicle index (first car by default)', parseInt)
|
<|file_name|>procurement.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
task_id = fields.Many2one('project.task', 'Task', copy=False)
def _is_procurement_task(self):
return self.product_id.type == 'service' and self.product_id.track_service == 'task'
@api.multi
def _assign(self):
self.ensure_one()
res = super(ProcurementOrder, self)._assign()
if not res:
# if there isn't any specific procurement.rule defined for the product, we may want to create a task
return self._is_procurement_task()
return res
@api.multi
def _run(self):
self.ensure_one()
if self._is_procurement_task() and not self.task_id:
# If the SO was confirmed, cancelled, set to draft then confirmed, avoid creating a new
# task.
if self.sale_line_id:
existing_task = self.env['project.task'].search(
[('sale_line_id', '=', self.sale_line_id.id)]
)
if existing_task:
return existing_task
# create a task for the procurement
return self._create_service_task()
return super(ProcurementOrder, self)._run()
def _convert_qty_company_hours(self):
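# Convert the procured quantity to the company's project time unit (hours) when both units of measure share the same category.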
company_time_uom_id = self.env.user.company_id.project_time_mode_id
if self.product_uom.id != company_time_uom_id.id and self.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = self.product_uom._compute_quantity(self.product_qty, company_time_uom_id)
else:
planned_hours = self.product_qty
return planned_hours
def _get_project(self):
Project = self.env['project.project']
project = self.product_id.with_context(force_company=self.company_id.id).project_id
if not project and self.sale_line_id:<|fim▁hole|> self.sale_line_id.order_id._create_analytic_account()
account = self.sale_line_id.order_id.project_id
project = Project.search([('analytic_account_id', '=', account.id)], limit=1)
if not project:
project_id = account.project_create({'name': account.name, 'use_tasks': True})
project = Project.browse(project_id)
return project
def _create_service_task(self):
project = self._get_project()
planned_hours = self._convert_qty_company_hours()
task = self.env['project.task'].create({
'name': '%s:%s' % (self.origin or '', self.product_id.name),
'date_deadline': self.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': self.sale_line_id.order_id.partner_id.id or self.partner_dest_id.id,
'user_id': self.env.uid,
'procurement_id': self.id,
'description': self.name + '\n',
'project_id': project.id,
'company_id': self.company_id.id,
})
self.write({'task_id': task.id})
msg_body = _("Task Created (%s): <a href=# data-oe-model=project.task data-oe-id=%d>%s</a>") % (self.product_id.name, task.id, task.name)
self.message_post(body=msg_body)
if self.sale_line_id.order_id:
self.sale_line_id.order_id.message_post(body=msg_body)
task_msg = _("This task has been created from: <a href=# data-oe-model=sale.order data-oe-id=%d>%s</a> (%s)") % (self.sale_line_id.order_id.id, self.sale_line_id.order_id.name, self.product_id.name)
task.message_post(body=task_msg)
return task<|fim▁end|>
|
# find the project corresponding to the analytic account of the sales order
account = self.sale_line_id.order_id.project_id
if not account:
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::env::args;
use std::collections::HashMap;
trait Validator {
fn increment(&mut self) -> bool;
fn has_sequence(&self) -> bool;
fn no_forbidden_chars(&self) -> bool;
fn has_two_doubles(&self) -> bool;
}
impl Validator for Vec<u8> {
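// Increments the password in place as a base-26 number (a = 0 ... z = 25); returns true when the counter wraps around, i.e. all combinations are exhausted.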
fn increment(&mut self) -> bool {
*(self.last_mut().unwrap()) += 1;
let mut carry: u8 = 0;
for pos in (0..self.len()).rev() {
if carry > 0 {
self[pos] += 1;
carry = 0;
}
if self[pos] >= 26 {
carry = self[pos] / 26;
self[pos] = 0;
}
}
carry != 0
}
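// Checks for a straight of three consecutive increasing letters, e.g. "abc" or "xyz".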
fn has_sequence(&self) -> bool {
for win in self.windows(3) {
if win[0] + 2 == win[1] + 1 && win[1] + 1 == win[2] {
return true;
}
}
false
}
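// Returns true if the password contains none of the forbidden letters i, o and l.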
fn no_forbidden_chars(&self) -> bool {
let i = ('i' as u8) - ('a' as u8);
let o = ('o' as u8) - ('a' as u8);<|fim▁hole|>
fn has_two_doubles(&self) -> bool {
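// Counts non-overlapping pairs of identical adjacent letters and returns true once two such pairs are found.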
let mut double_count = 0;
let mut pos = 0;
while pos < (self.len() - 1) {
if self[pos] == self[pos + 1] {
double_count += 1;
pos += 1;
if double_count >= 2 {
return true;
}
}
pos += 1;
}
false
}
}
fn main() {
let mut a = args();
a.next(); // The first argument is the binary name/path
let start = a.next().unwrap(); // The puzzle input
let mut char_to_num = HashMap::new();
let mut num_to_char = HashMap::new();
for i in 0..26 {
let ch = (('a' as u8) + i) as char;
char_to_num.insert(ch, i);
num_to_char.insert(i, ch);
}
let mut passwd_vec = start.chars().map(|ch| char_to_num[&ch]).collect::<Vec<u8>>();
loop {
if passwd_vec.increment() {
panic!("All password combinations exhausted and no password found.");
}
if !passwd_vec.has_sequence() {
continue;
}
if !passwd_vec.no_forbidden_chars() {
continue;
}
if !passwd_vec.has_two_doubles() {
continue;
}
break;
}
let readable_passwd = passwd_vec.iter().map(|ch_num| num_to_char[ch_num]).collect::<String>();
println!("The next password is: {:?}", passwd_vec);
println!("Readable password: {:?}", readable_passwd);
}<|fim▁end|>
|
let l = ('l' as u8) - ('a' as u8);
!(self.contains(&i) || self.contains(&o) || self.contains(&l))
}
|
<|file_name|>BalancedBinaryTree.py<|end_file_name|><|fim▁begin|>__source__ = 'https://leetcode.com/problems/balanced-binary-tree/#/description'
# https://github.com/kamyu104/LeetCode/blob/master/Python/balanced-binary-tree.py
# Time: O(n)
# Space: O(h), h is height of binary tree
# divide and conquer
#
# Description: Leetcode # 110. Balanced Binary Tree
#
# Given a binary tree, determine if it is height-balanced.
#
# For this problem, a height-balanced binary tree is defined as a binary tree
# in which the depth of the two subtrees of every node never differ by more than 1.
#
# Companies
# Bloomberg
# Related Topics
# Tree Depth-first Search
# Similar Questions
# Maximum Depth of Binary Tree
#
import unittest
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
return (self.getHeight(root) >= 0)
def getHeight(self, root):
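# Returns the height of the subtree rooted at root, or -1 if it is unbalanced.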
if root is None:
return 0
left_height = self.getHeight(root.left)
right_height = self.getHeight(root.right)
if left_height < 0 or right_height < 0 or abs(left_height - right_height) > 1:
return -1
return max(left_height, right_height) + 1
#http://www.programcreek.com/2013/02/leetcode-balanced-binary-tree-java/
class javaSolution:
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
if not root:
return None
if self.getHeight(root) == -1:
return False
return True
def getHeight(self, root):
if not root:
return 0
left = self.getHeight(root.left)
right = self.getHeight(root.right)
if left == -1 or right == -1:
return -1
if abs(left - right) > 1:
return -1
return max(left, right) + 1
class SolutionOther:
# @param root, a tree node
# @return a boolean
# http://www.cnblogs.com/zuoyuan/p/3720169.html
def isBalanced(self, root):
if root == None:
return True
if abs(self.Height(root.left) - self.Height(root.right)) <= 1:
return self.isBalanced(root.left) and self.isBalanced(root.right)
else:
return False
def Height(self, root) :
if root == None:
return 0
return max(self.Height(root.left), self.Height(root.right)) +1
#############test
#creating BST tree ####
root0=TreeNode(0)
tree1=TreeNode(1)
tree2=TreeNode(2)
tree3=TreeNode(3)
tree4=TreeNode(4)
tree5=TreeNode(5)
tree6=TreeNode(6)
root0.left=tree1
#root0.right=tree2
tree1.left=tree3
tree1.right=tree4
tree2.left=tree5
#tree2.right=tree6
#end of creating BST tree ####
#test
test = SolutionOther()
print test.isBalanced(root0)
#print test.isBalanced3(root0)
#print test.isBalanced2(root0)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
root = TreeNode(0)
root.left = TreeNode(1)
result = Solution().isBalanced(root)
print result
root.left.left = TreeNode(2)
result = javaSolution().isBalanced(root)
print result
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/contains-duplicate/solution/
Thought: This problem is generally believed to have two solutions:
the top down approach and the bottom up way.<|fim▁hole|>DFS 1) The first method checks whether the tree is balanced strictly according to the definition
of balanced binary tree: the difference between the heights of the two sub trees is not bigger than 1,
and both the left sub tree and right sub tree are also balanced. With the helper function depth(),
we could easily write the code;
For the current node root, calling depth() for its left and right children actually has to access all of its children,
thus the complexity is O(N). We do this for each node in the tree,
so the overall complexity of isBalanced will be O(N^2). This is a top down approach.
DFS 2) The second method is based on DFS. Instead of calling depth() explicitly for each child node,
we return the height of the current node in DFS recursion.
When the sub tree of the current node (inclusive) is balanced, the function dfsHeight()
returns a non-negative value as the height.
Otherwise -1 is returned. According to the leftHeight and rightHeight of the two children,
the parent node could check if the sub tree is balanced, and decides its return value.
# DFS
# 87.89% 1ms
class Solution {
public boolean isBalanced(TreeNode root) {
return dfsHeight(root) != -1;
}
public int dfsHeight(TreeNode root) {
if (root == null) return 0;
int left = dfsHeight(root.left);
int right = dfsHeight(root.right);
if (left == -1 || right == -1 || Math.abs(left - right) > 1) return -1;
return Math.max(left, right) + 1;
}
}
# DFS
# 87.89% 1ms
class Solution {
public boolean isBalanced(TreeNode root) {
if (root == null) return true;
int left = getDepth(root.left);
int right = getDepth(root.right);
return Math.abs(left - right) <= 1 && isBalanced(root.left) && isBalanced(root.right);
}
public int getDepth(TreeNode root) {
if (root == null) return 0;
return Math.max(getDepth(root.left), getDepth(root.right)) + 1;
}
}
'''<|fim▁end|>
| |
<|file_name|>neutron_plugin_base_v2.py<|end_file_name|><|fim▁begin|># Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of the minimum set of
methods that need to be implemented by a v2 Neutron Plug-in.
"""
import abc
import six
<|fim▁hole|>@six.add_metaclass(abc.ABCMeta)
class NeutronPluginBaseV2(object):
@abc.abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abc.abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abc.abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abc.abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
        NIC) to attach to an L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abc.abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abc.abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abc.abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
The contents of the list depends on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abc.abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listeners(self):
"""Start the RPC listeners.
Most plugins start RPC listeners implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
def rpc_workers_supported(self):
"""Return whether the plugin supports multiple RPC workers.
A plugin that supports multiple RPC workers should override the
start_rpc_listeners method to ensure that this method returns True and
that start_rpc_listeners is called at the appropriate time.
Alternately, a plugin can override this method to customize detection
of support for multiple rpc workers
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
return (self.__class__.start_rpc_listeners !=
NeutronPluginBaseV2.start_rpc_listeners)<|fim▁end|>
| |
<|file_name|>test.js<|end_file_name|><|fim▁begin|>var should=require('should');
var toast = require('../toast');
describe('test/toast.js', function () {
it('toast', function () {<|fim▁hole|><|fim▁end|>
|
toast.should.equal(toast);
});
});
|
<|file_name|>TypeFilterConfigTests.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2008-2011 the original author or authors.
*<|fim▁hole|> * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.jpa.repository.config;
import static org.junit.Assert.*;
import org.springframework.test.context.ContextConfiguration;
/**
* Integration test to test {@link org.springframework.core.type.filter.TypeFilter} integration into namespace.
*
* @author Oliver Gierke
*/
@ContextConfiguration(locations = "classpath:config/namespace-autoconfig-typefilter-context.xml")
public class TypeFilterConfigTests extends AbstractRepositoryConfigTests {
/*
* (non-Javadoc)
*
* @see
* org.springframework.data.jpa.repository.config.AbstractRepositoryConfigTests
* #testContextCreation()
*/
@Override
public void testContextCreation() {
assertNotNull(userRepository);
assertNotNull(roleRepository);
assertNull(auditableUserRepository);
}
}<|fim▁end|>
| |
<|file_name|>lang_utils.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/<|fim▁hole|>
export function instantiateType(type: Function, params: any[] = []) {
return new (<any>type)(...params);
}<|fim▁end|>
|
export function getTypeOf(instance: any /** TODO #9100 */) {
return instance.constructor;
}
|
<|file_name|>Multibound1.java<|end_file_name|><|fim▁begin|>/*
* @test /nodynamiccopyright/
* @bug 4482403
* @summary javac failed to check second bound
* @author gafter
*
* @compile/fail/ref=Multibound1.out -XDrawDiagnostics Multibound1.java
*/
package Multibound1;
interface A {}
interface B {}
class C<T extends A&B> {}
class D implements A {}<|fim▁hole|><|fim▁end|>
|
class E extends C<D> {}
|
<|file_name|>styling_title.py<|end_file_name|><|fim▁begin|>from bokeh.plotting import figure, output_file, show
output_file("title.html")<|fim▁hole|>
p = figure(width=400, height=400, title="Some Title")
p.title.text_color = "olive"
p.title.text_font = "times"
p.title.text_font_style = "italic"
p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
show(p)<|fim▁end|>
| |
<|file_name|>filter-inline-item.component.ts<|end_file_name|><|fim▁begin|><|fim▁hole|> Component
} from '@angular/core';
@Component({
selector: 'sky-filter-inline-item',
styleUrls: ['./filter-inline-item.component.scss'],
templateUrl: './filter-inline-item.component.html'
})
export class SkyFilterInlineItemComponent {
}<|fim▁end|>
|
import {
|
<|file_name|>coverage.py<|end_file_name|><|fim▁begin|># This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import operator
import threading
from mapproxy.grid import bbox_intersects, bbox_contains
from mapproxy.util.py import cached_property
from mapproxy.util.geom import (
require_geom_support,
load_polygon_lines,
transform_geometry,
bbox_polygon,
)
from mapproxy.srs import SRS
import logging
log_config = logging.getLogger('mapproxy.config.coverage')
try:
import shapely.geometry
import shapely.prepared<|fim▁hole|> pass
def coverage(geom, srs):
if isinstance(geom, (list, tuple)):
return BBOXCoverage(geom, srs)
else:
return GeomCoverage(geom, srs)
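# Illustrative sketch (not part of the original module; SRS code and
# coordinates are example values only): the factory above dispatches on the
# geometry type, so a plain bbox sequence yields a BBOXCoverage while a
# Shapely geometry yields a GeomCoverage.
#   bbox_cov = coverage([5.0, 50.0, 10.0, 55.0], SRS(4326))              # -> BBOXCoverage
#   geom_cov = coverage(shapely.geometry.box(5, 50, 10, 55), SRS(4326))  # -> GeomCoverage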
def load_limited_to(limited_to):
require_geom_support()
srs = SRS(limited_to['srs'])
geom = limited_to['geometry']
if not hasattr(geom, 'type'): # not a Shapely geometry
if isinstance(geom, (list, tuple)):
geom = bbox_polygon(geom)
else:
polygons = load_polygon_lines(geom.split('\n'))
if len(polygons) == 1:
geom = polygons[0]
else:
geom = shapely.geometry.MultiPolygon(polygons)
return GeomCoverage(geom, srs, clip=True)
class MultiCoverage(object):
clip = False
"""Aggregates multiple coverages"""
def __init__(self, coverages):
self.coverages = coverages
self.bbox = self.extent.bbox
@cached_property
def extent(self):
return reduce(operator.add, [c.extent for c in self.coverages])
def intersects(self, bbox, srs):
return any(c.intersects(bbox, srs) for c in self.coverages)
def contains(self, bbox, srs):
return any(c.contains(bbox, srs) for c in self.coverages)
def transform_to(self, srs):
return MultiCoverage([c.transform_to(srs) for c in self.coverages])
def __eq__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
if self.bbox != other.bbox:
return False
if len(self.coverages) != len(other.coverages):
return False
for a, b in zip(self.coverages, other.coverages):
if a != b:
return False
return True
def __ne__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<MultiCoverage %r: %r>' % (self.extent.llbbox, self.coverages)
class BBOXCoverage(object):
clip = False
def __init__(self, bbox, srs):
self.bbox = bbox
self.srs = srs
self.geom = None
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
def _bbox_in_coverage_srs(self, bbox, srs):
if srs != self.srs:
bbox = srs.transform_bbox_to(self.srs, bbox)
return bbox
def intersects(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_intersects(self.bbox, bbox)
def intersection(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
intersection = (
max(self.bbox[0], bbox[0]),
max(self.bbox[1], bbox[1]),
min(self.bbox[2], bbox[2]),
min(self.bbox[3], bbox[3]),
)
if intersection[0] >= intersection[2] or intersection[1] >= intersection[3]:
return None
return BBOXCoverage(intersection, self.srs)
def contains(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_contains(self.bbox, bbox)
def transform_to(self, srs):
if srs == self.srs:
return self
bbox = self.srs.transform_bbox_to(srs, self.bbox)
return BBOXCoverage(bbox, srs)
def __eq__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
return True
def __ne__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<BBOXCoverage %r/%r>' % (self.extent.llbbox, self.bbox)
class GeomCoverage(object):
def __init__(self, geom, srs, clip=False):
self.geom = geom
self.bbox = geom.bounds
self.srs = srs
self.clip = clip
self._prep_lock = threading.Lock()
self._prepared_geom = None
self._prepared_counter = 0
self._prepared_max = 10000
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
@property
def prepared_geom(self):
# GEOS internal data structure for prepared geometries grows over time,
# recreate to limit memory consumption
if not self._prepared_geom or self._prepared_counter > self._prepared_max:
self._prepared_geom = shapely.prepared.prep(self.geom)
self._prepared_counter = 0
self._prepared_counter += 1
return self._prepared_geom
def _geom_in_coverage_srs(self, geom, srs):
if isinstance(geom, shapely.geometry.base.BaseGeometry):
if srs != self.srs:
geom = transform_geometry(srs, self.srs, geom)
elif len(geom) == 2:
if srs != self.srs:
geom = srs.transform_to(self.srs, geom)
geom = shapely.geometry.Point(geom)
else:
if srs != self.srs:
geom = srs.transform_bbox_to(self.srs, geom)
geom = bbox_polygon(geom)
return geom
def transform_to(self, srs):
if srs == self.srs:
return self
geom = transform_geometry(self.srs, srs, self.geom)
return GeomCoverage(geom, srs)
def intersects(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.intersects(bbox)
def intersection(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
return GeomCoverage(self.geom.intersection(bbox), self.srs)
def contains(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.contains(bbox)
def __eq__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
if not self.geom.equals(other.geom):
return False
return True
def __ne__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<GeomCoverage %r: %r>' % (self.extent.llbbox, self.geom)<|fim▁end|>
|
except ImportError:
# missing Shapely is handled by require_geom_support
|
<|file_name|>eezyReport.py<|end_file_name|><|fim▁begin|>from lxml import etree
import os
from BeautifulSoup import BeautifulSoup
from itertools import chain
def replacements(text):
text = text.replace('>', '\\textgreater ')
text = text.replace('<', '\\textless ')
text = text.replace('&', '\&')
text = text.replace('_', '\_')
text = text.replace('%', '\%')
text = text.replace('[', '\lbrack')
text = text.replace(']', '\\rbrack')
return text
def fillContent(tex, srchStr, insStr):
insStr = replacements(insStr)
insIndex = tex.index(srchStr)
tex = tex[:insIndex+len(srchStr)] + insStr + tex[insIndex+len(srchStr):]
return tex
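# Rough usage sketch (hypothetical values): given tex containing
# '\newcommand{\guide}{}', fillContent(tex, 'newcommand{\guide}{', 'Dr. Smith')
# escapes the inserted value and splices it right after the search string,
# yielding '\newcommand{\guide}{Dr. Smith}'.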
def convertToTex(text, figInTabular=False):
text = replacements(text)
soup = BeautifulSoup(text)
contents = soup.contents[0].contents
retTxt = ''
for content in contents:
if str(type(content)) == "<class 'BeautifulSoup.NavigableString'>":
content = content.replace('\\newline', '~\\\\')
content = content.replace('\\newpara', '~\\\\\\\\')
content = content.replace('\\backslash', '\\textbackslash')
content = content.replace('|', '\\textbar ')
retTxt += content
elif str(type(content)) == "<class 'BeautifulSoup.Tag'>":
if content.name == 'b':
retTxt += '\\textbf{' + convertToTex(str(content)) + '}'
elif content.name == 'u':
retTxt += '\underline{' + convertToTex(str(content)) + '}'
elif content.name == 'i':
retTxt += '\\textit{' + convertToTex(str(content)) + '}'
elif content.name == 'ul':
retTxt += '\n\\begin{itemize}'
for item in content.contents:
if str(type(item)) == "<class 'BeautifulSoup.Tag'>":
retTxt += '\n \item ' + convertToTex(str(item))
retTxt += '\n\end{itemize}\n'
elif content.name == 'chapter':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the chapters do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the chapters' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projChapter}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projChapter}'
elif content.name == 'section':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the sections do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the sections' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSection}'
elif content.name == 'subsection':
attrs = dict(content.attrs)
if not attrs.has_key('name'):
print "One of the subsections do not have a 'name' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif attrs['name'] == '':
print "One of the subsections' name is empty. Please correct it and re-run."
exit(0)
else:
retTxt += '\\begin{projSubSection}{' + attrs['name'] + '}' + convertToTex(str(content)) + '\\end{projSubSection}'
elif content.name == 'img':
props = dict(content.attrs)
if not props.has_key('id'):
print "One of the images do not have an 'id' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('src'):
print "One of the images do not have a 'src' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('caption'):
print "One of the images do not have a 'caption' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('scale'):
print "One of the images do not have a 'scale' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif props['id'] == '':
print "One of the images has an empty 'id'. Please correct it and re-run."
exit(0)
elif props['src'] == '':
print "One of the images has an empty 'src'. Please correct it and re-run."
exit(0)
elif props['scale'] == '':
print "Scaling factor for one of the images hasnt been defined. Please correct it and re-run."
exit(0)
else:
if figInTabular:
retTxt += '\\raisebox{-\\totalheight}{\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\label{' + props['id'] + '}}\n'
else:
retTxt += '\\begin{figure}[ht!]\n\centering\n\includegraphics[scale=' + props['scale'] + ']{' + props['src'] + '}\n\caption{' + props['caption'] + '}\n\label{' + props['id'] + '}\n\end{figure}\n'
elif content.name == 'ref':
props = dict(content.attrs)
if not props.has_key('type'):
print "One of the references doesnt have a 'type' attribute. Please correct it and re-run."
exit(0)
elif props['type'] == '':
print "One of the references has an empty string for 'type'. Please correct it and re-run."
exit(0)
else:
if props['type'] == 'figure':
retTxt += 'Figure \\ref{' + content.text + '}'
elif props['type'] == 'table':<|fim▁hole|> props = dict(content.attrs)
if not props.has_key('id'):
print "One of the tables do not have an 'id' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('alignments'):
print "One of the tables do not have a 'alignments' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif not props.has_key('caption'):
print "One of the tables do not have a 'caption' attribute or is misspelled. Please correct it and re-run."
exit(0)
elif props['id'] == '':
print "One of the tables has an empty 'id'. Please correct it and re-run."
exit(0)
elif props['alignments'] == '':
print "One of the tables has an empty 'alignments'. Please correct it and re-run."
exit(0)
else:
alignments = props['alignments']
retTxt += '\\begin{table}[h]\\begin{center}\\begin{tabular}{' + alignments + '}'
for horizontal in content.contents:
if str(type(horizontal)) == "<class 'BeautifulSoup.Tag'>":
if horizontal.name == "tr":
cols = horizontal.contents
numOfCols = len(cols)
for i in range(numOfCols):
if str(type(cols[i])) == "<class 'BeautifulSoup.Tag'>":
retTxt += convertToTex(str(cols[i]), figInTabular=True)
print str(cols[i])
if i != numOfCols - 2:
retTxt += ' & '
else:
retTxt += ' \\\\\n'
elif horizontal.name == 'hline':
retTxt += '\hline\n'
retTxt += '\\end{tabular}\\end{center}\\caption{' + props['caption'] + '}\\label{' + props['id'] + '}\\end{table}'
return retTxt
def main():
f = open("fyp.stmplt", "r")
sty = f.read()
f.close()
f = open("fyp.ttmplt", "r")
tex = f.read()
f.close()
f = open("report.xml", "r")
xmlStr = f.read()
f.close()
root = etree.fromstring(xmlStr)
projectTitle = root.find('projectDetails').find('projectTitle').text
guide = root.find('projectDetails').find('guide').text
principal = root.find('projectDetails').find('principal').text
HOD = root.find('projectDetails').find('HOD').text
durationLong = root.find('projectDetails').find('duration').text
collLogoPath = root.find('projectDetails').find('collLogoPath').text
defaultFontFamily = root.find('font').find('defaultFontFamily').text
fontLevelOne = root.find('font').find('levelOne').text
fontLevelTwo = root.find('font').find('levelTwo').text
fontLevelThree = root.find('font').find('levelThree').text
fontLevelFour = root.find('font').find('levelFour').text
numberStrings = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten"]
students = [ (student.find('name').text, student.find('usn').text) for student in root.find('students').getchildren() if student.tag == 'student']
students = [ (numberStrings[i], students[i][0], students[i][1]) for i in range(len(students))]
headerLogoScale = root.find('header').find('logoScale').text
headerTitleSize = root.find('header').find('titleSize').text
headerLineWidth = root.find('header').find('lineWidth').text
dept = root.find('footer').find('dept').text
durationShort = root.find('footer').find('duration').text
footerLineWidth = root.find('footer').find('lineWidth').text
chapterFontFamily = root.find('chapterControls').find('fontFamily').text
coverFontFamily = root.find('cover').find('fontFamily').text
univName = root.find('cover').find('univName').text
univLogoPath = root.find('cover').find('univLogoPath').text
univLogoScale = root.find('cover').find('univLogoScale').text
course = root.find('cover').find('course').text
stream = root.find('cover').find('stream').text
deptName = root.find('cover').find('deptName').text
collName = root.find('cover').find('collName').text
affiliation = root.find('cover').find('affiliation').text
address = root.find('cover').find('address').text
collCoverLogoScale = root.find('cover').find('collCoverLogoScale').text
vspaceInterblock = root.find('cover').find('vspaceInterblock').text
vspaceIntrablock = root.find('cover').find('vspaceIntrablock').text
certificateLogoScale = root.find('certificate').find('logoScale').text
certificateCourse = root.find('certificate').find('course').text
certificateStream = root.find('certificate').find('stream').text
certificateUnivName = root.find('certificate').find('univName').text
abstractFontFamily = root.find('abstractControls').find('fontFamily').text
'''
modifying the tex file
'''
tex = fillContent(tex, 'newcommand{\projectTitle}{', projectTitle)
tex = fillContent(tex, 'newcommand{\guide}{', guide)
tex = fillContent(tex, 'newcommand{\principal}{', principal)
tex = fillContent(tex, 'newcommand{\HOD}{', HOD)
tex = fillContent(tex, 'newcommand{\durationLong}{', durationLong)
tex = fillContent(tex, 'newcommand{\headerLineWidth}{', headerLineWidth)
tex = fillContent(tex, 'newcommand{\\footerLineWidth}{', footerLineWidth)
tex = fillContent(tex, 'newcommand{\collLogoPath}{', collLogoPath)
tex = fillContent(tex, 'newcommand{\defaultFontFamily}{', defaultFontFamily)
tex = fillContent(tex, 'newcommand{\\fontLevelOne}{', fontLevelOne)
tex = fillContent(tex, 'newcommand{\\fontLevelTwo}{', fontLevelTwo)
tex = fillContent(tex, 'newcommand{\\fontLevelThree}{', fontLevelThree)
tex = fillContent(tex, 'newcommand{\\fontLevelFour}{', fontLevelFour)
insIndex = tex.index('@studentsList')
insStr = ''
for student in students:
insStr += '\\newcommand{\\student' + student[0] + '}{' + student[1] + '}\n'
insStr += '\\newcommand{\\usn' + student[0] + '}{' + student[2] + '}\n'
tex = tex[:insIndex] + insStr + tex[insIndex + len('@studentsList'):]
tex = fillContent(tex, 'newcommand{\headerLogoScale}{', headerLogoScale)
tex = fillContent(tex, 'newcommand{\headerTitleSize}{', headerTitleSize)
tex = fillContent(tex, 'newcommand{\dept}{', dept)
tex = fillContent(tex, 'newcommand{\durationShort}{', durationShort)
tex = fillContent(tex, 'newcommand{\chapterFontFamily}{', chapterFontFamily)
tex = fillContent(tex, 'newcommand{\coverFontFamily}{', coverFontFamily)
tex = fillContent(tex, 'newcommand{\univName}{', univName)
tex = fillContent(tex, 'newcommand{\univLogoPath}{', univLogoPath)
tex = fillContent(tex, 'newcommand{\univLogoScale}{', univLogoScale)
tex = fillContent(tex, 'newcommand{\course}{', course)
tex = fillContent(tex, 'newcommand{\stream}{', stream)
tex = fillContent(tex, 'newcommand{\deptName}{', deptName)
tex = fillContent(tex, 'newcommand{\collName}{', collName)
tex = fillContent(tex, 'newcommand{\\affiliation}{', affiliation)
tex = fillContent(tex, 'newcommand{\\address}{', address)
tex = fillContent(tex, 'newcommand{\collCoverLogoScale}{', collCoverLogoScale)
tex = fillContent(tex, 'newcommand{\\vspaceInterblock}{', vspaceInterblock)
tex = fillContent(tex, 'newcommand{\\vspaceIntrablock}{', vspaceIntrablock)
tex = fillContent(tex, 'newcommand{\certificateLogoScale}{', certificateLogoScale)
tex = fillContent(tex, 'newcommand{\certificateCourse}{', certificateCourse)
tex = fillContent(tex, 'newcommand{\certificateStream}{', certificateStream)
tex = fillContent(tex, 'newcommand{\certificateUnivName}{', certificateUnivName)
tex = fillContent(tex, 'newcommand{\\abstractFontFamily}{', abstractFontFamily)
insIndex = tex.index('@acknowledgement')
insStr = etree.tostring(root.find('acknowledgement'))
insStr = convertToTex(insStr)
tex = tex[:insIndex] + insStr + tex[insIndex + len('@acknowledgement'):]
insIndex = tex.index('@abstract')
insStr = etree.tostring(root.find('abstract'))
insStr = convertToTex(insStr)
tex = tex[:insIndex] + insStr + tex[insIndex + len('@abstract'):]
insIndex = tex.index('@chapters')
insStr = ''
chapters = root.findall('chapter')
for chapter in chapters:
insStrTemp = etree.tostring(chapter)
insStrTemp = convertToTex('<content>' + insStrTemp + '</content>')
insStr += insStrTemp + '\n'
tex = tex[:insIndex] + insStr + tex[insIndex + len('@chapters'):]
f = open("sample.tex", "w")
f.write(tex)
f.close()
'''
modifying the style file
'''
#modifying the cover page
coverIndex = sty.index("@studentsListCover")
insStrCover = ''
for i in range(len(students)):
if i == 0:
insStrCover += '\\vspace{\\vspaceInterblock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n'
else:
insStrCover += '\\vspace{\\vspaceIntrablock}\n\\textbf{\\student' + students[i][0] + ' - \usn' + students[i][0] + '}\n\n'
sty = sty[:coverIndex] + insStrCover + sty[coverIndex + len('@studentsListCover'):]
#modifying the certificate
certIndex = sty.index("@studentsListCertificate")
insStrCertificate = ''
for i in range(len(students)):
if i == 0:
insStrCertificate += '\\vspace{\\vspaceInterblock}\n\\textbf{\student' + students[i][0] + ', \usn' + students[i][0] + '}\n\n'
else:
insStrCertificate += '\\vspace{\\vspaceIntrablock}\n\\textbf{\student' + students[i][0] + ', \usn' + students[i][0] + '}\n\n'
print insStrCertificate
sty = sty[:certIndex] + insStrCertificate + sty[certIndex + len('@studentsListCertificate'):]
f = open("sample.sty", "w")
f.write(sty)
f.close()
os.system("pdflatex sample.tex")
os.system("pdflatex sample.tex") #it must be compiled twice in order to get the table of contents updated properly
if __name__ == '__main__':
main()<|fim▁end|>
|
retTxt += 'Table \\ref{' + content.text +'}'
elif content.name == 'table':
|
<|file_name|>admin.js<|end_file_name|><|fim▁begin|>var config = require('../config'),
_ = require('underscore'),
path = require('path'),
when = require('when'),
api = require('../api'),
mailer = require('../mail'),
errors = require('../errorHandling'),
storage = require('../storage'),
updateCheck = require('../update-check'),
adminNavbar,
adminControllers,
loginSecurity = [];
// TODO: combine path/navClass to single "slug(?)" variable with no prefix
adminNavbar = {
content: {
name: 'Content',
navClass: 'content',
key: 'admin.navbar.content',
path: '/'
},
add: {
name: 'New Post',
navClass: 'editor',
key: 'admin.navbar.editor',
path: '/editor/'
},
settings: {
name: 'Settings',
navClass: 'settings',
key: 'admin.navbar.settings',
path: '/settings/'
}
};
// TODO: make this a util or helper
function setSelected(list, name) {
_.each(list, function (item, key) {
item.selected = key === name;
});
return list;
}
adminControllers = {
'uploader': function (req, res) {
var type = req.files.uploadimage.type,
ext = path.extname(req.files.uploadimage.name).toLowerCase(),
store = storage.get_storage();
if ((type !== 'image/jpeg' && type !== 'image/png' && type !== 'image/gif' && type !== 'image/svg+xml')
|| (ext !== '.jpg' && ext !== '.jpeg' && ext !== '.png' && ext !== '.gif' && ext !== '.svg' && ext !== '.svgz')) {
return res.send(415, 'Unsupported Media Type');
}
store
.save(req.files.uploadimage)
.then(function (url) {
return res.send(url);
})
.otherwise(function (e) {
errors.logError(e);
return res.send(500, e.message);
});
},
'login': function (req, res) {
/*jslint unparam:true*/
res.render('login', {
bodyClass: 'ghost-login',
hideNavbar: true,
adminNav: setSelected(adminNavbar, 'login')
});
},
'auth': function (req, res) {
var currentTime = process.hrtime()[0],
remoteAddress = req.connection.remoteAddress,
denied = '';
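        // Throttle sign-in attempts: entries older than two seconds are dropped
        // from loginSecurity, and a remote address still present in the list is
        // denied below.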
loginSecurity = _.filter(loginSecurity, function (ipTime) {
return (ipTime.time + 2 > currentTime);
});
denied = _.find(loginSecurity, function (ipTime) {
return (ipTime.ip === remoteAddress);
});
if (!denied) {
loginSecurity.push({ip: remoteAddress, time: currentTime});
api.users.check({email: req.body.email, pw: req.body.password}).then(function (user) {
req.session.regenerate(function (err) {
if (!err) {
req.session.user = user.id;
var redirect = config.paths().subdir + '/ghost/';
if (req.body.redirect) {
redirect += decodeURIComponent(req.body.redirect);
}
// If this IP address successfully logins we
// can remove it from the array of failed login attempts.
loginSecurity = _.reject(loginSecurity, function (ipTime) {
return ipTime.ip === remoteAddress;
});
res.json(200, {redirect: redirect});
}
});
}, function (error) {
res.json(401, {error: error.message});
});
} else {
res.json(401, {error: 'Slow down, there are way too many login attempts!'});
}
},
'changepw': function (req, res) {
return api.users.changePassword({
currentUser: req.session.user,
oldpw: req.body.password,
newpw: req.body.newpassword,
ne2pw: req.body.ne2password
}).then(function () {
res.json(200, {msg: 'Password changed successfully'});
}, function (error) {
res.send(401, {error: error.message});
});
},
'signup': function (req, res) {
/*jslint unparam:true*/
res.render('signup', {
bodyClass: 'ghost-signup',
hideNavbar: true,
adminNav: setSelected(adminNavbar, 'login')
});
},
'doRegister': function (req, res) {
var name = req.body.name,
email = req.body.email,
password = req.body.password;
api.users.add({
name: name,
email: email,
password: password
}).then(function (user) {
api.settings.edit('email', email).then(function () {
var message = {
to: email,
subject: 'Your New Ghost Blog',
html: '<p><strong>Hello!</strong></p>' +
'<p>Good news! You\'ve successfully created a brand new Ghost blog over on ' + config().url + '</p>' +
'<p>You can log in to your admin account with the following details:</p>' +
'<p> Email Address: ' + email + '<br>' +
'Password: The password you chose when you signed up</p>' +
'<p>Keep this email somewhere safe for future reference, and have fun!</p>' +
'<p>xoxo</p>' +
'<p>Team Ghost<br>' +
'<a href="https://ghost.org">https://ghost.org</a></p>'
};
mailer.send(message).otherwise(function (error) {
errors.logError(
error.message,
"Unable to send welcome email, your blog will continue to function.",
"Please see http://docs.ghost.org/mail/ for instructions on configuring email."
);
});
req.session.regenerate(function (err) {
if (!err) {
if (req.session.user === undefined) {
req.session.user = user.id;
}
res.json(200, {redirect: config.paths().subdir + '/ghost/'});
}
});
});
}).otherwise(function (error) {
res.json(401, {error: error.message});
});
},
'forgotten': function (req, res) {
/*jslint unparam:true*/
res.render('forgotten', {
bodyClass: 'ghost-forgotten',
hideNavbar: true,
adminNav: setSelected(adminNavbar, 'login')
});
},
'generateResetToken': function (req, res) {
var email = req.body.email;
api.users.generateResetToken(email).then(function (token) {
var siteLink = '<a href="' + config().url + '">' + config().url + '</a>',
resetUrl = config().url.replace(/\/$/, '') + '/ghost/reset/' + token + '/',
resetLink = '<a href="' + resetUrl + '">' + resetUrl + '</a>',
message = {
to: email,
subject: 'Reset Password',
html: '<p><strong>Hello!</strong></p>' +
'<p>A request has been made to reset the password on the site ' + siteLink + '.</p>' +
'<p>Please follow the link below to reset your password:<br><br>' + resetLink + '</p>' +
'<p>Ghost</p>'
};
return mailer.send(message);
}).then(function success() {
var notification = {
type: 'success',
message: 'Check your email for further instructions',
status: 'passive',
id: 'successresetpw'
};
return api.notifications.add(notification).then(function () {
res.json(200, {redirect: config.paths().subdir + '/ghost/signin/'});
});
}, function failure(error) {
// TODO: This is kind of sketchy, depends on magic string error.message from Bookshelf.
// TODO: It's debatable whether we want to just tell the user we sent the email in this case or not, we are giving away sensitive info here.
if (error && error.message === 'EmptyResponse') {
error.message = "Invalid email address";
}
res.json(401, {error: error.message});
});
},
'reset': function (req, res) {
// Validate the request token
var token = req.params.token;
api.users.validateToken(token).then(function () {
// Render the reset form
res.render('reset', {
bodyClass: 'ghost-reset',
hideNavbar: true,
adminNav: setSelected(adminNavbar, 'reset')
});
}).otherwise(function (err) {
// Redirect to forgotten if invalid token
var notification = {
type: 'error',
message: 'Invalid or expired token',
status: 'persistent',
id: 'errorinvalidtoken'
};
errors.logError(err, 'admin.js', "Please check the provided token for validity and expiration.");
return api.notifications.add(notification).then(function () {
res.redirect(config.paths().subdir + '/ghost/forgotten');
});
});
},
'resetPassword': function (req, res) {<|fim▁hole|> api.users.resetPassword(token, newPassword, ne2Password).then(function () {
var notification = {
type: 'success',
message: 'Password changed successfully.',
status: 'passive',
id: 'successresetpw'
};
return api.notifications.add(notification).then(function () {
res.json(200, {redirect: config.paths().subdir + '/ghost/signin/'});
});
}).otherwise(function (err) {
// TODO: Better error message if we can tell whether the passwords didn't match or something
res.json(401, {error: err.message});
});
},
'logout': function (req, res) {
req.session.destroy();
var notification = {
type: 'success',
message: 'You were successfully signed out',
status: 'passive',
id: 'successlogout'
};
return api.notifications.add(notification).then(function () {
res.redirect(config.paths().subdir + '/ghost/signin/');
});
},
'index': function (req, res) {
/*jslint unparam:true*/
function renderIndex() {
res.render('content', {
bodyClass: 'manage',
adminNav: setSelected(adminNavbar, 'content')
});
}
when.join(
updateCheck(res),
when(renderIndex())
// an error here should just get logged
).otherwise(errors.logError);
},
'editor': function (req, res) {
if (req.params.id !== undefined) {
res.render('editor', {
bodyClass: 'editor',
adminNav: setSelected(adminNavbar, 'content')
});
} else {
res.render('editor', {
bodyClass: 'editor',
adminNav: setSelected(adminNavbar, 'add')
});
}
},
'content': function (req, res) {
/*jslint unparam:true*/
res.render('content', {
bodyClass: 'manage',
adminNav: setSelected(adminNavbar, 'content')
});
},
'settings': function (req, res, next) {
// TODO: Centralise list/enumeration of settings panes, so we don't
// run into trouble in future.
var allowedSections = ['', 'general', 'user'],
section = req.url.replace(/(^\/ghost\/settings[\/]*|\/$)/ig, '');
if (allowedSections.indexOf(section) < 0) {
return next();
}
res.render('settings', {
bodyClass: 'settings',
adminNav: setSelected(adminNavbar, 'settings')
});
},
'debug': { /* ugly temporary stuff for managing the app before it's properly finished */
index: function (req, res) {
/*jslint unparam:true*/
res.render('debug', {
bodyClass: 'settings',
adminNav: setSelected(adminNavbar, 'settings')
});
}
}
};
module.exports = adminControllers;<|fim▁end|>
|
var token = req.params.token,
newPassword = req.param('newpassword'),
ne2Password = req.param('ne2password');
|
<|file_name|>util.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {syntaxError} from '@angular/compiler';
import * as path from 'path';
import ts from 'typescript';
import {CompilerOptions, DEFAULT_ERROR_CODE, Diagnostic, SOURCE} from './api';
export const GENERATED_FILES = /(.*?)\.(ngfactory|shim\.ngstyle|ngstyle|ngsummary)\.(js|d\.ts|ts)$/;
export const DTS = /\.d\.ts$/;
export const TS = /^(?!.*\.d\.ts$).*\.ts$/;
export const enum StructureIsReused {
Not = 0,
SafeModules = 1,
Completely = 2
}<|fim▁hole|> return (program as any).structureIsReused;
}
export function error(msg: string): never {
throw new Error(`Internal error: ${msg}`);
}
export function userError(msg: string): never {
throw syntaxError(msg);
}
export function createMessageDiagnostic(messageText: string): ts.Diagnostic&Diagnostic {
return {
file: undefined,
start: undefined,
length: undefined,
category: ts.DiagnosticCategory.Message,
messageText,
code: DEFAULT_ERROR_CODE,
source: SOURCE,
};
}
export function isInRootDir(fileName: string, options: CompilerOptions) {
return !options.rootDir || pathStartsWithPrefix(options.rootDir, fileName);
}
export function relativeToRootDirs(filePath: string, rootDirs: string[]): string {
if (!filePath) return filePath;
for (const dir of rootDirs || []) {
const rel = pathStartsWithPrefix(dir, filePath);
if (rel) {
return rel;
}
}
return filePath;
}
function pathStartsWithPrefix(prefix: string, fullPath: string): string|null {
const rel = path.relative(prefix, fullPath);
return rel.startsWith('..') ? null : rel;
}
/**
* Converts a ng.Diagnostic into a ts.Diagnostic.
 * This loses some information, and also uses an incomplete object as `file`.
*
* I.e. only use this where the API allows only a ts.Diagnostic.
*/
export function ngToTsDiagnostic(ng: Diagnostic): ts.Diagnostic {
let file: ts.SourceFile|undefined;
let start: number|undefined;
let length: number|undefined;
if (ng.span) {
// Note: We can't use a real ts.SourceFile,
// but we can at least mirror the properties `fileName` and `text`, which
// are mostly used for error reporting.
file = {fileName: ng.span.start.file.url, text: ng.span.start.file.content} as ts.SourceFile;
start = ng.span.start.offset;
length = ng.span.end.offset - start;
}
return {
file,
messageText: ng.messageText,
category: ng.category,
code: ng.code,
start,
length,
};
}
/**
* Strip multiline comment start and end markers from the `commentText` string.
*
* This will also strip the JSDOC comment start marker (`/**`).
*/
export function stripComment(commentText: string): string {
return commentText.replace(/^\/\*\*?/, '').replace(/\*\/$/, '').trim();
}<|fim▁end|>
|
// Note: This is an internal property in TypeScript. Use it only for assertions and tests.
export function tsStructureIsReused(program: ts.Program): StructureIsReused {
|
<|file_name|>cleanup.py<|end_file_name|><|fim▁begin|>"""Runs the Treadmill container cleanup job.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import appenv
from treadmill import cleanup
from treadmill import cli<|fim▁hole|>
def init():
"""Top level command handler."""
@click.group(name='cleanup')
def cleanup_grp():
"""Cleanup click group."""
@cleanup_grp.command('watcher')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
def cleanup_watcher(approot):
"""Start cleanup watcher."""
tm_env = appenv.AppEnvironment(root=approot)
cleaner = cleanup.Cleanup(tm_env)
cleaner.run()
@cleanup_grp.command('instance')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
@click.option('--runtime-param', type=cli.LIST, required=False)
@click.argument('instance', nargs=1)
def cleanup_instance(approot, runtime, instance, runtime_param):
"""Actually do the cleanup of the instance.
"""
param = utils.equals_list2dict(runtime_param or [])
tm_env = appenv.AppEnvironment(root=approot)
cleaner = cleanup.Cleanup(tm_env)
cleaner.invoke(runtime, instance, param)
del cleanup_watcher
del cleanup_instance
return cleanup_grp<|fim▁end|>
|
from treadmill import utils
|
<|file_name|>HeadData.tsx<|end_file_name|><|fim▁begin|>/* Copyright (C) 2019, 2020 Monomax Software Pty Ltd<|fim▁hole|> * it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Dnote is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Dnote. If not, see <https://www.gnu.org/licenses/>.
*/
import React from 'react';
import Helmet from 'react-helmet';
interface Props {}
const HeaderData: React.FunctionComponent<Props> = () => {
return (
<Helmet>
<title>Books</title>
</Helmet>
);
};
export default HeaderData;<|fim▁end|>
|
*
* This file is part of Dnote.
*
* Dnote is free software: you can redistribute it and/or modify
|
<|file_name|>RoundingMethod.java<|end_file_name|><|fim▁begin|>package xde.lincore.mcscript.math;
public enum RoundingMethod {
Round, Floor, Ceil, CastInt;
public int round(final double value) {
switch (this) {
case Round:
return (int) Math.round(value);
case Floor:
return (int) Math.floor(value);
case Ceil:
return (int) Math.ceil(value);
case CastInt:
return (int) value;
default:
throw new UnsupportedOperationException();
}
}
public long roundToLong(final double value) {
switch (this) {
case Round:
return Math.round(value);
case Floor:
return (long) Math.floor(value);
case Ceil:
return (long) Math.ceil(value);
case CastInt:
return (long) value;
default:
throw new UnsupportedOperationException();<|fim▁hole|>}<|fim▁end|>
|
}
}
|
<|file_name|>override_autoescaped.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
templatetricks.override_autoescaped
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Override which templates are autoescaped
http://flask.pocoo.org/snippets/41/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import Flask
class JHtmlEscapingFlask(Flask):
def select_jinja_autoescape(self, filename):
if filename.endswith('.jhtml'):
return True
return Flask.select_jinja_autoescape(self, filename)<|fim▁hole|><|fim▁end|>
|
app = JHtmlEscapingFlask(__name__)
|
<|file_name|>test_analysis_signal.py<|end_file_name|><|fim▁begin|>'''Test the analysis.signal module.'''
from __future__ import absolute_import, print_function, division
import pytest
import numpy as np
import gridcells.analysis.signal as asignal
from gridcells.analysis.signal import (local_extrema, local_maxima,
local_minima, ExtremumTypes,
LocalExtrema)
RTOL = 1e-10
def _data_generator(n_items, sz):
'''Generate pairs of test vectors.'''
it = 0
while it < n_items:
N1 = np.random.randint(sz) + 1
N2 = np.random.randint(sz) + 1
if N1 == 0 and N2 == 0:
continue
a1 = np.random.rand(N1)
a2 = np.random.rand(N2)
yield (a1, a2)
it += 1
class TestCorrelation(object):
'''
Test the analysis.signal.corr function (and effectively the core of the
autoCorrelation) function.
'''
maxN = 500
maxLoops = 1000
def test_onesided(self):
'''Test the one-sided version of ``corr``.'''
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
c_cpp = asignal.corr(a1, a2, mode='onesided')
c_np = np.correlate(a1, a2, mode='full')[::-1][a1.size - 1:]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_twosided(self):
'''Test the two-sided version of ``corr``.'''
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
c_cpp = asignal.corr(a1, a2, mode='twosided')
c_np = np.correlate(a1, a2, mode='full')[::-1]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_range(self):
'''Test the ranged version of ``corr``.'''
# Half the range of both signals
for a1, a2 in _data_generator(self.maxLoops, self.maxN):
if a1.size <= 1 or a2.size <= 1:
continue
lag_start = - (a1.size // 2)
lag_end = a2.size // 2
c_np_centre = a1.size - 1
c_cpp = asignal.corr(a1, a2, mode='range', lag_start=lag_start,
lag_end=lag_end)
c_np = np.correlate(a1, a2, mode='full')[::-1]
np.testing.assert_allclose(
c_cpp,
c_np[c_np_centre + lag_start:c_np_centre + lag_end + 1],
rtol=RTOL)
def test_zero_len(self):
'''Test that an exception is raised when inputs have zero length.'''
a1 = np.array([])
a2 = np.arange(10)
# corr(a1, a2)
lag_start = 0
lag_end = 0
for mode in ("onesided", "twosided", "range"):
with pytest.raises(TypeError):
asignal.corr(a1, a2, mode, lag_start, lag_end)
with pytest.raises(TypeError):
asignal.corr(a2, a1, mode, lag_start, lag_end)
with pytest.raises(TypeError):
asignal.corr(a1, a1, mode, lag_start, lag_end)
def test_non_double(self):
'''Test the corr function when dtype is not double.'''
a1 = np.array([1, 2, 3], dtype=int)
asignal.corr(a1, a1, mode='twosided')
<|fim▁hole|> maxN = 500
maxLoops = 1000
def test_default_params(self):
'''Test default parameters.'''
a = np.arange(10)
c_cpp = asignal.acorr(a)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 1:]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_onesided(self):
'''Test the one-sided version of ``corr``.'''
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='onesided', max_lag=5)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 1:a.size - 1 + 6]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_twosided(self):
'''Test the two-sided version of ``corr``.'''
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='twosided', max_lag=5)
c_np = np.correlate(a, a, mode='full')[::-1][a.size - 6:a.size + 5]
np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
def test_norm(self):
'''Test normalization.'''
# Simple array
a = np.arange(10)
c_cpp = asignal.acorr(a, mode='twosided', norm=True)
c_np = np.correlate(a, a, mode='full')[::-1]
np.testing.assert_allclose(c_cpp, c_np / np.max(c_np), rtol=RTOL)
# A zero array will return zero
zero_array = np.zeros(13)
c_cpp = asignal.acorr(zero_array, mode='twosided', norm=True)
assert np.all(c_cpp == 0.)
def generate_sin(n_half_cycles, resolution=100):
'''Generate a sine function with a number of (full) half cycles.
Note that the positions of the extrema might be shifted +/- 1 with respect
to the actual real sin because of possible rounding errors.
Parameters
----------
n_half_cycles : int
Number of half cycles to generate. Does not have to be even.
resolution : int
Number of data points for each half cycle.
'''
if n_half_cycles < 1:
raise ValueError()
if resolution < 1:
raise ValueError()
f = 1. / (2 * resolution)
t = np.arange(n_half_cycles * resolution, dtype=float)
sig = np.sin(2 * np.pi * f * t)
extrema_positions = np.array(np.arange(n_half_cycles) * resolution +
resolution / 2,
dtype=int)
extrema_types = []
current_type = ExtremumTypes.MAX
for _ in range(n_half_cycles):
extrema_types.append(current_type)
if current_type is ExtremumTypes.MAX:
current_type = ExtremumTypes.MIN
else:
current_type = ExtremumTypes.MAX
return (sig, extrema_positions, np.array(extrema_types))
class TestLocalExtrema(object):
'''Test computation of local extrema.'''
def test_local_extrema(self):
for n_extrema in [1, 2, 51]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
extrema = local_extrema(sig)
assert len(extrema) == n_extrema
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
extrema.get_type(ExtremumTypes.MIN))
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
extrema.get_type(ExtremumTypes.MAX))
def test_zero_array(self):
for func in [local_extrema, local_maxima, local_minima]:
extrema = func(np.empty(0))
assert len(extrema) == 0
def test_single_item(self):
'''This should return a zero length array.'''
for func in [local_extrema, local_maxima, local_minima]:
extrema = func(np.array([1.]))
assert len(extrema) == 0
def test_maxima(self):
# One maximum only
for n_extrema in [1, 2]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
maxima = local_maxima(sig)
assert len(maxima) == 1
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
maxima)
# 2 maxima
for n_extrema in [3, 4]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
maxima = local_maxima(sig)
assert len(maxima) == 2
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MAX] ==
maxima)
def test_minima(self):
# Only one maximum so should return empty
n_extrema = 1
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
minima = local_minima(sig)
assert len(minima) == 0
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
minima)
# One maximum and minimum
n_extrema = 2
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
minima = local_minima(sig)
assert len(minima) == 1
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
minima)
# 2 minima
for n_extrema in [4, 5]:
sig, extrema_idx, extrema_types = generate_sin(n_extrema)
minima = local_minima(sig)
assert len(minima) == 2
assert np.all(extrema_idx[extrema_types == ExtremumTypes.MIN] ==
minima)
class TestLocalExtremaClass(object):
'''Test the local extremum object.'''
def test_empty(self):
extrema = LocalExtrema([], [])
assert len(extrema) == 0
assert len(extrema.get_type(ExtremumTypes.MIN)) == 0 # FIXME
def test_inconsistent_inputs(self):
with pytest.raises(IndexError):
extrema = LocalExtrema([], [1])
with pytest.raises(IndexError):
extrema = LocalExtrema(np.arange(10), [1])
def test_single_type(self):
N = 10
test_vector = np.arange(N)
for tested_type in ExtremumTypes:
extrema = LocalExtrema(test_vector, [tested_type] * N)
assert len(extrema) == N
for current_type in ExtremumTypes:
retrieved = extrema.get_type(current_type)
if current_type is tested_type:
assert len(retrieved) == N
assert np.all(retrieved == test_vector)
else:
assert len(retrieved) == 0
def test_mixed_types(self):
N = 10
test_vector = np.arange(10)
test_types = np.ones(N) * ExtremumTypes.MIN
test_types[0:10:2] = ExtremumTypes.MAX
extrema = LocalExtrema(test_vector, test_types)
assert len(extrema) == N
retrieved_min = extrema.get_type(ExtremumTypes.MIN)
assert np.all(retrieved_min == test_vector[1:10:2])
retrieved_max = extrema.get_type(ExtremumTypes.MAX)
assert np.all(retrieved_max == test_vector[0:10:2])
# Should not find any other types
for current_type in ExtremumTypes:
if (current_type is not ExtremumTypes.MIN and current_type is not
ExtremumTypes.MAX):
assert len(extrema.get_type(current_type)) == 0<|fim▁end|>
|
class TestAutoCorrelation(object):
'''Test the acorr function.'''
|
<|file_name|>types_swagger_doc_generated.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_CertificateSigningRequest = map[string]string{
"": "Describes a certificate signing request",
"spec": "The certificate request itself and any additional information.",
"status": "Derived information about the request.",
}
func (CertificateSigningRequest) SwaggerDoc() map[string]string {
return map_CertificateSigningRequest
}
var map_CertificateSigningRequestCondition = map[string]string{
"type": "request approval state, currently Approved or Denied.",
"reason": "brief reason for the request state",
"message": "human readable message with details about the request state",
"lastUpdateTime": "timestamp for the last update to this condition",
}
func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {<|fim▁hole|> "": "This information is immutable after the request is created. Only the Request and ExtraInfo fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
"request": "Base64-encoded PKCS#10 CSR data",
"usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12",
"username": "Information about the requesting user (if relevant) See user.Info interface for details",
}
func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestSpec
}
var map_CertificateSigningRequestStatus = map[string]string{
"conditions": "Conditions applied to the request, such as approval or denial.",
"certificate": "If request was approved, the controller will place the issued certificate here.",
}
func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
return map_CertificateSigningRequestStatus
}
// AUTO-GENERATED FUNCTIONS END HERE<|fim▁end|>
|
return map_CertificateSigningRequestCondition
}
var map_CertificateSigningRequestSpec = map[string]string{
|
<|file_name|>Streamer1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the<|fim▁hole|># permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test for BitBucket PR 126:
SConf doesn't work well with 'io' module on pre-3.0 Python. This is because
io.StringIO (used by SCons.SConf.Streamer) accepts only unicode strings.
Non-unicode input causes it to raise an exception.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
# SConstruct
#
# The CheckHello should return 'yes' if everything works fine. Otherwise it
# returns 'failed'.
#
def hello(target, source, env):
import traceback
try:
print 'hello!\\n' # this breaks the script
with open(env.subst('$TARGET', target = target),'w') as f:
f.write('yes')
except:
# write to file, as stdout/stderr is broken
traceback.print_exc(file=open('traceback','w'))
return 0
def CheckHello(context):
import sys
context.Display('Checking whether hello works... ')
stat,out = context.TryAction(hello,'','.in')
if stat and out:
context.Result(out)
else:
context.Result('failed')
return out
env = Environment()
cfg = Configure(env)
cfg.AddTest('CheckHello', CheckHello)
cfg.CheckHello()
env = cfg.Finish()
""")
test.run(arguments = '.')
test.must_contain_all_lines(test.stdout(), ['Checking whether hello works... yes'])
test.must_not_exist('traceback')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|>
|
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
<|fim▁hole|>WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from subprocess import Popen, PIPE
from textwrap import dedent
from os import environ, path
from distutils.util import strtobool
import ldap
import getpass
from . import auth, gpg
def colour(text, colour):
if colour == 'red':
code = '\033[31;1m'
elif colour == 'green':
code = '\033[32m'
elif colour == 'blue':
code = '\033[93m'
else:
raise Exception
reset = '\033[m'
return code + text + reset
def info(text):
print("%s: %s"%(colour('INFO','blue'),text))
def warning(text):
print("%s: %s"%(colour('WARN','red'),text))
def todo(text):
print("%s: %s"%(colour('TODO','green'),text))
class SetupError(Exception):
"""
Indicates user needs to take action before setup can complete
"""
pass
def userinfo():
"""
Get current user's common name and email from LDAP
Returns: Tuple of (name, email)
"""
l = ldap.initialize(ldap.get_option(ldap.OPT_URI))
people = 'ou=People,dc=apac,dc=edu,dc=au'
info = l.search_s(people, ldap.SCOPE_SUBTREE, '(uid=%s)'%getpass.getuser())
return (info[0][1]['cn'][0],info[0][1]['mail'][0])
def prompt_bool(prompt):
"""
Ask a yes/no question
Returns: true/false answer
"""
raw_value = raw_input(prompt + ' [yes/no] ')
try:
return strtobool(raw_value)
except ValueError:
return prompt_bool(prompt)  # re-prompt on invalid input
def prompt_or_default(prompt, default):
"""
Ask a question with a default answer
Returns: answer or default
"""
response = raw_input('%s [%s]: '%(prompt,default)).strip()
if response == '':
response = default
return response
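# Example (illustrative values): prompt_or_default('What is your name?', 'Jane Doe')
# returns 'Jane Doe' if the user just presses Enter, otherwise the typed answer.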
def gpg_startup():
agent = dedent("""
[ -f ~/.gpg-agent-info ] && source ~/.gpg-agent-info
if [ -S "${GPG_AGENT_INFO%%:*}" ]; then
export GPG_AGENT_INFO
else
eval $( gpg-agent --daemon --allow-preset-passphrase --batch --max-cache-ttl 43200 --write-env-file ~/.gpg-agent-info )
fi
""")
home = environ['HOME']
for f in ['.profile','.bash_profile']:
p = path.join(home,f)
if path.exists(p):
# Check if gpg-agent is already referenced
grep = Popen(['grep','gpg-agent',p],stdout=PIPE)
grep.communicate()
if grep.returncode == 0:
warning('GPG Agent is referenced in ~/%s but is not currently running. '%f+
'Try relogging to start it again, if that doesn\'t work please contact the helpdesk')
continue
# Add script to file
with open(p,'a') as profile:
profile.write(agent)
todo('GPG Agent has been added to your startup scripts. '+
'Please log out of Accessdev then back in again to make sure it has been activated\n')
def check_gpg_agent():
"""
Make sure GPG-Agent is running
If the environment variable is not found add activation script to the
users's .profile
"""
try:
gpg.send('GETINFO version')
info('GPG Agent is running')
except Exception:
gpg_startup()
raise SetupError
def register_mosrs_account():
name, email = userinfo()
name = prompt_or_default('What is your name?',name)
email = prompt_or_default('What is your work email address?',email)
request = Popen(['mail', '-s','MOSRS account request for %s'%name, '[email protected]'], stdin=PIPE)
request.communicate(dedent("""
ACCESS user %s (NCI id %s, email <%s>) would like to request an account on MOSRS.
Can the sponsor for their institution please submit a request on their behalf at
https://code.metoffice.gov.uk/trac/admin/newticket?type=account-request
You can check if they have an existing account at
https://code.metoffice.gov.uk/trac/home/wiki/UserList
"""%(name, environ['USER'], email)))
print('\n')
info('Submitting MOSRS account request for %s <%s> to access_help'%(name,email))
info('Once your account has been activated (will take at least one UK business day) '+
'you will receive an email detailing how to set up your password\n')
def setup_mosrs_account():
"""
Setup Mosrs
"""
check_gpg_agent()
mosrs_request = None
while mosrs_request not in ['yes', 'no', 'y', 'n']:
mosrs_request = prompt_or_default("Do you have a MOSRS account", "yes")
mosrs_request = mosrs_request.lower()
if mosrs_request.startswith('y'):
auth.check_or_update()
else:
print(dedent(
"""
If you need to access new versions of the UM please send a
request to '[email protected]' saying that you'd like a MOSRS account
Once you have an account run this script again
"""
))
print('\n')
def check_raijin_ssh():
"""
Raijin has been decommissioned. There should no longer be any calls to this
procedure. In case there is, I'm leaving this stub in.
"""
raise ValueError("raijin should no longer be used. Please contact CMS")
def check_gadi_ssh():
"""
Test Rose/Cylc can be found on Gadi
"""
print('Testing Rose can be accessed on Gadi...')
# ssh -oBatchMode=yes /projects/access/bin/cylc --version
ssh = Popen(['ssh','-oBatchMode=yes','gadi','/projects/access/bin/cylc --version'])
result = ssh.wait()
if result == 0:
print('Successfully found Rose\n')
else:
warning('Unable to connect to Gadi')
warning('Follow the instructions at https://accessdev.nci.org.au/trac/wiki/Guides/SSH to set up a SSH agent\n')
raise SetupError
def accesssvn_setup():
"""
Setup GPG for access-svn access
"""
try:
check_gpg_agent()
print('\n')
print('To store your password for 12 hours run:')
print(' access-auth\n')
except SetupError:
todo('Once this has been done please run this setup script again\n')
def main():
print('\n')
print('Welcome to Accessdev, the user interface and control server for the ACCESS model at NCI')
print('This script will set up your account to use Rose and the UM\n')
try:
setup_mosrs_account()
check_gadi_ssh()
# Account successfully created
print('You are now able to use Rose and the UM. To see a list of available experiments run:')
print(' rosie go\n')
print('Your password will be cached for a maximum of 12 hours. To store your password again run:')
print(' mosrs-auth\n')
except SetupError:
todo('Once this has been done please run this setup script again\n')
finally:
print('You can ask for help with the ACCESS systems by emailing "[email protected]"\n')
if __name__ == '__main__':
main()<|fim▁end|>
|
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
|
<|file_name|>a361.js<|end_file_name|><|fim▁begin|>//~ name a361
<|fim▁hole|>alert(a361);
//~ component a362.js<|fim▁end|>
| |
<|file_name|>signus.rs<|end_file_name|><|fim▁begin|>extern crate libc;
use std::sync::mpsc::channel;
use std::ffi::CString;
use indy::api::signus::{
indy_sign,
indy_create_and_store_my_did,
indy_store_their_did,
indy_replace_keys,
indy_verify_signature,
indy_encrypt,
indy_decrypt
};
use indy::api::ErrorCode;
use utils::callback::CallbackUtils;<|fim▁hole|>impl SignusUtils {
pub fn sign(wallet_handle: i32, their_did: &str, msg: &[u8]) -> Result<Vec<u8>, ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, signature| {
sender.send((err, signature)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_sign_cb(cb);
let their_did = CString::new(their_did).unwrap();
let err =
indy_sign(command_handle,
wallet_handle,
their_did.as_ptr(),
msg.as_ptr() as *const u8,
msg.len() as u32,
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, signature) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok(signature)
}
pub fn create_and_store_my_did(wallet_handle: i32, seed: Option<&str>) -> Result<(String, String, String), ErrorCode> {
let (create_and_store_my_did_sender, create_and_store_my_did_receiver) = channel();
let create_and_store_my_did_cb = Box::new(move |err, did, verkey, public_key| {
create_and_store_my_did_sender.send((err, did, verkey, public_key)).unwrap();
});
let (create_and_store_my_did_command_handle, create_and_store_my_did_callback) = CallbackUtils::closure_to_create_and_store_my_did_cb(create_and_store_my_did_cb);
let my_did_json = seed.map_or("{}".to_string(), |seed| format!("{{\"seed\":\"{}\" }}", seed));
let my_did_json = CString::new(my_did_json).unwrap();
let err =
indy_create_and_store_my_did(create_and_store_my_did_command_handle,
wallet_handle,
my_did_json.as_ptr(),
create_and_store_my_did_callback);
if err != ErrorCode::Success {
return Err(err);
}
let (err, my_did, my_verkey, my_pk) = create_and_store_my_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok((my_did, my_verkey, my_pk))
}
pub fn create_my_did(wallet_handle: i32, my_did_json: &str) -> Result<(String, String, String), ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, did, verkey, public_key| {
sender.send((err, did, verkey, public_key)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_create_and_store_my_did_cb(cb);
let my_did_json = CString::new(my_did_json).unwrap();
let err =
indy_create_and_store_my_did(command_handle,
wallet_handle,
my_did_json.as_ptr(),
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, my_did, my_verkey, my_pk) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok((my_did, my_verkey, my_pk))
}
pub fn store_their_did(wallet_handle: i32, identity_json: &str) -> Result<(), ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err| {
sender.send((err)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_store_their_did_cb(cb);
let identity_json = CString::new(identity_json).unwrap();
let err =
indy_store_their_did(command_handle,
wallet_handle,
identity_json.as_ptr(),
cb);
if err != ErrorCode::Success {
return Err(err);
}
let err = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok(())
}
pub fn store_their_did_from_parts(wallet_handle: i32, their_did: &str, their_pk: &str, their_verkey: &str, endpoint: &str) -> Result<(), ErrorCode> {
let (store_their_did_sender, store_their_did_receiver) = channel();
let store_their_did_cb = Box::new(move |err| { store_their_did_sender.send((err)).unwrap(); });
let (store_their_did_command_handle, store_their_did_callback) = CallbackUtils::closure_to_store_their_did_cb(store_their_did_cb);
let their_identity_json = format!("{{\"did\":\"{}\",\
\"pk\":\"{}\",\
\"verkey\":\"{}\",\
\"endpoint\":\"{}\"\
}}",
their_did, their_pk, their_verkey, endpoint);
let their_identity_json = CString::new(their_identity_json).unwrap();
let err =
indy_store_their_did(store_their_did_command_handle,
wallet_handle,
their_identity_json.as_ptr(),
store_their_did_callback);
if err != ErrorCode::Success {
return Err(err);
}
let err = store_their_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok(())
}
pub fn replace_keys(wallet_handle: i32, did: &str, identity_json: &str) -> Result<(String, String), ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, verkey, public_key| {
sender.send((err, verkey, public_key)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_replace_keys_cb(cb);
let did = CString::new(did).unwrap();
let identity_json = CString::new(identity_json).unwrap();
let err =
indy_replace_keys(command_handle,
wallet_handle,
did.as_ptr(),
identity_json.as_ptr(),
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, my_verkey, my_pk) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok((my_verkey, my_pk))
}
pub fn verify(wallet_handle: i32, pool_handle: i32, did: &str, msg: &[u8], signature: &[u8]) -> Result<bool, ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, valid| {
sender.send((err, valid)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_verify_signature_cb(cb);
let did = CString::new(did).unwrap();
let err =
indy_verify_signature(command_handle,
wallet_handle,
pool_handle,
did.as_ptr(),
msg.as_ptr() as *const u8,
msg.len() as u32,
signature.as_ptr() as *const u8,
signature.len() as u32,
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, valid) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok(valid)
}
pub fn encrypt(wallet_handle: i32, pool_handle: i32, my_did: &str, did: &str, msg: &[u8]) -> Result<(Vec<u8>, Vec<u8>), ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, encrypted_msg, nonce| {
sender.send((err, encrypted_msg, nonce)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_encrypt_cb(cb);
let my_did = CString::new(my_did).unwrap();
let did = CString::new(did).unwrap();
let err =
indy_encrypt(command_handle,
wallet_handle,
pool_handle,
my_did.as_ptr(),
did.as_ptr(),
msg.as_ptr() as *const u8,
msg.len() as u32,
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, encrypted_msg, nonce) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok((encrypted_msg, nonce))
}
pub fn decrypt(wallet_handle: i32, my_did: &str, did: &str, encrypted_msg: &[u8], nonce: &[u8]) -> Result<Vec<u8>, ErrorCode> {
let (sender, receiver) = channel();
let cb = Box::new(move |err, decrypted_msg| {
sender.send((err, decrypted_msg)).unwrap();
});
let (command_handle, cb) = CallbackUtils::closure_to_decrypt_cb(cb);
let my_did = CString::new(my_did).unwrap();
let did = CString::new(did).unwrap();
let err =
indy_decrypt(command_handle,
wallet_handle,
my_did.as_ptr(),
did.as_ptr(),
encrypted_msg.as_ptr() as *const u8,
encrypted_msg.len() as u32,
nonce.as_ptr() as *const u8,
nonce.len() as u32,
cb);
if err != ErrorCode::Success {
return Err(err);
}
let (err, decrypted_msg) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();
if err != ErrorCode::Success {
return Err(err);
}
Ok(decrypted_msg)
}
}<|fim▁end|>
|
use utils::timeout::TimeoutUtils;
pub struct SignusUtils {}
|
<|file_name|>manageiq_alerts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_alerts
short_description: Configuration of alerts in ManageIQ
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Elad Alfassa (@elad661) <[email protected]>
description:
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
options:
state:
description:
- absent - alert should not exist,
- present - alert should exist,
required: False
choices: ['absent', 'present']
default: 'present'
description:
description:
- The unique alert description in ManageIQ.
- Required when state is "absent" or "present".
resource_type:
description:
- The entity type for the alert in ManageIQ. Required when state is "present".
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
'ExtManagementSystem', 'MiddlewareServer']
expression_type:
description:
- Expression type.
default: hash
choices: ["hash", "miq"]
expression:
description:
- The alert expression for ManageIQ.
- Can either be in the "Miq Expression" format or the "Hash Expression format".
- Required if state is "present".
enabled:
description:
- Enable or disable the alert. Required if state is "present".
type: bool
options:
description:
- Additional alert options, such as notification type and frequency
'''
EXAMPLES = '''
- name: Add an alert with a "hash expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 01
options:
notifications:
email:
to: ["[email protected]"]
from: "[email protected]"
resource_type: ContainerNode
expression:
eval_method: hostd_log_threshold
mode: internal
options: {}
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Add an alert with a "miq expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 02
options:
notifications:
email:
to: ["[email protected]"]
from: "[email protected]"
resource_type: Vm
expression_type: miq
expression:
and:
- CONTAINS:
tag: Vm.managed-environment
value: prod
- not:
CONTAINS:
tag: Vm.host.managed-environment
value: prod
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete an alert from ManageIQ
manageiq_alerts:
state: absent
description: Test Alert 01
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQAlert(object):
""" Represent a ManageIQ alert. Can be initialized with both the format
we receive from the server and the format we get from the user.
"""
def __init__(self, alert):
self.description = alert['description']
self.db = alert['db']
self.enabled = alert['enabled']
self.options = alert['options']
self.hash_expression = None
self.miq_expression = None
if 'hash_expression' in alert:
self.hash_expression = alert['hash_expression']
if 'miq_expression' in alert:
self.miq_expression = alert['miq_expression']
if 'exp' in self.miq_expression:
# miq_expression is a field that needs a special case, because
# it's returned surrounded by a dict named exp even though we don't
# send it with that dict.
self.miq_expression = self.miq_expression['exp']
def __eq__(self, other):
""" Compare two ManageIQAlert objects
"""
return self.__dict__ == other.__dict__
class ManageIQAlerts(object):
""" Object to execute alert management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
def get_alerts(self):
""" Get all alerts from ManageIQ
"""
try:
response = self.client.get(self.alerts_url + '?expand=resources')
except Exception as e:
self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
return response.get('resources', [])
def validate_hash_expression(self, expression):
""" Validate a 'hash expression' alert definition
"""
# hash expressions must have the following fields
for key in ['options', 'eval_method', 'mode']:
if key not in expression:
msg = "Hash expression is missing required field {key}".format(key=key)
self.module.fail_json(msg)
def create_alert_dict(self, params):
""" Create a dict representing an alert
"""
if params['expression_type'] == 'hash':
# hash expression supports depends on https://github.com/ManageIQ/manageiq-api/pull/76
self.validate_hash_expression(params['expression'])
expression_type = 'hash_expression'
else:
# actually miq_expression, but we call it "expression" for backwards-compatibility
expression_type = 'expression'
# build the alert
alert = dict(description=params['description'],
db=params['resource_type'],
options=params['options'],
enabled=params['enabled'])
# add the actual expression.
alert.update({expression_type: params['expression']})
return alert
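# Illustrative shape of the dict returned above for a hash-type expression,
# using the values from the first EXAMPLES entry:
#   {'description': 'Test Alert 01', 'db': 'ContainerNode', 'enabled': True,
#    'options': {'notifications': {...}},
#    'hash_expression': {'eval_method': 'hostd_log_threshold',
#                        'mode': 'internal', 'options': {}}}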
def add_alert(self, alert):
""" Add a new alert to ManageIQ
"""
try:
result = self.client.post(self.alerts_url, action='create', resource=alert)
msg = "Alert {description} created successfully: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Creating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to create a hash expression
msg = msg.format(description=alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def delete_alert(self, alert):
""" Delete an alert
"""
try:
result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
id=alert['id']),
action="delete")
msg = "Alert {description} deleted: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Deleting alert {description} failed: {error}"
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def update_alert(self, existing_alert, new_alert):
""" Update an existing alert with the values from `new_alert`
"""
new_alert_obj = ManageIQAlert(new_alert)
if new_alert_obj == ManageIQAlert(existing_alert):
# no change needed - alerts are identical
return dict(changed=False, msg="No update needed")
else:
try:
url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
result = self.client.post(url, action="edit", resource=new_alert)
# make sure that the update was indeed successful by comparing
# the result to the expected result.
if new_alert_obj == ManageIQAlert(result):
# success!
msg = "Alert {description} upated successfully: {details}"
msg = msg.format(description=existing_alert['description'], details=result)
return dict(changed=True, msg=msg)
else:
# unexpected result
msg = "Updating alert {description} failed, unexpected result {details}"
msg = msg.format(description=existing_alert['description'], details=result)
self.module.fail_json(msg=msg)
except Exception as e:
msg = "Updating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to update a hash expression
msg = msg.format(description=existing_alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=existing_alert['description'], error=e)
self.module.fail_json(msg=msg)
def main():
argument_spec = dict(
description=dict(type='str'),
resource_type=dict(type='str', choices=['Vm',
'ContainerNode',
'MiqServer',
'Host',
'Storage',
'EmsCluster',
'ExtManagementSystem',
'MiddlewareServer']),
expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
expression=dict(type='dict'),
options=dict(type='dict'),
enabled=dict(type='bool'),
state=dict(required=False, default='present',
choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(argument_spec=argument_spec,
required_if=[('state', 'present', ['description',
'resource_type',
'expression',
'enabled',
'options']),
('state', 'absent', ['description'])])
state = module.params['state']
description = module.params['description']
manageiq = ManageIQ(module)
manageiq_alerts = ManageIQAlerts(manageiq)
existing_alert = manageiq.find_collection_resource_by("alert_definitions",
description=description)
# we need to add or update the alert
if state == "present":
alert = manageiq_alerts.create_alert_dict(module.params)
if not existing_alert:<|fim▁hole|> res_args = manageiq_alerts.add_alert(alert)
else:
# an alert with this description exists, we might need to update it
res_args = manageiq_alerts.update_alert(existing_alert, alert)
# this alert should not exist
elif state == "absent":
# if we have an alert with this description, delete it
if existing_alert:
res_args = manageiq_alerts.delete_alert(existing_alert)
else:
# it doesn't exist, and that's okay
msg = "Alert '{description}' does not exist in ManageIQ"
msg = msg.format(description=description)
res_args = dict(changed=False, msg=msg)
module.exit_json(**res_args)
if __name__ == "__main__":
main()<|fim▁end|>
|
# an alert with this description doesn't exist yet, let's create it
|
<|file_name|>HMMPowerSupplyMap.py<|end_file_name|><|fim▁begin|>'''
HMMPowerSupplyMap
'''
from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetTableMap, GetMap
)
from DeviceDefine import HMMSTATUS, HMMPRESENCE, HMMPOWERMODE, HMMLOCATION
class HMMPowerSupplyMap(SnmpPlugin):
'''
HMMPowerSupplyMap
'''
relname = 'hmmpowerSupplys'
modname = 'ZenPacks.community.HuaweiServer.HMMPowerSupply'
snmpGetTableMaps = (
GetTableMap(
'hmmPowerSupplyTable', '1.3.6.1.4.1.2011.2.82.1.82.6.2001.1', {
'.1': 'powerIndex',
'.2': 'powerPresence',
'.3': 'powerState',
'.4': 'powerRatingPower',
'.5': 'powerMode',
'.8': 'powerRuntimePower',
}
),
GetTableMap(
'hmmPSUTable', '1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1', {
'.1': 'psuIndex',
'.2': 'psuLocation',
'.3': 'psuHealth',
}
),
)
snmpGetMap = GetMap({
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.1': 'psuIndex1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.1': 'psuLocation1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.1': 'psuHealth1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.2': 'psuIndex2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.2': 'psuLocation2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.2': 'psuHealth2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.3': 'psuIndex3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.3': 'psuLocation3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.3': 'psuHealth3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.4': 'psuIndex4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.4': 'psuLocation4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.4': 'psuHealth4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.5': 'psuIndex5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.5': 'psuLocation5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.5': 'psuHealth5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.6': 'psuIndex6',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.6': 'psuLocation6',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.6': 'psuHealth6',
})
def process(self, device, results, log):
'''
process oid
<|fim▁hole|>
log = log
device = device
temp_sensors = results[1].get('hmmPowerSupplyTable', {})
getdata = results[0]
psumap = {}
# psu_tables = results[1].get('hmmPSUTable', {})
# for snmpindex, row in psu_tables.items():
# name = str(row.get('psuIndex'))
# if not name:
# log.warn('Skipping hmmPowerSupplyTable with no name')
# continue
#
# psumap[int(name)] = [HMMLOCATION.get(row.get('psuLocation'), ''),
# HMMSTATUS.get(row.get('psuHealth'), 'normal')]
for row in range(1, 7):
rindex = 'psuIndex'+str(row)
rlocation = 'psuLocation'+str(row)
rhealth = 'psuHealth'+str(row)
psumap[row] = [HMMLOCATION.get(getdata.get(rlocation), ''),
HMMSTATUS.get(getdata.get(rhealth), 'normal')]
relmap = self.relMap()
for snmpindex, row in temp_sensors.items():
name = str(row.get('powerIndex'))
if not name:
log.warn('Skipping hmmPSUTable with no name')
continue
if 1 != int(row.get('powerPresence')):
continue
psustatus = ''
psulocation = ''
if (int(name)) in psumap:
psulocation = psumap[int(name)][0]
psustatus = psumap[int(name)][1]
relmap.append(self.objectMap({
'id': self.prepId('PS_'+name),
'title': 'PS_'+name,
'snmpindex': snmpindex.strip('.'),
'hpspresence': HMMPRESENCE.get(row.get('powerPresence'),
'unknown'),
'hpsratingPower': row.get('powerRatingPower'),
'hpsruntimePower': row.get('powerRuntimePower'),
'hpsstatus': psustatus,
'hpslocation': psulocation,
'hpspowerMode': HMMPOWERMODE.get(
row.get('powerMode'), row.get('powerMode')),
}))
return relmap<|fim▁end|>
|
'''
|
<|file_name|>GetNetworkProfileRequestMarshaller.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.alexaforbusiness.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.alexaforbusiness.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* GetNetworkProfileRequestMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class GetNetworkProfileRequestMarshaller {
<|fim▁hole|>
public static GetNetworkProfileRequestMarshaller getInstance() {
return instance;
}
/**
* Marshall the given parameter object.
*/
public void marshall(GetNetworkProfileRequest getNetworkProfileRequest, ProtocolMarshaller protocolMarshaller) {
if (getNetworkProfileRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(getNetworkProfileRequest.getNetworkProfileArn(), NETWORKPROFILEARN_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}<|fim▁end|>
|
private static final MarshallingInfo<String> NETWORKPROFILEARN_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("NetworkProfileArn").build();
private static final GetNetworkProfileRequestMarshaller instance = new GetNetworkProfileRequestMarshaller();
|
<|file_name|>tree.js<|end_file_name|><|fim▁begin|>// ========================================================================
// SproutCore -- JavaScript Application Framework
// Copyright ©2006-2011, Strobe Inc. and contributors.
// Portions copyright ©2008 Apple Inc. All rights reserved.
// ========================================================================
sc_require('controllers/object');
sc_require('mixins/selection_support');
sc_require('private/tree_item_observer');
/**
@class
A TreeController manages a tree of model objects that you might want to
display in the UI using a collection view. For the most part, you should
work with a TreeController much like you would an ObjectController, except
that the TreeController will also provide an arrangedObjects property that
can be used as the content of a CollectionView.
TODO: Document More
@extends SC.ObjectController
@extends SC.SelectionSupport
@since SproutCore 1.0
*/
SC.TreeController = SC.ObjectController.extend(SC.SelectionSupport,
/** @scope SC.TreeController.prototype */ {
// ..........................................................
// PROPERTIES
//
/**
Set to YES if you want the top-level items in the tree to be displayed as
group items in the collection view.
@property {Boolean}
*/
treeItemIsGrouped: NO,
/**
If your content supports expanding and collapsing of content, then set this
property to the name of the key on your model that should be used to <|fim▁hole|> @property {String}
*/
treeItemIsExpandedKey: "treeItemIsExpanded",
/**
Set to the name of the property on your content object that holds the
children array for each tree node. The default is "treeItemChildren".
@property {String}
*/
treeItemChildrenKey: "treeItemChildren",
/**
Returns an SC.Array object that actually will represent the tree as a
flat array suitable for use by a CollectionView. Other than binding this
property as the content of a CollectionView, you generally should not
use this property directly. Instead, work on the tree content using the
TreeController like you would any other ObjectController.
@property {SC.Array}
*/
arrangedObjects: function() {
var ret, content = this.get('content');
if (content) {
ret = SC.TreeItemObserver.create({ item: content, delegate: this });
} else ret = null; // empty!
this._sctc_arrangedObjects = ret ;
return ret ;
}.property().cacheable(),
// ..........................................................
// PRIVATE
//
/**
@private
Manually invalidate the arrangedObjects cache so that we can teardown
any existing value. We do it via an observer so that this will fire
immediately instead of waiting on some other component to get
arrangedObjects again.
*/
_sctc_invalidateArrangedObjects: function() {
this.propertyWillChange('arrangedObjects');
var ret = this._sctc_arrangedObjects;
if (ret) ret.destroy();
this._sctc_arrangedObjects = null;
this.propertyDidChange('arrangedObjects');
}.observes('content', 'treeItemIsExpandedKey', 'treeItemChildrenKey', 'treeItemIsGrouped'),
_sctc_arrangedObjectsContentDidChange: function() {
this.updateSelectionAfterContentChange();
}.observes('*arrangedObjects.[]'),
/**
@private
Returns the first item in arrangeObjects that is not a group. This uses
a brute force approach right now; we assume you probably don't have a lot
of groups up front.
*/
firstSelectableObject: function() {
var objects = this.get('arrangedObjects'),
indexes, len, idx = 0;
if (!objects) return null; // fast track
indexes = objects.contentGroupIndexes(null, objects);
len = objects.get('length');
while(indexes.contains(idx) && (idx<len)) idx++;
return idx>=len ? null : objects.objectAt(idx);
}.property()
});<|fim▁end|>
|
determine the expansion state of the item. The default is
"treeItemIsExpanded"
|
<|file_name|>create_trace_graphviz.py<|end_file_name|><|fim▁begin|>"""
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? Because the trace-inputlocator only has the GraphViz output of the last call to the script. This
version re-creates the trace-data from the (merged) yaml file (the yaml output is merged if pre-existing in the output
file).<|fim▁hole|>
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
yaml_data = yaml.safe_load(f)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
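# Each trace_data entry is a (direction, script, locator, file) tuple, e.g.
# (hypothetical names) ('input', 'demand', 'get_zone_geometry', '.../zone.shp').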
if __name__ == '__main__':
main(cea.config.Configuration())<|fim▁end|>
|
"""
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from linda_app.models import DatasourceDescription<|fim▁hole|>
# A single test over a data source
class EndpointTest(models.Model):
execution_time = models.DateTimeField(auto_now_add=True) # test timestamp
datasource = models.ForeignKey(DatasourceDescription) # tested datasource
up = models.BooleanField(default=False) # was the endpoint up? - simple select query
response_time = models.IntegerField(blank=True, null=True) # response time for a simple select query
supports_minus = models.BooleanField(default=True, blank=True) # did the endpoint support SparQL features 1.1 like MINUS?<|fim▁end|>
| |
<|file_name|>api.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import notifier
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(notifier.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
started at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
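# Illustrative usage sketch (hypothetical method name; states come from
# nova.compute.vm_states):
#
#   @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
#                         task_state=[None])
#   def example_action(self, context, instance):
#       ...
#
# The wrapper raises InstanceInvalidState if the instance is in any other
# vm_state/task_state, or if it has never been launched.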
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
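# e.g. (illustrative) check_policy(context, 'reboot', target) enforces the
# 'compute:reboot' policy rule through nova.policy.enforce.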
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
def _diff_dict(orig, new):
"""
Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
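# Illustrative example of the diff format described in the docstring:
#   _diff_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   == {'a': ['-'], 'b': ['+', 3], 'c': ['+', 4]}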
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = notifier.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
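# Illustrative arithmetic for the headroom clamp above (hypothetical numbers):
# with headroom of 10 instances, 16 cores and 32768 MB ram, a flavor with
# 4 vcpus and 8192 MB ram gives allowed = min(10, 16 // 4, 32768 // 8192) = 4.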
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if not isinstance(k, six.string_types):
msg = _("Metadata property key '%s' is not a string.") % k
raise exception.InvalidMetadata(reason=msg)
if not isinstance(v, six.string_types):
msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
"not a string.") % {'v': v, 'k': k})
raise exception.InvalidMetadata(reason=msg)
if len(k) == 0:
msg = _("Metadata property key blank")
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
        Check if the requested security groups exist and belong to
        the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""
        Check if the requested networks belong to the project
        and the fixed IP address for each network provided is within
        the same network block.
"""
return self.network_api.validate_networks(context, requested_networks,
max_count)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
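        # A sketch of the formats parsed below:
        #   "az"           -> (az, None, None)
        #   "az:host"      -> (az, host, None)
        #   "az::node"     -> (az, None, node)
        #   "az:host:node" -> (az, host, node)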
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
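        # With a template such as '%(name)s-%(count)d' (illustrative only;
        # the real value comes from configuration) the second instance of a
        # request named 'web' would be renamed 'web-2'.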
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _check_and_transform_bdm(self, base_options, image_meta, min_count,
max_count, block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# Get the block device mappings defined by the image.
image_defined_bdms = \
image_meta.get('properties', {}).get('block_device_mapping', [])
if legacy_bdm:
block_device_mapping += image_defined_bdms
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name)
elif image_defined_bdms:
# NOTE (ndipanov): For now assume that image mapping is legacy
block_device_mapping += block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
if min_count > 1 or max_count > 1:
            if any(bdm['source_type'] == 'volume'
                   for bdm in block_device_mapping):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if it's over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
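        # The root disk is identified by device_name 'vda' for legacy
        # mappings and by boot_index 0 for new-style mappings; all other
        # entries are skipped.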
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options, max_net_count = self._validate_and_build_base_options(
context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id,
max_count)
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.debug(_("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota"),
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(
base_options, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
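        # Blank devices inherit their size from the flavor: swap-formatted
        # blanks use the flavor's 'swap' value, other blanks use
        # 'ephemeral_gb'.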
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
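            # True only when the values are consecutive integers,
            # e.g. [0, 1, 2]; used below to ensure boot indexes form an
            # unbroken sequence starting at 0.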
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping)
for mapping in (image_mapping, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
self.db.instance_destroy(context, instance['uuid'])
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance.uuid))
reservations = None
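        # When an admin deletes an instance owned by another project or
        # user, charge the quota adjustments to the instance's owner
        # rather than to the admin's own project/user.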
if context.is_admin and context.project_id != instance.project_id:
project_id = instance.project_id
else:
project_id = context.project_id
if context.user_id != instance.user_id:
user_id = instance.user_id
else:
user_id = context.user_id
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
        # Confirm the resize synchronously, since this can race with
        # terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, downsize_reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.FlavorNotFound:
LOG.warning(_("Flavor %d not found"), old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
num_instances, quota_reservations = self._check_num_instances_quota(
context, flavor, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=[None],
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=[None],
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
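        # Helpers that translate user-facing search options into the
        # filter names/formats expected by the DB layer.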
def _remap_flavor_filter(flavor_id):
flavor = flavor_obj.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
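            # e.g. '10.0.0.1' becomes the filter '^10\.0\.0\.1$'.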
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
props_copy = dict(extra_properties, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
# TODO(xqueralt): Use new style BDM in volume snapshots
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
# Clean the BDM of the database related fields to prevent
# duplicates in the future (e.g. the id was being preserved)
for field in block_device.BlockDeviceDict._db_only_fields:
bdm.pop(field, None)
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
# Clean the extra volume related fields that will be generated
# when booting from the new snapshot.
bdm.pop('volume_id')
bdm.pop('connection_info')
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] in [vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED,
vm_states.ERROR])):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance.image_ref or ''
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
# we do the previous save() and before we update.. those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in instance.system_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
instance.task_state = task_states.REBUILDING
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance.uuid))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
reservations = self._reserve_quota_delta(context, deltas)
instance.task_state = task_states.RESIZE_REVERTING
try:
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
reservations)
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
        :param new_flavor: the target flavor
        :param old_flavor: the original flavor
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['old_instance_type_id'])
new_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_flavor = instance.get_flavor('old')
new_flavor = instance.get_flavor('new')
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@staticmethod
def _resize_cells_support(context, reservations, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
if reservations:
# With cells, the best we can do right now is commit the
# reservations immediately...
QUOTAS.commit(context, reservations,
project_id=instance.project_id)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
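        # A resize to the identical flavor is only rejected outside of a
        # compute cell.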
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
            reservations = self._reserve_quota_delta(
                context, deltas, project_id=instance['project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
resource = overs[0]
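            # Derive current usage and the absolute limit for the first
            # over-quota resource so they can be reported in the error.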
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
        # When flavor_id is None, the request is treated as a migration.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, reservations, instance,
current_instance_type,
new_instance_type)
reservations = []
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
        it easier for the cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")<|fim▁hole|> raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = _diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms=None):
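        # An instance with no image_ref can only have booted from a volume.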
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance, legacy=False)
root_bdm = block_device.get_root_bdm(bdms)
if root_bdm and root_bdm.get('destination_type') == 'volume':
return True
return False
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=[None],
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
        # NOTE(danms): Transitional until evacuate supports objects
inst_obj = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=['metadata', 'system_metadata'])
return self.compute_rpcapi.rebuild_instance(context,
instance=inst_obj,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm['instance'],
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm['instance'],
volume_id, snapshot_id, delete_info)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
        the evacuation of guest VMs.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
        attributes and matching values. For example, to get a list of services
        for the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
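        # 'disabled' is handled by the DB query itself, so keep it out of the
        # generic attribute filtering below.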
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
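        # The for/else below keeps a service only when no filter comparison
        # triggered the break, i.e. every remaining filter matched.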
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.update_metadata(metadata)
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = notifier.get_notifier(service='api')
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
        with the instance. If so, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)<|fim▁end|>
| |
<|file_name|>mixed-field-qualifiers.rs<|end_file_name|><|fim▁begin|>#[macro_use]
extern crate typeinfo;<|fim▁hole|> struct Foo {
a: i32,
pub b: i32, //~ ERROR no rules expected the token `b`
}
}<|fim▁end|>
|
def! {
|
<|file_name|>Zombie.spec.ts<|end_file_name|><|fim▁begin|>import { Zombie } from './';
describe('Zombie', () => {
it('should set UUIDLeast', () => {
let zombie = new Zombie();
zombie.Tag.UUIDLeast = 'Test';
expect(zombie.Command).to.be('{UUIDLeast:"Test"}');
});
it('should set AttackTime', () => {
let zombie = new Zombie();
zombie.Tag.AttackTime = 50;
zombie.Tag.UUIDLeast = 'Test';
expect(zombie.Command).to.be('{AttackTime:50,UUIDLeast:"Test"}');
});
it('should set CanBreakDoors', () => {<|fim▁hole|> it('should add a Passenger', () => {
let zombie1 = new Zombie();
zombie1.Tag.AttackTime = 5;
zombie1.Tag.UUIDMost = '25';
let zombie2 = new Zombie();
zombie2.Tag.AddPassenger(zombie1);
expect(zombie2.Command).to.be('{Passengers:[{AttackTime:5,UUIDMost:"25",id:"minecraft:zombie"}]}');
});
});<|fim▁end|>
|
let zombie = new Zombie();
zombie.Tag.CanBreakDoors = true;
expect(zombie.Command).to.be('{CanBreakDoors:true}');
});
|
<|file_name|>ClosureDefinitionTranslator.java<|end_file_name|><|fim▁begin|>/*
* Renjin : JVM-based interpreter for the R language for the statistical analysis
* Copyright © 2010-2019 BeDataDriven Groep B.V. and contributors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, a copy is available at
* https://www.gnu.org/licenses/gpl-2.0.txt
*/
package org.renjin.compiler.ir.tac.functions;
import org.renjin.compiler.ir.tac.IRBodyBuilder;
import org.renjin.compiler.ir.tac.expressions.Expression;
import org.renjin.compiler.ir.tac.expressions.NestedFunction;
import org.renjin.eval.EvalException;
import org.renjin.sexp.FunctionCall;
import org.renjin.sexp.PairList;
import org.renjin.sexp.SEXP;
/**
* Translator for the {@code function} function.
*/
public class ClosureDefinitionTranslator extends FunctionCallTranslator {
@Override
public Expression translateToExpression(IRBodyBuilder builder,
TranslationContext context, FunctionCall call) {
PairList formals = EvalException.checkedCast(call.getArgument(0));
SEXP body = call.getArgument(1);
SEXP source = call.getArgument(2);
return new NestedFunction(formals, body);
}
@Override
public void addStatement(IRBodyBuilder builder, TranslationContext context,<|fim▁hole|> // function(x) x*2
// has no effect.
}
}<|fim▁end|>
|
FunctionCall call) {
// a closure whose value is not used has no side effects
// E.g.
|
<|file_name|>test.conf.js<|end_file_name|><|fim▁begin|>/**
* @license angular-sortable-column
* (c) 2013 Knight Rider Consulting, Inc. http://www.knightrider.com
* License: MIT
*/
/**
*
* @author Dale "Ducky" Lotts
* @since 7/21/13
*/
basePath = '..';
files = [
JASMINE,
JASMINE_ADAPTER,<|fim▁hole|> 'bower_components/angular-mocks/angular-mocks.js',
'src/js/sortableColumn.js',
'test/*.spec.js'
];
// list of files to exclude
exclude = [
];
preprocessors = {
'**/src/js/*.js': 'coverage'
};
// test results reporter to use
// possible values: 'dots', 'progress', 'junit'
reporters = ['progress', 'coverage'];
// web server port
port = 9876;
// cli runner port
runnerPort = 9100;
// enable / disable colors in the output (reporters and logs)
colors = true;
// level of logging
// possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
logLevel = LOG_INFO;
// enable / disable watching file and executing tests whenever any file changes
autoWatch = false;
// Start these browsers, currently available:
// - Chrome
// - ChromeCanary
// - Firefox
// - Opera
// - Safari (only Mac)
// - PhantomJS
// - IE (only Windows)
browsers = ['Chrome'];
// If browser does not capture in given timeout [ms], kill it
captureTimeout = 60000;
// Continuous Integration mode
// if true, it capture browsers, run tests and exit
singleRun = true;<|fim▁end|>
|
'bower_components/jquery/dist/jquery.js',
'bower_components/angular/angular.js',
'bower_components/angular-route/angular-route.js',
|
<|file_name|>ListType.java<|end_file_name|><|fim▁begin|>package simpl.typing;
public final class ListType extends Type {
public Type t;
public ListType(Type t) {
this.t = t;
}
@Override
public boolean isEqualityType() {
// TODO Done
return t.isEqualityType();
}
@Override
public Substitution unify(Type t) throws TypeError {
// TODO Done
if (t instanceof TypeVar) {<|fim▁hole|> }
else if (t instanceof ListType) {
return this.t.unify(((ListType)t).t);
}
else {
throw new TypeMismatchError();
}
}
@Override
public boolean contains(TypeVar tv) {
// TODO Done
return t.contains(tv);
}
@Override
public Type replace(TypeVar a, Type t) {
// TODO Done
return new ListType(this.t.replace(a, t));
}
public String toString() {
return t + " list";
}
@Override
public boolean equals(Type t) {
if (t instanceof ListType) {
return this.t.equals(((ListType)t).t);
}
return false;
}
}<|fim▁end|>
|
return t.unify(this);
|
<|file_name|>editor.js<|end_file_name|><|fim▁begin|>var g_batchAssessmentEditor = null;
var g_tabAssessments = null;
var g_updatingAttendance = false;
var g_onRefresh = null;
var g_lockedCount = 0;
var g_btnSubmit = null;
var g_sectionAssessmentEditors = null;
var g_sectionAssessmentButtons = null;
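// Builds an Assessment model object from the assessment text and the id of the participant it is addressed to.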
function createAssessment(content, to) {
var assessmentJson = {};
assessmentJson["content"] = content;
assessmentJson["to"] = to;
return new Assessment(assessmentJson);
}
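// Per-participant editor state: the target participant, the typed content and the lock checkbox controlling submission.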
function SingleAssessmentEditor(){
this.participantId = 0;
this.name = "";
this.content = "";
this.lock = null;
}
function BatchAssessmentEditor(){
this.switchAttendance = null;
this.selectAll = null;
this.editors = null;
}
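// Renders one row (avatar and profile link) for a participant and returns its SingleAssessmentEditor;
// shows an input row while the viewer has not yet assessed, otherwise a "view assessment" link.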
function generateAssessmentEditor(par, participant, activity, batchEditor){
var singleEditor = new SingleAssessmentEditor();
var row = $('<div>', {
"class": "assessment-input-row"
}).appendTo(par);
var avatar = $("<img>", {
src: participant.avatar,
"class": "assessment-avatar"
}).click(function(evt) {
evt.preventDefault();
window.location.hash = ("profile?" + g_keyVieweeId + "=" + participant.id.toString());
}).appendTo(row);
var name = $('<a>', {
href: "#",
text: participant.name
}).appendTo(row);
name.click(function(evt) {
evt.preventDefault();
window.location.hash = ("profile?" + g_keyVieweeId + "=" + participant.id.toString());
});
singleEditor.participantId = participant.id;
singleEditor.name = participant.name;
if ( activity.containsRelation() && ((activity.relation & assessed) == 0) ) generateUnassessedView(row, singleEditor, batchEditor);
else generateAssessedView(row, participant, activity);
if(g_loggedInUser != null && g_loggedInUser.id == participant.id) row.hide();
return singleEditor;
}
function generateAssessmentEditors(par, activity, batchEditor) {
par.empty();
var participants = activity.selectedParticipants;
var editors = new Array();
for(var i = 0; i < participants.length; i++){
var editor = generateAssessmentEditor(par, participants[i], activity, batchEditor);
editors.push(editor);
}
return editors;
}
function generateAssessmentButtons(par, activity, batchEditor){
par.empty();
if(batchEditor.editors == null || batchEditor.editors.length <= 1) return;
var row = $('<div>', {
"class": "assessment-button"
}).appendTo(par);
var btnCheckAll = $("<button>", {
text: TITLES["check_all"],
"class": "gray assessment-button"
}).appendTo(row);
btnCheckAll.click(batchEditor, function(evt){
evt.preventDefault();
for(var i = 0; i < evt.data.editors.length; i++) {
var editor = evt.data.editors[i];
editor.lock.prop("checked", true).change();
}
});
var btnUncheckAll = $("<button>", {
text: TITLES["uncheck_all"],
"class": "gray assessment-button"
}).appendTo(row);
btnUncheckAll.click(batchEditor, function(evt){
evt.preventDefault();
for(var i = 0; i < evt.data.editors.length; i++) {
var editor = evt.data.editors[i];
editor.lock.prop("checked", false).change();
}
});
g_btnSubmit = $("<button>", {
text: TITLES["submit"],
"class": "assessment-button positive-button"
}).appendTo(row);
g_btnSubmit.click({editor: batchEditor, activity: activity}, function(evt){
evt.preventDefault();
if (g_loggedInUser == null) return;
var aBatchEditor = evt.data.editor;
var aActivity = evt.data.activity;
var assessments = new Array();
for(var i = 0; i < aBatchEditor.editors.length; i++) {
var editor = aBatchEditor.editors[i];
var content = editor.content;
var to = editor.participantId;
if(to == g_loggedInUser.id) continue;
var assessment = createAssessment(content, to);
assessments.push(assessment);
}
if (assessments.length == 0) return;
var params = {};
var token = $.cookie(g_keyToken);
params[g_keyToken] = token;
params[g_keyActivityId] = aActivity.id;
params[g_keyBundle] = JSON.stringify(assessments);
var aButton = $(this);
disableField(aButton);
$.ajax({
type: "POST",
url: "/assessment/submit",
data: params,
success: function(data, status, xhr){
enableField(aButton);
if (isTokenExpired(data)) {
logout(null);
return;
}
alert(ALERTS["assessment_submitted"]);
aActivity.relation |= assessed;
refreshBatchEditor(aActivity);
},
error: function(xhr, status, err){
enableField(aButton);
alert(ALERTS["assessment_not_submitted"]);
}
});
}).appendTo(row);
disableField(g_btnSubmit);
}
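// Builds the complete batch editor for an activity: the attendance switch plus the editor and button sections.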
function generateBatchAssessmentEditor(par, activity, onRefresh){
par.empty();
if(g_onRefresh == null) g_onRefresh = onRefresh;
g_lockedCount = 0; // clear lock count on batch editor generated
g_batchAssessmentEditor = new BatchAssessmentEditor();
if(activity == null) return g_batchAssessmentEditor;
var editors = [];
var sectionAll = $('<div>', {
"class": "assessment-container"
}).appendTo(par);
var initVal = false;
var disabled = false;
// Determine attendance switch initial state based on viewer-activity-relation
if (g_loggedInUser != null && activity.host.id == g_loggedInUser.id) {
// host cannot choose presence
initVal = true;
disabled = true;
} else if((activity.relation & present) > 0) {
// present participants
initVal = true;
} else if((activity.relation & selected) > 0 || (activity.relation & absent) > 0) {
// selected but not present
initVal = false;
} else {
disabled = true;
}
var attendanceSwitch = createBinarySwitch(sectionAll, disabled, initVal, TITLES["assessment_disabled"], TITLES["present"], TITLES["absent"], "switch-attendance");
g_sectionAssessmentEditors = $('<div>', {
style: "margin-top: 5pt"
}).appendTo(sectionAll);
g_sectionAssessmentButtons = $('<div>', {
style: "margin-top: 5pt"
}).appendTo(sectionAll);
if( g_loggedInUser != null && ( ((activity.relation & present) > 0) || (activity.containsRelation() == false) ) ) {
/*
* show list for logged-in users
*/
refreshBatchEditor(activity);
}
var onSuccess = function(data){
g_updatingAttendance = false;
// update activity.relation by returned value
var relationJson = data;
activity.relation = parseInt(relationJson[g_keyRelation]);
g_sectionAssessmentEditors.empty();
g_sectionAssessmentButtons.empty();
var value = getBinarySwitchState(attendanceSwitch);<|fim▁hole|> refreshBatchEditor(activity);
};
var onError = function(err){
g_updatingAttendance = false;
// reset switch status if updating attendance fails
var value = getBinarySwitchState(attendanceSwitch);
var resetVal = !value;
setBinarySwitch(attendanceSwitch, resetVal);
};
var onClick = function(evt){
evt.preventDefault();
if(activity.relation == invalid) return;
if(!activity.hasBegun()) {
alert(ALERTS["activity_not_begun"]);
return;
}
var value = getBinarySwitchState(attendanceSwitch);
var newVal = !value;
setBinarySwitch(attendanceSwitch, newVal);
var attendance;
if(newVal) attendance = present;
else attendance = absent;
updateAttendance(activity.id, attendance, onSuccess, onError);
};
setBinarySwitchOnClick(attendanceSwitch, onClick);
return g_batchAssessmentEditor;
}
function updateAttendance(activityId, attendance, onSuccess, onError){
// prototypes: onSuccess(data), onError(err)
if(g_updatingAttendance) return;
var token = $.cookie(g_keyToken);
if(token == null) return;
var params={};
params[g_keyRelation] = attendance;
params[g_keyToken] = token;
params[g_keyActivityId] = activityId;
g_updatingAttendance = true;
$.ajax({
type: "PUT",
url: "/activity/mark",
data: params,
success: function(data, status, xhr) {
if (isTokenExpired(data)) {
logout(null);
return;
}
onSuccess(data);
},
error: function(xhr, status, err) {
onError(err);
}
});
}
function generateAssessedView(row, participant, activity) {
var btnView = $('<span>', {
text: TITLES["view_assessment"],
style: "display: inline; color: blue; margin-left: 5pt; cursor: pointer"
}).appendTo(row);
btnView.click(function(evt){
evt.preventDefault();
queryAssessmentsAndRefresh(participant.id, activity.id);
});
}
function generateUnassessedView(row, singleEditor, batchEditor) {
var lock = $('<input>', {
type: "checkbox",
"class": "left"
}).appendTo(row);
var contentInput = $('<input>', {
type: 'text'
}).appendTo(row);
contentInput.on("input paste keyup", singleEditor, function(evt){
evt.data.content = $(this).val();
});
lock.change({input: contentInput, editor: batchEditor}, function(evt){
var aInput = evt.data.input;
var aBatchEditor = evt.data.editor;
evt.preventDefault();
var checked = isChecked($(this));
if(!checked) {
enableField(aInput);
--g_lockedCount;
if(g_btnSubmit != null) disableField(g_btnSubmit);
} else {
disableField(aInput);
++g_lockedCount;
if(g_lockedCount >= (aBatchEditor.editors.length - 1) && g_btnSubmit != null) enableField(g_btnSubmit);
}
});
singleEditor.lock = lock;
}
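// Regenerates the editor rows and action buttons, e.g. after the viewer's relation to the activity changes.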
function refreshBatchEditor(activity) {
if (!activity.hasBegun()) return;
if(g_batchAssessmentEditor == null || g_sectionAssessmentEditors == null || g_sectionAssessmentButtons == null) return;
var editors = generateAssessmentEditors(g_sectionAssessmentEditors, activity, g_batchAssessmentEditor);
g_batchAssessmentEditor.editors = editors;
g_sectionAssessmentButtons.empty();
if(!activity.containsRelation() || (activity.containsRelation() && (activity.relation & assessed) > 0) || g_batchAssessmentEditor.editors.length <= 1) return;
generateAssessmentButtons(g_sectionAssessmentButtons, activity, g_batchAssessmentEditor);
}<|fim▁end|>
|
if(!value) return;
// assessed participants cannot edit or re-submit assessments
|
<|file_name|>uimanager.spec.ts<|end_file_name|><|fim▁begin|>import { InternalUIConfig, PlayerWrapper, UIManager } from '../src/ts/uimanager';
import { PlayerAPI } from 'bitmovin-player';
import { MockHelper, TestingPlayerAPI } from './helper/MockHelper';
import { MobileV3PlayerEvent } from '../src/ts/mobilev3playerapi';
jest.mock('../src/ts/dom');
// This just simulates a Class that can be wrapped by our PlayerWrapper.
// To enable this simple class structure we need a lot of any casts in the tests.
class A {
private value: object = undefined;
get a() {
return this.value;<|fim▁hole|> }
// This is needed to change the actual value of the property
giveValueAValue() {
this.value = { foo: 'bar' };
}
}
class B extends A {
get b() {
return {};
}
}
class C extends B {
get c() {
return {};
}
}
describe('UIManager', () => {
describe('PlayerWrapper', () => {
let playerWrapper: PlayerWrapper;
describe('without inheritance', () => {
let superClassInstance: A;
beforeEach(() => {
const testInstance: PlayerAPI = new A() as any as PlayerAPI;
playerWrapper = new PlayerWrapper(testInstance);
(testInstance as any).giveValueAValue(); // Change the value of the actual property to simulate async loaded module
superClassInstance = playerWrapper.getPlayer() as any as A;
});
it('wraps functions', () => {
expect(superClassInstance.a).not.toBeUndefined();
});
});
describe('with inheritance', () => {
let inheritedClassInstance: C;
beforeEach(() => {
const testInstance: PlayerAPI = new C() as any as PlayerAPI;
playerWrapper = new PlayerWrapper(testInstance);
(testInstance as any).giveValueAValue(); // Change the value of the actual property to simulate async loaded module
inheritedClassInstance = playerWrapper.getPlayer() as any as C;
});
it('wraps functions of super class', () => {
expect(inheritedClassInstance.a).not.toBeUndefined();
});
});
});
describe('mobile v3 handling', () => {
let playerMock: TestingPlayerAPI;
beforeEach(() => {
playerMock = MockHelper.getPlayerMock();
// disable HTML element interactions
UIManager.prototype.switchToUiVariant = jest.fn();
});
describe('when a PlaylistTransition event is part of PlayerEvent', () => {
beforeEach(() => {
(playerMock.exports.PlayerEvent as any).SourceError = MobileV3PlayerEvent.SourceError;
(playerMock.exports.PlayerEvent as any).PlayerError = MobileV3PlayerEvent.PlayerError;
(playerMock.exports.PlayerEvent as any).PlaylistTransition = MobileV3PlayerEvent.PlaylistTransition;
});
it('attaches the listener', () => {
const onSpy = jest.spyOn(playerMock, 'on');
new UIManager(playerMock, MockHelper.generateDOMMock() as any);
expect(onSpy).toHaveBeenCalledWith('playlisttransition', expect.any(Function));
});
describe('and a PlaylistTransition event occurs', () => {
it('dispatches onUpdated', () => {
const uiManager = new UIManager(playerMock, MockHelper.generateDOMMock() as any);
let onUpdatedSpy = jest.fn();
(uiManager.getConfig() as InternalUIConfig).events.onUpdated.subscribe(onUpdatedSpy);
playerMock.eventEmitter.firePlaylistTransitionEvent();
expect(onUpdatedSpy).toHaveBeenCalled();
});
});
});
describe('when no PlaylistTransition event is part of PlayerEvent', () => {
beforeEach(() => {
delete (playerMock.exports.PlayerEvent as any).PlaylistTransition;
});
it('does not attach a listener', () => {
const onSpy = jest.spyOn(playerMock, 'on');
new UIManager(playerMock, MockHelper.generateDOMMock() as any);
expect(onSpy).not.toHaveBeenCalledWith('playlisttransition', expect.any(Function));
});
});
});
});<|fim▁end|>
| |
<|file_name|>Q00700_CursedLife.java<|end_file_name|><|fim▁begin|>/*
* This file is part of the L2J Mobius project.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package quests.Q00700_CursedLife;
import java.util.HashMap;
import java.util.Map;
import com.l2jmobius.gameserver.enums.QuestSound;
import com.l2jmobius.gameserver.model.actor.L2Npc;
import com.l2jmobius.gameserver.model.actor.instance.L2PcInstance;
import com.l2jmobius.gameserver.model.quest.Quest;
import com.l2jmobius.gameserver.model.quest.QuestState;
import com.l2jmobius.gameserver.model.quest.State;
import quests.Q10273_GoodDayToFly.Q10273_GoodDayToFly;
/**
* Cursed Life (700)
* @author xban1x
*/
public class Q00700_CursedLife extends Quest
{
// NPC
private static final int ORBYU = 32560;
// Monsters
private static final int ROK = 25624;
private static final Map<Integer, Integer[]> MONSTERS = new HashMap<>();
//@formatter:off
static
{
MONSTERS.put(22602, new Integer[] { 15, 139, 965}); // Mutant Bird lvl 1
MONSTERS.put(22603, new Integer[] { 15, 143, 999}); // Mutant Bird lvl 2
MONSTERS.put(25627, new Integer[] { 14, 125, 993}); // Mutant Bird lvl 3
MONSTERS.put(22604, new Integer[] { 5, 94, 994}); // Dra Hawk lvl 1
MONSTERS.put(22605, new Integer[] { 5, 99, 993}); // Dra Hawk lvl 2
MONSTERS.put(25628, new Integer[] { 3, 73, 991}); // Dra Hawk lvl 3
}
//@formatter:on
// Items
private static final int SWALLOWED_BONES = 13874;
private static final int SWALLOWED_STERNUM = 13873;
private static final int SWALLOWED_SKULL = 13872;
// Misc
private static final int MIN_LVL = 75;
private static final int SWALLOWED_BONES_ADENA = 500;
private static final int SWALLOWED_STERNUM_ADENA = 5000;
private static final int SWALLOWED_SKULL_ADENA = 50000;
private static final int BONUS = 16670;
public Q00700_CursedLife()
{
super(700, Q00700_CursedLife.class.getSimpleName(), "Cursed Life");
addStartNpc(ORBYU);
addTalkId(ORBYU);
addKillId(ROK);
addKillId(MONSTERS.keySet());
registerQuestItems(SWALLOWED_BONES, SWALLOWED_STERNUM, SWALLOWED_SKULL);
}
@Override
public String onAdvEvent(String event, L2Npc npc, L2PcInstance player)
{
QuestState st = getQuestState(player, false);
String htmltext = null;
if (st != null)
{
switch (event)
{
case "32560-02.htm":
{
st = player.getQuestState(Q10273_GoodDayToFly.class.getSimpleName());
htmltext = ((player.getLevel() < MIN_LVL) || (st == null) || (!st.isCompleted())) ? "32560-03.htm" : event;
break;
}
case "32560-04.htm":
case "32560-09.html":
{
htmltext = event;
break;
}
case "32560-05.htm":
{
st.startQuest();
htmltext = event;
break;
}
case "32560-10.html":
{
st.exitQuest(true, true);
htmltext = event;
break;
}
}
}
return htmltext;
}
@Override
public String onTalk(L2Npc npc, L2PcInstance player)
{
final QuestState st = getQuestState(player, true);
String htmltext = getNoQuestMsg(player);
if (st != null)
{
switch (st.getState())
{
case State.CREATED:
{<|fim▁hole|> case State.STARTED:
{
final long bones = st.getQuestItemsCount(SWALLOWED_BONES);
final long ribs = st.getQuestItemsCount(SWALLOWED_STERNUM);
final long skulls = st.getQuestItemsCount(SWALLOWED_SKULL);
final long sum = bones + ribs + skulls;
if (sum > 0)
{
st.giveAdena(((bones * SWALLOWED_BONES_ADENA) + (ribs * SWALLOWED_STERNUM_ADENA) + (skulls * SWALLOWED_SKULL_ADENA) + (sum >= 10 ? BONUS : 0)), true);
takeItems(player, -1, SWALLOWED_BONES, SWALLOWED_STERNUM, SWALLOWED_SKULL);
htmltext = sum < 10 ? "32560-07.html" : "32560-08.html";
}
else
{
htmltext = "32560-06.html";
}
break;
}
}
}
return htmltext;
}
@Override
public String onKill(L2Npc npc, L2PcInstance player, boolean isSummon)
{
final QuestState st = getQuestState(player, false);
if (st != null)
{
if (npc.getId() == ROK)
{
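				// roll weighted random amounts of each swallowed quest item awarded for killing the raid boss Rok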
int amount = 0, chance = getRandom(1000);
if (chance < 700)
{
amount = 1;
}
else if (chance < 885)
{
amount = 2;
}
else if (chance < 949)
{
amount = 3;
}
else if (chance < 966)
{
amount = getRandom(5) + 4;
}
else if (chance < 985)
{
amount = getRandom(9) + 4;
}
else if (chance < 993)
{
amount = getRandom(7) + 13;
}
else if (chance < 997)
{
amount = getRandom(15) + 9;
}
else if (chance < 999)
{
amount = getRandom(23) + 53;
}
else
{
amount = getRandom(49) + 76;
}
st.giveItems(SWALLOWED_BONES, amount);
chance = getRandom(1000);
if (chance < 520)
{
amount = 1;
}
else if (chance < 771)
{
amount = 2;
}
else if (chance < 836)
{
amount = 3;
}
else if (chance < 985)
{
amount = getRandom(2) + 4;
}
else if (chance < 995)
{
amount = getRandom(4) + 5;
}
else
{
amount = getRandom(8) + 6;
}
st.giveItems(SWALLOWED_STERNUM, amount);
chance = getRandom(1000);
if (chance < 185)
{
amount = getRandom(2) + 1;
}
else if (chance < 370)
{
amount = getRandom(6) + 2;
}
else if (chance < 570)
{
amount = getRandom(6) + 7;
}
else if (chance < 850)
{
amount = getRandom(6) + 12;
}
else
{
amount = getRandom(6) + 17;
}
st.giveItems(SWALLOWED_SKULL, amount);
st.playSound(QuestSound.ITEMSOUND_QUEST_ITEMGET);
}
else
{
final Integer[] chances = MONSTERS.get(npc.getId());
final int chance = getRandom(1000);
if (chance < chances[0])
{
st.giveItems(SWALLOWED_BONES, 1);
st.playSound(QuestSound.ITEMSOUND_QUEST_ITEMGET);
}
else if (chance < chances[1])
{
st.giveItems(SWALLOWED_STERNUM, 1);
st.playSound(QuestSound.ITEMSOUND_QUEST_ITEMGET);
}
else if (chance < chances[2])
{
st.giveItems(SWALLOWED_SKULL, 1);
st.playSound(QuestSound.ITEMSOUND_QUEST_ITEMGET);
}
}
}
return super.onKill(npc, player, isSummon);
}
}<|fim▁end|>
|
htmltext = "32560-01.htm";
break;
}
|
<|file_name|>Problem.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.devicefarm.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Represents a specific warning or failure.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/devicefarm-2015-06-23/Problem" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class Problem implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* Information about the associated run.
* </p>
*/
private ProblemDetail run;
/**
* <p>
* Information about the associated job.
* </p>
*/
private ProblemDetail job;
/**
* <p>
* Information about the associated suite.
* </p>
*/
private ProblemDetail suite;
/**
* <p>
* Information about the associated test.
* </p>
*/
private ProblemDetail test;
/**
* <p>
* Information about the associated device.
* </p>
*/
private Device device;
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*/
private String result;
/**
* <p>
* A message about the problem's result.
* </p>
*/
private String message;
/**
* <p>
* Information about the associated run.
* </p>
*
* @param run
* Information about the associated run.
*/
public void setRun(ProblemDetail run) {
this.run = run;
}
/**
* <p>
* Information about the associated run.
* </p>
*
* @return Information about the associated run.
*/
public ProblemDetail getRun() {
return this.run;
}
/**
* <p>
* Information about the associated run.
* </p>
*
* @param run
* Information about the associated run.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withRun(ProblemDetail run) {
setRun(run);
return this;
}
/**
* <p>
* Information about the associated job.
* </p>
*
* @param job
* Information about the associated job.
*/
public void setJob(ProblemDetail job) {
this.job = job;
}
/**
* <p>
* Information about the associated job.
* </p>
*
* @return Information about the associated job.
*/
public ProblemDetail getJob() {
return this.job;
}
/**
* <p>
* Information about the associated job.
* </p>
*
* @param job
* Information about the associated job.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withJob(ProblemDetail job) {
setJob(job);
return this;
}
/**
* <p>
* Information about the associated suite.
* </p>
*
* @param suite
* Information about the associated suite.
*/
public void setSuite(ProblemDetail suite) {
this.suite = suite;
}
/**
* <p>
* Information about the associated suite.
* </p>
*
* @return Information about the associated suite.
*/
public ProblemDetail getSuite() {
return this.suite;
}
/**
* <p>
* Information about the associated suite.
* </p>
*
* @param suite
* Information about the associated suite.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withSuite(ProblemDetail suite) {
setSuite(suite);
return this;
}
/**
* <p>
* Information about the associated test.
* </p>
*
* @param test
* Information about the associated test.
*/
public void setTest(ProblemDetail test) {
this.test = test;
}
/**
* <p>
* Information about the associated test.
* </p>
*
* @return Information about the associated test.
*/
public ProblemDetail getTest() {
return this.test;
}
/**
* <p>
* Information about the associated test.
* </p>
*
* @param test
* Information about the associated test.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withTest(ProblemDetail test) {
setTest(test);
return this;
}
/**
* <p>
* Information about the associated device.
* </p>
*
* @param device
* Information about the associated device.
*/
public void setDevice(Device device) {
this.device = device;
}
/**
* <p>
* Information about the associated device.
* </p>
*
* @return Information about the associated device.
*/
public Device getDevice() {
return this.device;
}
/**
* <p>
* Information about the associated device.
* </p>
*
* @param device
* Information about the associated device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withDevice(Device device) {
setDevice(device);
return this;
}
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*
* @param result
* The problem's result.</p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p><|fim▁hole|> * <li>
* <p>
* STOPPED
* </p>
* </li>
* @see ExecutionResult
*/
public void setResult(String result) {
this.result = result;
}
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*
* @return The problem's result.</p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* @see ExecutionResult
*/
public String getResult() {
return this.result;
}
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*
* @param result
* The problem's result.</p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExecutionResult
*/
public Problem withResult(String result) {
setResult(result);
return this;
}
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*
* @param result
* The problem's result.</p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* @see ExecutionResult
*/
public void setResult(ExecutionResult result) {
withResult(result);
}
/**
* <p>
* The problem's result.
* </p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* </ul>
*
* @param result
* The problem's result.</p>
* <p>
* Allowed values include:
* </p>
* <ul>
* <li>
* <p>
* PENDING
* </p>
* </li>
* <li>
* <p>
* PASSED
* </p>
* </li>
* <li>
* <p>
* WARNED
* </p>
* </li>
* <li>
* <p>
* FAILED
* </p>
* </li>
* <li>
* <p>
* SKIPPED
* </p>
* </li>
* <li>
* <p>
* ERRORED
* </p>
* </li>
* <li>
* <p>
* STOPPED
* </p>
* </li>
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExecutionResult
*/
public Problem withResult(ExecutionResult result) {
this.result = result.toString();
return this;
}
/**
* <p>
* A message about the problem's result.
* </p>
*
* @param message
* A message about the problem's result.
*/
public void setMessage(String message) {
this.message = message;
}
/**
* <p>
* A message about the problem's result.
* </p>
*
* @return A message about the problem's result.
*/
public String getMessage() {
return this.message;
}
/**
* <p>
* A message about the problem's result.
* </p>
*
* @param message
* A message about the problem's result.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public Problem withMessage(String message) {
setMessage(message);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getRun() != null)
sb.append("Run: ").append(getRun()).append(",");
if (getJob() != null)
sb.append("Job: ").append(getJob()).append(",");
if (getSuite() != null)
sb.append("Suite: ").append(getSuite()).append(",");
if (getTest() != null)
sb.append("Test: ").append(getTest()).append(",");
if (getDevice() != null)
sb.append("Device: ").append(getDevice()).append(",");
if (getResult() != null)
sb.append("Result: ").append(getResult()).append(",");
if (getMessage() != null)
sb.append("Message: ").append(getMessage());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof Problem == false)
return false;
Problem other = (Problem) obj;
if (other.getRun() == null ^ this.getRun() == null)
return false;
if (other.getRun() != null && other.getRun().equals(this.getRun()) == false)
return false;
if (other.getJob() == null ^ this.getJob() == null)
return false;
if (other.getJob() != null && other.getJob().equals(this.getJob()) == false)
return false;
if (other.getSuite() == null ^ this.getSuite() == null)
return false;
if (other.getSuite() != null && other.getSuite().equals(this.getSuite()) == false)
return false;
if (other.getTest() == null ^ this.getTest() == null)
return false;
if (other.getTest() != null && other.getTest().equals(this.getTest()) == false)
return false;
if (other.getDevice() == null ^ this.getDevice() == null)
return false;
if (other.getDevice() != null && other.getDevice().equals(this.getDevice()) == false)
return false;
if (other.getResult() == null ^ this.getResult() == null)
return false;
if (other.getResult() != null && other.getResult().equals(this.getResult()) == false)
return false;
if (other.getMessage() == null ^ this.getMessage() == null)
return false;
if (other.getMessage() != null && other.getMessage().equals(this.getMessage()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getRun() == null) ? 0 : getRun().hashCode());
hashCode = prime * hashCode + ((getJob() == null) ? 0 : getJob().hashCode());
hashCode = prime * hashCode + ((getSuite() == null) ? 0 : getSuite().hashCode());
hashCode = prime * hashCode + ((getTest() == null) ? 0 : getTest().hashCode());
hashCode = prime * hashCode + ((getDevice() == null) ? 0 : getDevice().hashCode());
hashCode = prime * hashCode + ((getResult() == null) ? 0 : getResult().hashCode());
hashCode = prime * hashCode + ((getMessage() == null) ? 0 : getMessage().hashCode());
return hashCode;
}
@Override
public Problem clone() {
try {
return (Problem) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.devicefarm.model.transform.ProblemMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}<|fim▁end|>
|
* </li>
|
<|file_name|>ratio_layouted_frame.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2011, Dirk Thomas, TU Darmstadt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the TU Darmstadt nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*<|fim▁hole|> * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <rqt_autopilot/ratio_layouted_frame.h>
#include <assert.h>
namespace rqt_autopilot {
RatioLayoutedFrame::RatioLayoutedFrame(QWidget* parent, Qt::WFlags flags)
: QFrame()
, aspect_ratio_(4, 3)
{
}
RatioLayoutedFrame::~RatioLayoutedFrame()
{
}
void RatioLayoutedFrame::resizeToFitAspectRatio()
{
QRect rect = contentsRect();
  // reduce longer edge to aspect ratio
double width = double(rect.width());
double height = double(rect.height());
if (width * aspect_ratio_.height() / height > aspect_ratio_.width())
{
// too large width
width = height * aspect_ratio_.width() / aspect_ratio_.height();
rect.setWidth(int(width));
}
else
{
// too large height
height = width * aspect_ratio_.height() / aspect_ratio_.width();
rect.setHeight(int(height));
}
// resize taking the border line into account
int border = lineWidth();
resize(rect.width() + 2 * border, rect.height() + 2 * border);
}
void RatioLayoutedFrame::setInnerFrameMinimumSize(const QSize& size)
{
int border = lineWidth();
QSize new_size = size;
new_size += QSize(2 * border, 2 * border);
setMinimumSize(new_size);
update();
}
void RatioLayoutedFrame::setInnerFrameMaximumSize(const QSize& size)
{
int border = lineWidth();
QSize new_size = size;
new_size += QSize(2 * border, 2 * border);
setMaximumSize(new_size);
update();
}
void RatioLayoutedFrame::setInnerFrameFixedSize(const QSize& size)
{
setInnerFrameMinimumSize(size);
setInnerFrameMaximumSize(size);
}
void RatioLayoutedFrame::setAspectRatio(unsigned short width, unsigned short height)
{
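  // store the ratio reduced to lowest terms, e.g. 1920x1080 becomes 16:9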
int divisor = greatestCommonDivisor(width, height);
if (divisor != 0) {
aspect_ratio_.setWidth(width / divisor);
aspect_ratio_.setHeight(height / divisor);
}
}
int RatioLayoutedFrame::greatestCommonDivisor(int a, int b)
{
if (b==0)
{
return a;
}
return greatestCommonDivisor(b, a % b);
}
}<|fim▁end|>
| |
<|file_name|>test_proxy_middleware.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at<|fim▁hole|># http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests to assert that proxy headers middleware works as expected.
"""
from oslo_config import cfg
from ironic.tests.unit.api import base
CONF = cfg.CONF
class TestProxyHeadersMiddleware(base.BaseApiTest):
"""Provide a basic smoke test to ensure proxy headers middleware works."""
def setUp(self):
CONF.set_override('public_endpoint', 'http://spam.ham/eggs',
group='api')
self.proxy_headers = {"X-Forwarded-Proto": "https",
"X-Forwarded-Host": "mycloud.com",
"X-Forwarded-Prefix": "/ironic"}
super(TestProxyHeadersMiddleware, self).setUp()
def test_proxy_headers_enabled(self):
"""Test enabled proxy headers middleware overriding public_endpoint"""
# NOTE(pas-ha) setting config option and re-creating app
# as the middleware registers its config option on instantiation
CONF.set_override('enable_proxy_headers_parsing', True,
group='oslo_middleware')
self.app = self._make_app()
response = self.get_json('/', path_prefix="",
headers=self.proxy_headers)
href = response["default_version"]["links"][0]["href"]
self.assertTrue(href.startswith("https://mycloud.com/ironic"))
def test_proxy_headers_disabled(self):
"""Test proxy headers middleware disabled by default"""
response = self.get_json('/', path_prefix="",
headers=self.proxy_headers)
href = response["default_version"]["links"][0]["href"]
# check that [api]public_endpoint is used when proxy headers parsing
# is disabled
self.assertTrue(href.startswith("http://spam.ham/eggs"))<|fim▁end|>
|
#
|
<|file_name|>REM_console.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
import rem_backend.query_data as qd
import rem_backend.propagation_model_estimation as pm
import threading
import _thread
__author__ = "Daniel Denkovski", "Valentin Rakovic"
__copyright__ = "Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia"
__version__ = "0.1.0"
__email__ = "{danield, valentin}@feit.ukim.edu.mk"
'''
REM console module
Showcases the REM backend capabilities of the extension
Used as console interface for users to interact with the platform
'''
def main():
run = 1;
while (run):
print("Please choose from the selection:")
print("1. WiFi device localization")
print("2. Duty cycle calculation")
print("3. Path loss model estimation")
print("0. Quit")
choice = input(" >> ")
if (choice == '0'):
run = 0
elif (choice == '1'):
print("Loc:Enter the channel of interest")
chann = input(" >> ")
dev_list = qd.get_all_active_devices_on_channel(chann,1)
try:
print("Select the index of the device of interest")
ind = 1
for row in dev_list:
print("{}. {}".format(ind,row[0]))
ind += 1
devind = input(" >> ")<|fim▁hole|> try:
location = qd.estimate_tx_location(str(dev_list[int(devind)-1][0]),10)
print("The location of devices {} is:".format(str(dev_list[int(devind)-1][0])))
print("x:{} y:{} z:{} Pt:{} dBm".format(location[0],location[1],location[2],location[3]))
except:
print("not sufficient data for modeling")
print("")
except:
print("no devices")
print("")
elif (choice == '2'):
print("DC:Enter the channel of interest")
chann = input(" >> ")
ux, ul, dx, dy = input("provide ux ul dx dl coordinates of interest: ").split(' ')
try:
val = qd.get_duty_cycle_by_area(chann,10,ux,ul,dx,dy)
dc = val[0][0]
print("Duty cycle value for channel={} is {}".format(chann,dc))
except:
print("not sufficient data for modeling")
print("")
elif (choice == '3'):
print("PL:Enter the channel of interest")
chann = input(" >> ")
try:
val = pm.get_chann_model(10,chann)
print(val)
except:
print("not sufficient data for modeling")
print("")
if __name__=="__main__":
main()<|fim▁end|>
|
print(dev_list[int(devind)-1][0])
|
<|file_name|>stadium.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import sys
import re
import types
import random
import lxml
from lxml import etree
from copy import deepcopy
def usage():
print "Usage: %s countryfile regionname valuefile clubfile1 [clubfile2...]" % sys.argv[0]
print " countryfile: XML file of the country"
print " regionname: stadium region"
print " valuefile: values of clubs"
print " clubfile[s]: files with the data of the clubs"
print
print "Countryfile will be changed. Club output will be to standard output."
def main():
try:
countryfilename = sys.argv[1]
regionname = sys.argv[2]
valuefilename = sys.argv[3]
clubfilenames = sys.argv[4:]
except IndexError:
usage()
sys.exit(1)
try:
countryfile = open(countryfilename, 'r')
countryroot = etree.parse(countryfile)
countryfile.close()
except IOError:
print "could not open", countryfilename
sys.exit(1)
regions = countryroot.findall(".//region")
countrynode = countryroot.find(".//country")
countryname = countrynode.get("name")
ourregion = None
for element in regions:
if element.get("name") == regionname:
ourregion = element
break
if type(ourregion) == types.NoneType:
# Region not found; creating one
ourregion = etree.Element("region", name=regionname)
regions.append(ourregion)
stadiums = []
stadiums = ourregion.findall(".//stadium")
countrychanged = False
dooutput = True
valuetable = {}
try:
valuefile = open(valuefilename, 'r')
except IOError:
print "could not open %s" % valuefilename
sys.exit(1)
for line in valuefile.readlines():
info = line.split('\t', 1)
valuetable[info[0].strip()] = int(info[1].strip())
valuefile.close()
clubsroot = etree.Element("Clubs")
for element in clubfilenames:
try:
clubfile = open(element, 'r')
clublines = clubfile.read()
clubfile.close()
except IOError:
print "could not open %s" % clubfilenames[0]
sys.exit(1)
clubname = re.compile(r'^ *([a-zA-Z 0-9-\'&]*)$', re.M).search(clublines, 1)
stadiumname = re.compile(r'^ *Ground \(ground history\) *([a-zA-Z 0-9-\'&]*?) *$', re.M).search(clublines, 1)
if type(stadiumname) != types.NoneType:
stadname = stadiumname.groups()[0]
else:
stadname = clubname.groups()[0] + " Stadium"
stadiumnode = etree.Element("stadium", name=stadname)
try:
thisvalue = valuetable[clubname.groups()[0]]
except:
if dooutput == True:
print "Could not find team %s in the values file" % clubname.groups()[0]
print "File that was being processed: %s" % element
print "No changes will be made."
dooutput = False
else:
print "%s - %s" % (clubname.groups()[0], element)
stadfound = False
for element in stadiums:
if element.get("name") == stadname:
stadfound = True
break
if stadfound == False:
countrystadiumnode = deepcopy(stadiumnode)
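            # capacity grows super-linearly with club value and is rounded down to the nearest hundred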
stadiumcapacity = int(thisvalue**(2.1)/25)/100*100
capnode = etree.Element("capacity", value="%d" % stadiumcapacity)
countrystadiumnode.append(capnode)
ourregion.append(countrystadiumnode)
stadiums.append(countrystadiumnode)
countrychanged = True
clubnode = etree.Element("club", name=clubname.groups()[0])
kit1node = etree.Element("kit")
jerseynode = etree.Element("jersey")
jerseynode.set("type", "0")
shortsnode = etree.Element("shorts")
socksnode = etree.Element("socks")
colornode = etree.Element("color")
colornode.set("r", "255")
colornode.set("g", "255")
colornode.set("b", "255")
imagenode = etree.Element("image", value="")
jerseynode.append(deepcopy(colornode))
jerseynode.append(imagenode)
shortsnode.append(deepcopy(colornode))
socksnode.append(colornode)
kit1node.append(jerseynode)
kit1node.append(shortsnode)
kit1node.append(socksnode)
kitsnode = etree.Element("kits")
kitsnode.append(deepcopy(kit1node))
kitsnode.append(deepcopy(kit1node))
clubnode.append(kitsnode)
clcountrynode = etree.Element("country", name=countryname)<|fim▁hole|> clregionnode = etree.Element("region", name=regionname)
clubnode.append(clcountrynode)
clubnode.append(clregionnode)
clubnode.append(stadiumnode)
clubsroot.append(clubnode)
if dooutput == True:
print (etree.tostring(clubsroot, pretty_print=True, encoding="UTF-8"))
if countrychanged:
parser = etree.XMLParser(remove_blank_text=True)
countrynew = etree.fromstring(etree.tostring(countryroot), parser)
countrystring = etree.tostring(countrynew, pretty_print=True, encoding="UTF-8")
countryfile = open(countryfilename, 'w')
countryfile.write(countrystring)
countryfile.close()
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>UriMapFeature.hpp<|end_file_name|><|fim▁begin|>/*
* UriMapFeature.hpp
*
* Copyright (c) 2010 Paul Giblock <pgib/at/users.sourceforge.net>
*
* This file is part of Unison - http://unison.sourceforge.net
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program (see COPYING); if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*
*/
#ifndef UNISON_LV2_URI_MAP_FEATURE_H
#define UNISON_LV2_URI_MAP_FEATURE_H
#include "Feature.hpp"
#include <lv2/uri-map.lv2/uri-map.h>
namespace Lv2 {
namespace Internal {
class UriMap;
<|fim▁hole|> public:
UriMapFeature (UriMap* uriMap);
LV2_Feature* lv2Feature ();
void initialize (LV2_Feature*, const Lv2Plugin&) const {};
void cleanup (LV2_Feature*) const {};
private:
static uint32_t uriToId (LV2_URI_Map_Callback_Data cbData, const char* map, const char* uri);
LV2_Feature m_feature;
LV2_URI_Map_Feature m_data;
UriMap* m_uriMap;
};
} // Internal
} // Lv2
#endif
// vim: tw=90 ts=8 sw=2 sts=2 et sta noai<|fim▁end|>
|
class UriMapFeature : public Feature
{
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from .analyzer import Pep8Analyzer
from .issues_data import issues_data
analyzers = {
'pep8' :
{
'title' : 'Pep-8',
'class' : Pep8Analyzer,
'language' : 'python',
'issues_data' : issues_data,
},<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>clang.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
"""
clang/llvm detection.
"""
import os, sys
from waflib import Configure, Options, Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_clang(conf):
"""
Find the program clang, and if present, try to detect its version number
"""
cc = conf.find_program(['clang', 'cc'], var='CC')
cc = conf.cmd_to_list(cc)
conf.get_cc_version(cc, gcc=True)
conf.env.CC_NAME = 'clang'
conf.env.CC = cc
@conf
def clang_common_flags(conf):
"""
Common flags for clang on nearly all platforms
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = []
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
v['STLIB_MARKER'] = '-Wl,-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
	# shared library
v['CFLAGS_cshlib'] = ['-fPIC']
v['LINKFLAGS_cshlib'] = ['-shared']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
# osx stuff
v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
v['CFLAGS_MACBUNDLE'] = ['-fPIC']
v['macbundle_PATTERN'] = '%s.bundle'
@conf
def clang_modifier_win32(conf):
"""Configuration flags for executing clang on Windows"""
v = conf.env
v['cprogram_PATTERN'] = '%s.exe'
v['cshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = 'lib%s.dll.a'
v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
v['CFLAGS_cshlib'] = []
v.append_value('CFLAGS_cshlib', ['-DDLL_EXPORT']) # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def clang_modifier_cygwin(conf):
"""Configuration flags for executing clang on Cygwin"""
clang_modifier_win32(conf)
v = conf.env
v['cshlib_PATTERN'] = 'cyg%s.dll'
v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
v['CFLAGS_cshlib'] = []
@conf
def clang_modifier_darwin(conf):
"""Configuration flags for executing clang on MacOS"""
v = conf.env
v['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
v['LINKFLAGS_cshlib'] = ['-dynamiclib']
v['cshlib_PATTERN'] = 'lib%s.dylib'
v['FRAMEWORKPATH_ST'] = '-F%s'
v['FRAMEWORK_ST'] = ['-framework']
v['ARCH_ST'] = ['-arch']
v['LINKFLAGS_cstlib'] = []
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = []
v['SONAME_ST'] = []<|fim▁hole|>
@conf
def clang_modifier_aix(conf):
"""Configuration flags for executing clang on AIX"""
v = conf.env
v['LINKFLAGS_cprogram'] = ['-Wl,-brtl']
v['LINKFLAGS_cshlib'] = ['-shared','-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER'] = []
@conf
def clang_modifier_hpux(conf):
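	"""Configuration flags for executing clang on HP-UX"""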
v = conf.env
v['SHLIB_MARKER'] = []
v['CFLAGS_cshlib'] = ['-fPIC','-DPIC']
v['cshlib_PATTERN'] = 'lib%s.sl'
@conf
def clang_modifier_platform(conf):
"""Execute platform-specific functions based on *clang_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it fallbacks to sys.platform.
clang_modifier_func = getattr(conf, 'clang_modifier_' + conf.env.DEST_OS, None)
if clang_modifier_func:
clang_modifier_func()
def configure(conf):
"""
Configuration for clang
"""
conf.find_clang()
conf.find_ar()
conf.clang_common_flags()
conf.clang_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()<|fim▁end|>
| |
<|file_name|>subresource.py<|end_file_name|><|fim▁begin|>import os, sys, json, urlparse, urllib
def get_template(template_basename):
script_directory = os.path.dirname(os.path.abspath(__file__))
template_directory = os.path.abspath(os.path.join(script_directory,
"..",
"template"))
template_filename = os.path.join(template_directory, template_basename);
with open(template_filename, "r") as f:
return f.read()
# TODO(kristijanburnik): subdomain_prefix is a hardcoded value aligned with
# referrer-policy-test-case.js. The prefix should be configured in one place.
def get_swapped_origin_netloc(netloc, subdomain_prefix = "www1."):
if netloc.startswith(subdomain_prefix):
return netloc[len(subdomain_prefix):]
else:
return subdomain_prefix + netloc
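# Builds an absolute URL pointing back at this resource; with cross_origin=True the
# host is swapped via get_swapped_origin_netloc so the redirect crosses origins.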
def create_redirect_url(request, cross_origin = False):
parsed = urlparse.urlsplit(request.url)
destination_netloc = parsed.netloc
if cross_origin:<|fim▁hole|> scheme = parsed.scheme,
netloc = destination_netloc,
path = parsed.path,
query = None,
fragment = None))
return destination_url
def redirect(url, response):
response.add_required_headers = False
response.writer.write_status(301)
response.writer.write_header("access-control-allow-origin", "*")
response.writer.write_header("location", url)
response.writer.end_headers()
response.writer.write("")
def preprocess_redirection(request, response):
if "redirection" not in request.GET:
return False
redirection = request.GET["redirection"]
if redirection == "no-redirect":
return False
elif redirection == "keep-origin-redirect":
redirect_url = create_redirect_url(request, cross_origin = False)
elif redirection == "swap-origin-redirect":
redirect_url = create_redirect_url(request, cross_origin = True)
else:
raise ValueError("Invalid redirection type '%s'" % redirection)
redirect(redirect_url, response)
return True
def __noop(request, response):
return ""
def respond(request,
response,
status_code = 200,
content_type = "text/html",
payload_generator = __noop,
cache_control = "no-cache; must-revalidate",
access_control_allow_origin = "*"):
if preprocess_redirection(request, response):
return
response.add_required_headers = False
response.writer.write_status(status_code)
if access_control_allow_origin != None:
response.writer.write_header("access-control-allow-origin",
access_control_allow_origin)
response.writer.write_header("content-type", content_type)
response.writer.write_header("cache-control", cache_control)
response.writer.end_headers()
server_data = {"headers": json.dumps(request.headers, indent = 4)}
payload = payload_generator(server_data)
response.writer.write(payload)<|fim▁end|>
|
destination_netloc = get_swapped_origin_netloc(parsed.netloc)
destination_url = urlparse.urlunsplit(urlparse.SplitResult(
|
<|file_name|>0023_auto_20160505_1636.py<|end_file_name|><|fim▁begin|>from django.db import migrations
<|fim▁hole|>
operations = [
migrations.AlterModelOptions(
name="candidateresult",
options={"ordering": ("-num_ballots_reported",)},
)
]<|fim▁end|>
|
class Migration(migrations.Migration):
dependencies = [("uk_results", "0022_postresult_confirmed_resultset")]
|
<|file_name|>widgets.py<|end_file_name|><|fim▁begin|>""" Form widget classes """
from __future__ import absolute_import
from django.conf import settings
from django.forms.utils import flatatt
from django.forms.widgets import CheckboxInput
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
<|fim▁hole|>
class TermsOfServiceCheckboxInput(CheckboxInput):
""" Renders a checkbox with a label linking to the terms of service. """
def render(self, name, value, attrs=None):
extra_attrs = attrs.copy()
extra_attrs.update({'type': 'checkbox', 'name': name})
final_attrs = self.build_attrs(self.attrs, extra_attrs=extra_attrs)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
# Translators: link_start and link_end are HTML tags for a link to the terms of service.
# platform_name is the name of this Open edX installation.
label = Text(_(
u'I, and my organization, accept the {link_start}{platform_name} API Terms of Service{link_end}.'
)).format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_start=HTML(u'<a href="{url}" rel="noopener" target="_blank">').format(
url=reverse('api_admin:api-tos')
),
link_end=HTML('</a>'),
)
html = HTML(u'<input{{}} /> <label class="tos-checkbox-label" for="{id}">{label}</label>').format(
id=final_attrs['id'],
label=label
)
return format_html(html, flatatt(final_attrs))<|fim▁end|>
| |
<|file_name|>htmlmediaelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use audio_video_metadata;
use document_loader::{LoadBlocker, LoadType};
use dom::attr::Attr;
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::CanPlayTypeResult;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementConstants;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementMethods;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorConstants::*;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorMethods;
use dom::bindings::codegen::InheritTypes::{ElementTypeId, HTMLElementTypeId};
use dom::bindings::codegen::InheritTypes::{HTMLMediaElementTypeId, NodeTypeId};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::DomObject;
use dom::bindings::root::{DomRoot, MutNullableDom};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, AttributeMutation};
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlsourceelement::HTMLSourceElement;
use dom::mediaerror::MediaError;
use dom::node::{window_from_node, document_from_node, Node, UnbindContext};
use dom::promise::Promise;
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use microtask::{Microtask, MicrotaskRunnable};
use mime::{Mime, SubLevel, TopLevel};
use net_traits::{FetchResponseListener, FetchMetadata, Metadata, NetworkError};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use network_listener::{NetworkListener, PreInvoke};
use script_thread::ScriptThread;
use servo_url::ServoUrl;
use std::cell::Cell;
use std::collections::VecDeque;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use task_source::TaskSource;
use time::{self, Timespec, Duration};
#[dom_struct]
// FIXME(nox): A lot of tasks queued for this element should probably be in the
// media element event task source.
pub struct HTMLMediaElement {
htmlelement: HTMLElement,
/// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
network_state: Cell<NetworkState>,
/// https://html.spec.whatwg.org/multipage/#dom-media-readystate
ready_state: Cell<ReadyState>,
/// https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
current_src: DomRefCell<String>,
/// Incremented whenever tasks associated with this element are cancelled.
generation_id: Cell<u32>,
/// https://html.spec.whatwg.org/multipage/#fire-loadeddata
///
/// Reset to false every time the load algorithm is invoked.
fired_loadeddata_event: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#dom-media-error
error: MutNullableDom<MediaError>,
/// https://html.spec.whatwg.org/multipage/#dom-media-paused
paused: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#attr-media-autoplay
autoplaying: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#delaying-the-load-event-flag
delaying_the_load_event_flag: DomRefCell<Option<LoadBlocker>>,
/// https://html.spec.whatwg.org/multipage/#list-of-pending-play-promises
#[ignore_heap_size_of = "promises are hard"]
pending_play_promises: DomRefCell<Vec<Rc<Promise>>>,
/// Play promises which are soon to be fulfilled by a queued task.
#[ignore_heap_size_of = "promises are hard"]
in_flight_play_promises_queue: DomRefCell<VecDeque<(Box<[Rc<Promise>]>, ErrorResult)>>,
/// The details of the video currently related to this media element.
// FIXME(nox): Why isn't this in HTMLVideoElement?
video: DomRefCell<Option<VideoMedia>>,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq)]
#[repr(u8)]
pub enum NetworkState {
Empty = HTMLMediaElementConstants::NETWORK_EMPTY as u8,
Idle = HTMLMediaElementConstants::NETWORK_IDLE as u8,
Loading = HTMLMediaElementConstants::NETWORK_LOADING as u8,
NoSource = HTMLMediaElementConstants::NETWORK_NO_SOURCE as u8,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-readystate
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq, PartialOrd)]
#[repr(u8)]
enum ReadyState {
HaveNothing = HTMLMediaElementConstants::HAVE_NOTHING as u8,
HaveMetadata = HTMLMediaElementConstants::HAVE_METADATA as u8,
HaveCurrentData = HTMLMediaElementConstants::HAVE_CURRENT_DATA as u8,
HaveFutureData = HTMLMediaElementConstants::HAVE_FUTURE_DATA as u8,
HaveEnoughData = HTMLMediaElementConstants::HAVE_ENOUGH_DATA as u8,
}
#[derive(HeapSizeOf, JSTraceable)]
pub struct VideoMedia {
format: String,
#[ignore_heap_size_of = "defined in time"]
duration: Duration,
width: u32,
height: u32,
video: String,
audio: Option<String>,
}
impl HTMLMediaElement {
pub fn new_inherited(
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> Self {
Self {
htmlelement: HTMLElement::new_inherited(tag_name, prefix, document),
network_state: Cell::new(NetworkState::Empty),
ready_state: Cell::new(ReadyState::HaveNothing),
current_src: DomRefCell::new("".to_owned()),
generation_id: Cell::new(0),
fired_loadeddata_event: Cell::new(false),
error: Default::default(),
paused: Cell::new(true),
// FIXME(nox): Why is this initialised to true?
autoplaying: Cell::new(true),
delaying_the_load_event_flag: Default::default(),
pending_play_promises: Default::default(),
in_flight_play_promises_queue: Default::default(),
video: DomRefCell::new(None),
}
}
fn media_type_id(&self) -> HTMLMediaElementTypeId {
match self.upcast::<Node>().type_id() {
NodeTypeId::Element(ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLMediaElement(media_type_id),
)) => {
media_type_id
},
_ => unreachable!(),
}
}
/// Marks that element as delaying the load event or not.
///
/// Nothing happens if the element was already delaying the load event and
/// we pass true to that method again.
///
/// https://html.spec.whatwg.org/multipage/#delaying-the-load-event-flag
fn delay_load_event(&self, delay: bool) {
let mut blocker = self.delaying_the_load_event_flag.borrow_mut();
if delay && blocker.is_none() {
*blocker = Some(LoadBlocker::new(&document_from_node(self), LoadType::Media));
} else if !delay && blocker.is_some() {
LoadBlocker::terminate(&mut *blocker);
}
}
/// https://html.spec.whatwg.org/multipage/#dom-media-play
// FIXME(nox): Move this back to HTMLMediaElementMethods::Play once
// Rc<Promise> doesn't require #[allow(unrooted_must_root)] anymore.
fn play(&self, promise: &Rc<Promise>) {
// Step 1.
// FIXME(nox): Reject promise if not allowed to play.
// Step 2.
if self.error.get().map_or(false, |e| e.Code() == MEDIA_ERR_SRC_NOT_SUPPORTED) {
promise.reject_error(Error::NotSupported);
return;
}
// Step 3.
self.push_pending_play_promise(promise);
// Step 4.
if self.network_state.get() == NetworkState::Empty {
self.invoke_resource_selection_algorithm();
}
// Step 5.
// FIXME(nox): Seek to earliest possible position if playback has ended
// and direction of playback is forwards.
let state = self.ready_state.get();
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
if self.Paused() {
// Step 6.1.
self.paused.set(false);
// Step 6.2.
// FIXME(nox): Set show poster flag to false and run time marches on
// steps if show poster flag is true.
// Step 6.3.
task_source.queue_simple_event(self.upcast(), atom!("play"), &window);
// Step 6.4.
match state {
ReadyState::HaveNothing |
ReadyState::HaveMetadata |
ReadyState::HaveCurrentData => {
task_source.queue_simple_event(
self.upcast(),
atom!("waiting"),
&window,
);
},
ReadyState::HaveFutureData |
ReadyState::HaveEnoughData => {
self.notify_about_playing();
}
}
} else if state == ReadyState::HaveFutureData || state == ReadyState::HaveEnoughData {
// Step 7.
self.take_pending_play_promises(Ok(()));
let this = Trusted::new(self);
let generation_id = self.generation_id.get();
task_source.queue(
task!(resolve_pending_play_promises: move || {
let this = this.root();
if generation_id != this.generation_id.get() {
return;
}
this.fulfill_in_flight_play_promises(|| ());
}),
window.upcast(),
).unwrap();
}
// Step 8.
self.autoplaying.set(false);
// Step 9.
// Not applicable here, the promise is returned from Play.
}
/// https://html.spec.whatwg.org/multipage/#internal-pause-steps
fn internal_pause_steps(&self) {
// Step 1.
self.autoplaying.set(false);
// Step 2.
if !self.Paused() {
// Step 2.1.
self.paused.set(true);
// Step 2.2.
self.take_pending_play_promises(Err(Error::Abort));
// Step 2.3.
let window = window_from_node(self);
let this = Trusted::new(self);
let generation_id = self.generation_id.get();
// FIXME(nox): Why are errors silenced here?
// FIXME(nox): Media element event task source should be used here.
let _ = window.dom_manipulation_task_source().queue(
task!(internal_pause_steps: move || {
let this = this.root();
if generation_id != this.generation_id.get() {
return;
}
this.fulfill_in_flight_play_promises(|| {
// Step 2.3.1.
this.upcast::<EventTarget>().fire_event(atom!("timeupdate"));
// Step 2.3.2.
this.upcast::<EventTarget>().fire_event(atom!("pause"));
// Step 2.3.3.
// Done after running this closure in
// `fulfill_in_flight_play_promises`.
});
}),
window.upcast(),
);
// Step 2.4.
// FIXME(nox): Set the official playback position to the current
// playback position.
}
}
// https://html.spec.whatwg.org/multipage/#notify-about-playing
fn notify_about_playing(&self) {
// Step 1.
self.take_pending_play_promises(Ok(()));
// Step 2.
let window = window_from_node(self);
let this = Trusted::new(self);
let generation_id = self.generation_id.get();
// FIXME(nox): Why are errors silenced here?
// FIXME(nox): Media element event task source should be used here.
let _ = window.dom_manipulation_task_source().queue(
task!(notify_about_playing: move || {
let this = this.root();
if generation_id != this.generation_id.get() {
return;
}
this.fulfill_in_flight_play_promises(|| {
// Step 2.1.
this.upcast::<EventTarget>().fire_event(atom!("playing"));
// Step 2.2.
// Done after running this closure in
// `fulfill_in_flight_play_promises`.
});
}),
window.upcast(),
);
}
// https://html.spec.whatwg.org/multipage/#ready-states
fn change_ready_state(&self, ready_state: ReadyState) {
let old_ready_state = self.ready_state.get();
self.ready_state.set(ready_state);
if self.network_state.get() == NetworkState::Empty {
return;
}
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
// Step 1.
match (old_ready_state, ready_state) {
(ReadyState::HaveNothing, ReadyState::HaveMetadata) => {
task_source.queue_simple_event(
self.upcast(),
atom!("loadedmetadata"),
&window,
);
// No other steps are applicable in this case.
return;
},
(ReadyState::HaveMetadata, new) if new >= ReadyState::HaveCurrentData => {
if !self.fired_loadeddata_event.get() {
self.fired_loadeddata_event.set(true);
let this = Trusted::new(self);
// FIXME(nox): Why are errors silenced here?
let _ = task_source.queue(
task!(media_reached_current_data: move || {
let this = this.root();
this.upcast::<EventTarget>().fire_event(atom!("loadeddata"));
this.delay_load_event(false);
}),
window.upcast(),
);
}
// Steps for the transition from HaveMetadata to HaveCurrentData
// or HaveFutureData also apply here, as per the next match
// expression.
},
(ReadyState::HaveFutureData, new) if new <= ReadyState::HaveCurrentData => {
// FIXME(nox): Queue a task to fire timeupdate and waiting
                // events if the conditions called for by the spec are met.
// No other steps are applicable in this case.
return;
},
_ => (),
}
if old_ready_state <= ReadyState::HaveCurrentData && ready_state >= ReadyState::HaveFutureData {
task_source.queue_simple_event(
self.upcast(),
atom!("canplay"),
&window,
);
if !self.Paused() {
self.notify_about_playing();
}
}
if ready_state == ReadyState::HaveEnoughData {
// TODO: Check sandboxed automatic features browsing context flag.
// FIXME(nox): I have no idea what this TODO is about.
// FIXME(nox): Review this block.
if self.autoplaying.get() &&
self.Paused() &&
self.Autoplay() {
// Step 1
self.paused.set(false);
// TODO step 2: show poster
// Step 3
task_source.queue_simple_event(
self.upcast(),
atom!("play"),
&window,
);
// Step 4
self.notify_about_playing();
// Step 5
self.autoplaying.set(false);
}
// FIXME(nox): According to the spec, this should come *before* the
// "play" event.
task_source.queue_simple_event(
self.upcast(),
atom!("canplaythrough"),
&window,
);
}
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
fn invoke_resource_selection_algorithm(&self) {
// Step 1.
self.network_state.set(NetworkState::NoSource);
// Step 2.
// FIXME(nox): Set show poster flag to true.
// Step 3.
self.delay_load_event(true);
// Step 4.
// If the resource selection mode in the synchronous section is
// "attribute", the URL of the resource to fetch is relative to the
// media element's node document when the src attribute was last
// changed, which is why we need to pass the base URL in the task
// right here.
let doc = document_from_node(self);
let task = MediaElementMicrotask::ResourceSelectionTask {
elem: DomRoot::from_ref(self),
base_url: doc.base_url()
};
        // FIXME(nox): This will later call the resource_selection_algorithm_sync
        // method below; if microtasks were trait objects, we would be able
        // to put the code directly in this method, without the boilerplate
        // indirections.
ScriptThread::await_stable_state(Microtask::MediaElement(task));
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
fn resource_selection_algorithm_sync(&self, base_url: ServoUrl) {
// Step 5.
// FIXME(nox): Maybe populate the list of pending text tracks.
// Step 6.
enum Mode {
// FIXME(nox): Support media object provider.
#[allow(dead_code)]
Object,
Attribute(String),
Children(DomRoot<HTMLSourceElement>),
}
fn mode(media: &HTMLMediaElement) -> Option<Mode> {
if let Some(attr) = media.upcast::<Element>().get_attribute(&ns!(), &local_name!("src")) {
return Some(Mode::Attribute(attr.Value().into()));
}
let source_child_element = media.upcast::<Node>()
.children()
.filter_map(DomRoot::downcast::<HTMLSourceElement>)
.next();
if let Some(element) = source_child_element {
return Some(Mode::Children(element));
}
None
}
let mode = if let Some(mode) = mode(self) {
mode
} else {
self.network_state.set(NetworkState::Empty);
// https://github.com/whatwg/html/issues/3065
self.delay_load_event(false);
return;
};
// Step 7.
self.network_state.set(NetworkState::Loading);
// Step 8.
let window = window_from_node(self);
window.dom_manipulation_task_source().queue_simple_event(
self.upcast(),
atom!("loadstart"),
&window,
);
// Step 9.
match mode {
// Step 9.obj.
Mode::Object => {
// Step 9.obj.1.
*self.current_src.borrow_mut() = "".to_owned();
// Step 9.obj.2.
                // FIXME(nox): The rest of the steps should be run in parallel.
// Step 9.obj.3.
                // Note that the resource fetch algorithm itself takes care
                // of the cleanup in case of failure.
// FIXME(nox): Pass the assigned media provider here.
self.resource_fetch_algorithm(Resource::Object);
},
Mode::Attribute(src) => {
// Step 9.attr.1.
if src.is_empty() {
self.queue_dedicated_media_source_failure_steps();
return;
}
// Step 9.attr.2.
let url_record = match base_url.join(&src) {
Ok(url) => url,
Err(_) => {
self.queue_dedicated_media_source_failure_steps();
return;
}
};
// Step 9.attr.3.
*self.current_src.borrow_mut() = url_record.as_str().into();
// Step 9.attr.4.
                // Note that the resource fetch algorithm itself takes care
                // of the cleanup in case of failure.
self.resource_fetch_algorithm(Resource::Url(url_record));
},
Mode::Children(_source) => {
// Step 9.children.
self.queue_dedicated_media_source_failure_steps()
},
}
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-resource
fn resource_fetch_algorithm(&self, resource: Resource) {
// Steps 1-2.
        // Not applicable: the `resource` variable already conveys which mode
        // is in use.
// Step 3.
// FIXME(nox): Remove all media-resource-specific text tracks.
// Step 4.
match resource {
Resource::Url(url) => {
// Step 4.remote.1.
if self.Preload() == "none" && !self.autoplaying.get() {
// Step 4.remote.1.1.
self.network_state.set(NetworkState::Idle);
// Step 4.remote.1.2.
let window = window_from_node(self);
window.dom_manipulation_task_source().queue_simple_event(
self.upcast(),
atom!("suspend"),
&window,
);
// Step 4.remote.1.3.
let this = Trusted::new(self);
window.dom_manipulation_task_source().queue(
task!(set_media_delay_load_event_flag_to_false: move || {
this.root().delay_load_event(false);
}),
window.upcast(),
).unwrap();
// Steps 4.remote.1.4.
                    // FIXME(nox): Somehow we should wait for the task from the
                    // previous step to be run before continuing.
// Steps 4.remote.1.5-4.remote.1.7.
// FIXME(nox): Wait for an implementation-defined event and
// then continue with the normal set of steps instead of just
// returning.
return;
}
// Step 4.remote.2.
// FIXME(nox): Handle CORS setting from crossorigin attribute.
let document = document_from_node(self);
let type_ = match self.media_type_id() {
HTMLMediaElementTypeId::HTMLAudioElement => RequestType::Audio,
HTMLMediaElementTypeId::HTMLVideoElement => RequestType::Video,
};
let request = RequestInit {
url,
type_,
destination: Destination::Media,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: document.origin().immutable().clone(),
pipeline_id: Some(self.global().pipeline_id()),
referrer_url: Some(document.url()),
referrer_policy: document.get_referrer_policy(),
.. RequestInit::default()
};
let context = Arc::new(Mutex::new(HTMLMediaElementContext::new(self)));
let (action_sender, action_receiver) = ipc::channel().unwrap();
let window = window_from_node(self);
let listener = NetworkListener {
context: context,
task_source: window.networking_task_source(),
canceller: Some(window.task_canceller())
};
ROUTER.add_route(action_receiver.to_opaque(), box move |message| {
listener.notify_fetch(message.to().unwrap());
});
document.loader().fetch_async_background(request, action_sender);
},
Resource::Object => {
// FIXME(nox): Use the current media resource.
self.queue_dedicated_media_source_failure_steps();
},
}
}
/// Queues a task to run the [dedicated media source failure steps][steps].
///
/// [steps]: https://html.spec.whatwg.org/multipage/#dedicated-media-source-failure-steps
fn queue_dedicated_media_source_failure_steps(&self) {
let window = window_from_node(self);
let this = Trusted::new(self);
let generation_id = self.generation_id.get();
self.take_pending_play_promises(Err(Error::NotSupported));
// FIXME(nox): Why are errors silenced here?
// FIXME(nox): Media element event task source should be used here.
let _ = window.dom_manipulation_task_source().queue(
task!(dedicated_media_source_failure_steps: move || {
let this = this.root();
if generation_id != this.generation_id.get() {
return;
}
this.fulfill_in_flight_play_promises(|| {
// Step 1.
this.error.set(Some(&*MediaError::new(
&window_from_node(&*this),
MEDIA_ERR_SRC_NOT_SUPPORTED,
)));
// Step 2.
// FIXME(nox): Forget the media-resource-specific tracks.
// Step 3.
this.network_state.set(NetworkState::NoSource);
// Step 4.
// FIXME(nox): Set show poster flag to true.
// Step 5.
this.upcast::<EventTarget>().fire_event(atom!("error"));
// Step 6.
// Done after running this closure in
// `fulfill_in_flight_play_promises`.
});
// Step 7.
this.delay_load_event(false);
}),
window.upcast(),
);
}
// https://html.spec.whatwg.org/multipage/#media-element-load-algorithm
fn media_element_load_algorithm(&self) {
// Reset the flag that signals whether loadeddata was ever fired for
        // this invocation of the load algorithm.
self.fired_loadeddata_event.set(false);
// Step 1.
// FIXME(nox): Abort any already-running instance of the
// resource selection algorithm.
// Steps 2-4.
self.generation_id.set(self.generation_id.get() + 1);
while !self.in_flight_play_promises_queue.borrow().is_empty() {
self.fulfill_in_flight_play_promises(|| ());
}
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
// Step 5.
let network_state = self.network_state.get();
if network_state == NetworkState::Loading || network_state == NetworkState::Idle {
task_source.queue_simple_event(self.upcast(), atom!("abort"), &window);
}
// Step 6.
if network_state != NetworkState::Empty {
// Step 6.1.
task_source.queue_simple_event(self.upcast(), atom!("emptied"), &window);
// Step 6.2.
// FIXME(nox): Abort in-progress fetching process.
// Step 6.3.
// FIXME(nox): Detach MediaSource media provider object.
// Step 6.4.
// FIXME(nox): Forget the media-resource-specific tracks.
// Step 6.5.
if self.ready_state.get() != ReadyState::HaveNothing {
self.change_ready_state(ReadyState::HaveNothing);
}
// Step 6.6.
if !self.Paused() {
// Step 6.6.1.
self.paused.set(true);
// Step 6.6.2.
self.take_pending_play_promises(Err(Error::Abort));
self.fulfill_in_flight_play_promises(|| ());
}
// Step 6.7.
// FIXME(nox): If seeking is true, set it to false.
// Step 6.8.
// FIXME(nox): Set current and official playback position to 0 and
// maybe queue a task to fire a timeupdate event.
// Step 6.9.
// FIXME(nox): Set timeline offset to NaN.
// Step 6.10.
// FIXME(nox): Set duration to NaN.
}
// Step 7.
// FIXME(nox): Set playbackRate to defaultPlaybackRate.
// Step 8.
self.error.set(None);
self.autoplaying.set(true);
// Step 9.
self.invoke_resource_selection_algorithm();
// Step 10.
// FIXME(nox): Stop playback of any previously running media resource.
}
/// Appends a promise to the list of pending play promises.
#[allow(unrooted_must_root)]
fn push_pending_play_promise(&self, promise: &Rc<Promise>) {
self.pending_play_promises.borrow_mut().push(promise.clone());
}
/// Takes the pending play promises.
///
/// The result with which these promises will be fulfilled is passed here
/// and this method returns nothing because we actually just move the
/// current list of pending play promises to the
/// `in_flight_play_promises_queue` field.
///
/// Each call to this method must be followed by a call to
/// `fulfill_in_flight_play_promises`, to actually fulfill the promises
/// which were taken and moved to the in-flight queue.
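    ///
    /// An illustrative pairing, mirroring the existing callers in this file
    /// (a sketch of the contract only, not additional API):
    ///
    /// ```ignore
    /// self.take_pending_play_promises(Ok(()));
    /// // ...then, from the task queued afterwards (where `this` is the
    /// // rooted element)...
    /// this.fulfill_in_flight_play_promises(|| ());
    /// ```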
#[allow(unrooted_must_root)]
fn take_pending_play_promises(&self, result: ErrorResult) {
let pending_play_promises = mem::replace(
&mut *self.pending_play_promises.borrow_mut(),
vec![],
);
self.in_flight_play_promises_queue.borrow_mut().push_back((
pending_play_promises.into(),
result,
));
}
/// Fulfills the next in-flight play promises queue after running a closure.
///
/// See the comment on `take_pending_play_promises` for why this method
/// does not take a list of promises to fulfill. Callers cannot just pop
/// the front list off of `in_flight_play_promises_queue` and later fulfill
/// the promises because that would mean putting
/// `#[allow(unrooted_must_root)]` on even more functions, potentially
/// hiding actual safety bugs.
#[allow(unrooted_must_root)]
fn fulfill_in_flight_play_promises<F>(&self, f: F)
where
F: FnOnce(),
{
let (promises, result) = self.in_flight_play_promises_queue
.borrow_mut()
.pop_front()
.expect("there should be at least one list of in flight play promises");
f();
for promise in &*promises {
match result {
Ok(ref value) => promise.resolve_native(value),
Err(ref error) => promise.reject_error(error.clone()),
}
}
}
/// Handles insertion of `source` children.
///
/// https://html.spec.whatwg.org/multipage/#the-source-element:nodes-are-inserted
pub fn handle_source_child_insertion(&self) {
if self.upcast::<Element>().has_attribute(&local_name!("src")) {
return;
}
if self.network_state.get() != NetworkState::Empty {
return;
}
self.media_element_load_algorithm();
}
}
impl HTMLMediaElementMethods for HTMLMediaElement {
// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
fn NetworkState(&self) -> u16 {
self.network_state.get() as u16
}
// https://html.spec.whatwg.org/multipage/#dom-media-readystate
fn ReadyState(&self) -> u16 {
self.ready_state.get() as u16
}
// https://html.spec.whatwg.org/multipage/#dom-media-autoplay
make_bool_getter!(Autoplay, "autoplay");
// https://html.spec.whatwg.org/multipage/#dom-media-autoplay
make_bool_setter!(SetAutoplay, "autoplay");
// https://html.spec.whatwg.org/multipage/#dom-media-src
make_url_getter!(Src, "src");
// https://html.spec.whatwg.org/multipage/#dom-media-src
make_setter!(SetSrc, "src");
// https://html.spec.whatwg.org/multipage/#attr-media-preload
// Missing value default is user-agent defined.
make_enumerated_getter!(Preload, "preload", "", "none" | "metadata" | "auto");
// https://html.spec.whatwg.org/multipage/#attr-media-preload
make_setter!(SetPreload, "preload");
// https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
fn CurrentSrc(&self) -> DOMString {
DOMString::from(self.current_src.borrow().clone())
}
// https://html.spec.whatwg.org/multipage/#dom-media-load
fn Load(&self) {
self.media_element_load_algorithm();
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-canplaytype
fn CanPlayType(&self, type_: DOMString) -> CanPlayTypeResult {
match type_.parse::<Mime>() {
Ok(Mime(TopLevel::Application, SubLevel::OctetStream, _)) |
Err(_) => {
CanPlayTypeResult::_empty
},
_ => CanPlayTypeResult::Maybe
}
}
// https://html.spec.whatwg.org/multipage/#dom-media-error
fn GetError(&self) -> Option<DomRoot<MediaError>> {
self.error.get()
}
// https://html.spec.whatwg.org/multipage/#dom-media-play
#[allow(unrooted_must_root)]
fn Play(&self) -> Rc<Promise> {
let promise = Promise::new(&self.global());
self.play(&promise);
promise
}
// https://html.spec.whatwg.org/multipage/#dom-media-pause
fn Pause(&self) {
// Step 1
if self.network_state.get() == NetworkState::Empty {
self.invoke_resource_selection_algorithm();
}
// Step 2
self.internal_pause_steps();
}
// https://html.spec.whatwg.org/multipage/#dom-media-paused
fn Paused(&self) -> bool {
self.paused.get()
}
}
impl VirtualMethods for HTMLMediaElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&local_name!("src") => {
if mutation.new_value(attr).is_some() {
self.media_element_load_algorithm();
}
}
_ => (),
};
}
// https://html.spec.whatwg.org/multipage/#playing-the-media-resource:remove-an-element-from-a-document
fn unbind_from_tree(&self, context: &UnbindContext) {
self.super_type().unwrap().unbind_from_tree(context);
if context.tree_in_doc {
let task = MediaElementMicrotask::PauseIfNotInDocumentTask {
elem: DomRoot::from_ref(self)
};
ScriptThread::await_stable_state(Microtask::MediaElement(task));
}
}
}
#[derive(HeapSizeOf, JSTraceable)]
pub enum MediaElementMicrotask {
ResourceSelectionTask {
elem: DomRoot<HTMLMediaElement>,
base_url: ServoUrl
},
PauseIfNotInDocumentTask {
elem: DomRoot<HTMLMediaElement>,
}
}
impl MicrotaskRunnable for MediaElementMicrotask {
fn handler(&self) {
match self {
&MediaElementMicrotask::ResourceSelectionTask { ref elem, ref base_url } => {
elem.resource_selection_algorithm_sync(base_url.clone());
},
&MediaElementMicrotask::PauseIfNotInDocumentTask { ref elem } => {
if !elem.upcast::<Node>().is_in_doc() {
elem.internal_pause_steps();
}
},
}
}
}
enum Resource {
Object,
Url(ServoUrl),
}<|fim▁hole|>
struct HTMLMediaElementContext {
/// The element that initiated the request.
elem: Trusted<HTMLMediaElement>,
/// The response body received to date.
data: Vec<u8>,
/// The response metadata received to date.
metadata: Option<Metadata>,
/// The generation of the media element when this fetch started.
generation_id: u32,
/// Time of last progress notification.
next_progress_event: Timespec,
/// Whether the media metadata has been completely received.
have_metadata: bool,
/// True if this response is invalid and should be ignored.
ignore_response: bool,
}
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
impl FetchResponseListener for HTMLMediaElementContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
self.metadata = metadata.ok().map(|m| {
match m {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_
}
});
let status_is_ok = self.metadata.as_ref()
.and_then(|m| m.status.as_ref())
.map_or(true, |s| s.0 >= 200 && s.0 < 300);
// => "If the media data cannot be fetched at all..."
if !status_is_ok {
// Ensure that the element doesn't receive any further notifications
// of the aborted fetch.
self.ignore_response = true;
self.elem.root().queue_dedicated_media_source_failure_steps();
}
}
fn process_response_chunk(&mut self, mut payload: Vec<u8>) {
if self.ignore_response {
// An error was received previously, skip processing the payload.
return;
}
self.data.append(&mut payload);
let elem = self.elem.root();
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
// => "Once enough of the media data has been fetched to determine the duration..."
if !self.have_metadata {
self.check_metadata(&elem);
} else {
elem.change_ready_state(ReadyState::HaveCurrentData);
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-resource step 4,
// => "If mode is remote" step 2
if time::get_time() > self.next_progress_event {
let window = window_from_node(&*elem);
window.dom_manipulation_task_source().queue_simple_event(
elem.upcast(),
atom!("progress"),
&window,
);
self.next_progress_event = time::get_time() + Duration::milliseconds(350);
}
}
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
if self.ignore_response {
            // An error was received previously; skip any further processing.
return;
}
let elem = self.elem.root();
// => "If the media data can be fetched but is found by inspection to be in an unsupported
// format, or can otherwise not be rendered at all"
if !self.have_metadata {
elem.queue_dedicated_media_source_failure_steps();
}
// => "Once the entire media resource has been fetched..."
else if status.is_ok() {
elem.change_ready_state(ReadyState::HaveEnoughData);
elem.upcast::<EventTarget>().fire_event(atom!("progress"));
elem.network_state.set(NetworkState::Idle);
elem.upcast::<EventTarget>().fire_event(atom!("suspend"));
}
// => "If the connection is interrupted after some media data has been received..."
else if elem.ready_state.get() != ReadyState::HaveNothing {
// Step 2
elem.error.set(Some(&*MediaError::new(&*window_from_node(&*elem),
MEDIA_ERR_NETWORK)));
// Step 3
elem.network_state.set(NetworkState::Idle);
// Step 4.
elem.delay_load_event(false);
// Step 5
elem.upcast::<EventTarget>().fire_event(atom!("error"));
} else {
// => "If the media data cannot be fetched at all..."
elem.queue_dedicated_media_source_failure_steps();
}
}
}
impl PreInvoke for HTMLMediaElementContext {
fn should_invoke(&self) -> bool {
//TODO: finish_load needs to run at some point if the generation changes.
self.elem.root().generation_id.get() == self.generation_id
}
}
impl HTMLMediaElementContext {
fn new(elem: &HTMLMediaElement) -> HTMLMediaElementContext {
HTMLMediaElementContext {
elem: Trusted::new(elem),
data: vec![],
metadata: None,
generation_id: elem.generation_id.get(),
next_progress_event: time::get_time() + Duration::milliseconds(350),
have_metadata: false,
ignore_response: false,
}
}
fn check_metadata(&mut self, elem: &HTMLMediaElement) {
match audio_video_metadata::get_format_from_slice(&self.data) {
Ok(audio_video_metadata::Metadata::Video(meta)) => {
let dur = meta.audio.duration.unwrap_or(::std::time::Duration::new(0, 0));
*elem.video.borrow_mut() = Some(VideoMedia {
format: format!("{:?}", meta.format),
duration: Duration::seconds(dur.as_secs() as i64) +
Duration::nanoseconds(dur.subsec_nanos() as i64),
width: meta.dimensions.width,
height: meta.dimensions.height,
video: meta.video.unwrap_or("".to_owned()),
audio: meta.audio.audio,
});
// Step 6
elem.change_ready_state(ReadyState::HaveMetadata);
self.have_metadata = true;
}
_ => {}
}
}
}<|fim▁end|>
| |
<|file_name|>resize-base-min.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1<|fim▁hole|>oid sha256:277086ec0fb49c6da22640a41bd9d52e5e3503cc15ae358cd76662b81439df83
size 10713<|fim▁end|>
| |
<|file_name|>media.go<|end_file_name|><|fim▁begin|>package responses
type MediaCommand struct {
Headers
MediaSessionID int `json:"mediaSessionId"`
}
type LoadMediaCommand struct {
Headers
Media MediaItem `json:"media"`
CurrentTime int `json:"currentTime"`
Autoplay bool `json:"autoplay"`
CustomData interface{} `json:"customData"`
}
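// Illustrative sketch (not part of the original file): a LOAD command for a
// plain MP4 stream could be filled in roughly like this. The URL and MIME
// type are made-up placeholder values, and the embedded Headers are assumed
// to be populated by whatever sends the command.
//
//	cmd := LoadMediaCommand{
//		Media: MediaItem{
//			ContentId:   "http://example.com/video.mp4",
//			StreamType:  "BUFFERED",
//			ContentType: "video/mp4",
//		},
//		CurrentTime: 0,
//		Autoplay:    true,
//	}
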
type MediaItem struct {
ContentId string `json:"contentId"`
StreamType string `json:"streamType"`
ContentType string `json:"contentType"`
MetaData MediaItemMeta `json:"metadata"`
}
type MediaItemMeta struct {
Title string `json:"title"`
SubTitle string `json:"subtitle"`
Images []MediaItemMetaImage `json:"images"`
}
type MediaItemMetaImage struct {
Url string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
}
type MediaStatusMedia struct {
ContentId string `json:"contentId"`
StreamType string `json:"streamType"`
ContentType string `json:"contentType"`
Duration float64 `json:"duration"`
MetaData MediaItemMeta `json:"metadata"`
}
type MediaStatusResponse struct {
Headers
Status []*MediaStatus `json:"status,omitempty"`
}
type MediaStatus struct {
Headers
MediaSessionID int `json:"mediaSessionId"`
PlaybackRate float64 `json:"playbackRate"`
PlayerState string `json:"playerState"`
CurrentTime float64 `json:"currentTime"`
SupportedMediaCommands int `json:"supportedMediaCommands"`
Volume *Volume `json:"volume,omitempty"`<|fim▁hole|> CustomData map[string]interface{} `json:"customData"`
RepeatMode string `json:"repeatMode"`
IdleReason string `json:"idleReason"`
}
type SeekMediaCommand struct {
Headers
CurrentTime int `json:"currentTime"`
MediaSessionID int `json:"mediaSessionId"`
}<|fim▁end|>
|
Media *MediaStatusMedia `json:"media"`
|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# python-gnupg documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 5 22:38:47 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import psutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./../'))
sys.path.insert(0, os.path.abspath('.'))
# -- Autodoc settings ----------------------------------------------------------
## trying to set this somewhere...
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members', 'show-hidden']
autoclass_content = 'both'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'sphinxcontrib.fulltoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_static']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gnupg'
copyright = u'2013-2014, Isis Agora Lovecruft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pretty_bad_protocol import gnupg
version = gnupg.__version__
# The full version, including alpha/beta/rc tags.
release = gnupg.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%d %B %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Options for HTML output ---------------------------------------------------<|fim▁hole|>html_theme = 'scrolls'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'pyramid'
html_theme = 'agogo'
#html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'stickysidebar': 'true',
# 'rightsidebar':'true',
'nosidebar': 'false',
# 'full_logo': 'false'
'sidebarwidth': '300'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "gnupg: Python Module Documentation"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%A, %d %B %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'gnupgdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-gnupg.tex', u'python-gnupg Documentation',
u'Isis Agora Lovecruft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gnupg Python Module Docs', u'gnupg Python Module Documentation',
[u'Isis Agora Lovecruft'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-gnupg', u'python-gnupg Documentation',
u'Isis Agora Lovecruft', 'python-gnupg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'python-gnupg'
epub_author = u'Isis Agora Lovecruft'
epub_publisher = u'Isis Agora Lovecruft'
epub_copyright = u'2013, Isis Agora Lovecruft'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True<|fim▁end|>
|
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
|
<|file_name|>test_wdm.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import pprint
import shutil
import subprocess
import unittest
from pyspatialite import dbapi2 as db
import qgis.core # Need to import this before PyQt to ensure QGIS parts work
from PyQt4.QtSql import QSqlQuery, QSqlDatabase
from Roadnet.database import connect_and_open
from Roadnet.tests.integration.roadnet_test_cases import QgisTestCase
import Roadnet.roadnet_exceptions as rn_except
from Roadnet.ramp import wdm
from Roadnet.bin import shapefile_attributes
this_dir = os.path.dirname(os.path.abspath(__file__))
SQL_SCRIPT = """
INSERT INTO rdpoly VALUES (
1, 11, 1, 'CGWAY', 'LAR', NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'C119/10', '/CGWAY/', '/CGWAY//', NULL, NULL, 11111,
GeomFromText("MULTIPOLYGON(((287500 691400, 287500 691500, 287600 691500, 287600 691400 )))", 27700) );
INSERT INTO rdpoly VALUES (
2, 11, 2, 'FTWAY', 'LAF', NULL, NULL, NULL, 'E', 1, 1, NULL, 'C119/10', '/FTWAY/1', '/FTWAY/E/1', NULL, NULL, 22222,
GeomFromText("MULTIPOLYGON(((288000 691400, 288000 691500, 288100 691500, 288100 691400 )))", 27700) );
INSERT INTO rdpoly VALUES (
3, 11, 3, 'FTWAY', 'LAF', NULL, NULL, NULL, 'E', 2, 2, NULL, 'C119/10', '/FTWAY/2', '/FTWAY/E/2', NULL, NULL, 33333,
GeomFromText("MULTIPOLYGON(((287500 691900, 287500 692000, 287600 692000, 287600 691900 )))", 27700) );
INSERT INTO rdpoly VALUES (
4, 11, 4, 'FTWAY', 'LAF', NULL, NULL, NULL, 'S', 1, 1, NULL, 'C119/20', '/FTWAY/1', '/FTWAY/S/1', NULL, NULL, 44444,
GeomFromText("MULTIPOLYGON(((287800 692200, 287800 692300, 287900 692300, 287900 692200 )))", 27700) );
INSERT INTO mcl VALUES (
1, 20574, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 60,
'Test MCL One',
NULL, 30, 'U', 'FT', 'Public', 11111, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((0 0,0 1,0 2))", 27700) );
INSERT INTO mcl VALUES (
2, 20573, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 50,
'Test MCL Two',
NULL, 30, 'U', 'FT', 'Public', 22222, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293166.277 680074.52,293180.28 680074.606,293181.610 680074.83))", 27700) );
INSERT INTO mcl VALUES (
3, 18163, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 40,
'Test MCL Three',
NULL, 30, 'U', 'FT', 'Public', 33333, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293141.8919999999 680074.376,293166.2779999999 680074.5219999999))", 27700) );
INSERT INTO mcl VALUES (
4, 18163, NULL, 14305470, NULL, NULL, NULL, 'Grangemouth', NULL, NULL, NULL, NULL, NULL, 'F-5470', 40,
'Test MCL Four',
NULL, 30, 'U', 'FT', 'Public', 44444, 'U', NULL, NULL,
GeomFromText("MULTILINESTRING((293141.8919999999 680074.376,293166.2779999999 680074.5219999999))", 27700) );
"""
class TestWDMExports(QgisTestCase):
empty_db_path = os.path.join('database_files', 'roadnet_empty.sqlite')
test_db_path = os.path.join(this_dir, 'roadnet_test.sqlite')
test_directory = os.path.join(this_dir, 'test_dir')
db = None
def setUp(self):
super(TestWDMExports, self).setUp()
# Make copy of empty database to work on
shutil.copy(self.empty_db_path, self.test_db_path)
# Populate with example data
conn = db.connect(self.test_db_path)
curs = conn.cursor()
try:
curs.executescript(SQL_SCRIPT)
finally:
conn.close()
# Open connection for tests
self.tidy()
os.makedirs(self.test_directory)
self.db = connect_and_open(self.test_db_path, 'integration_testing')
def tearDown(self):
super(TestWDMExports, self).tearDown()
if self.db: # Just in case self.db doesn't get set
self.db.close()
del self.db
QSqlDatabase.removeDatabase('integration_testing')
if os.path.exists(self.test_db_path):
os.remove(self.test_db_path)
def tidy(self):
shutil.rmtree(self.test_directory, ignore_errors=True)
def test_query_db_for_features_success(self):
# Arrange and Act
q = wdm.query_db_for_features('FTWAY', self.db)
# Assert
try:
self.assertTrue(isinstance(q, QSqlQuery),
"An active QSqlQuery wasn't returned ({})".format(type(q)))
finally:
q.finish()
del q
def test_ftway_export_returns_three_features(self):
# Arrange
features_query = wdm.query_db_for_features('FTWAY', self.db)
vlayer = wdm.create_temp_layer_in_memory()
# Act
wdm.add_features_to_vlayer(features_query, vlayer)
# Assert
expected = 3
count = vlayer.featureCount()
self.assertEqual(
expected, count,
"Number of exported FTWAY features was not {} ({})".format(expected, count))
@patch.object(rn_except.QMessageBoxWarningError, 'show_message_box')
def test_exported_attributes(self, mock_error):
# Arrange
outfile_names = {'CGWAY': 'RAMPEXPORT_Carriageway.shp',
'CYCLE': 'RAMPEXPORT_Cycleway_Path.shp',
'FTWAY': 'RAMPEXPORT_Footway.shp'}
expected_attributes = {
'CGWAY': [['1', 'CGWAY', 'LAR', '', '', '', '2.000000000000000',
'11111', '14305470', 'F-5470', '60', 'Test MCL One', '',
'30', 'U', 'U', '']],
'CYCLE': [],
'FTWAY': [
['2', 'FTWAY', 'LAF', 'E', '1', '1', '', '22222', '14305470',
'F-5470', '50', 'Test MCL Two', '', '30', 'U', 'U', ''],
['3', 'FTWAY', 'LAF', 'E', '2', '2', '', '33333', '14305470',
'F-5470', '40', 'Test MCL Three', '', '30', 'U', 'U', ''],
['4', 'FTWAY', 'LAF', 'S', '1', '1', '', '44444', '14305470',
'F-5470', '40', 'Test MCL Four', '', '30', 'U', 'U', '']]}
# Act
for element_type in outfile_names:
shapefile_path = os.path.join(self.test_directory,
outfile_names[element_type])
wdm.export(element_type, self.db, self.test_directory)
attr = shapefile_attributes.get_ogr2csv(shapefile_path)
# Assert
print("-------------")
print("Expected")
pprint.pprint(expected_attributes[element_type])
print("")
print("Actual")
pprint.pprint(attr)
print("-------------")
self.assertEqual(expected_attributes[element_type], attr)
def test_create_sql_command_without_length(self):
# Arrange
expected = """
SELECT AsBinary(rdpoly.geometry) AS geometry, rd_pol_id, element, hierarchy,
desc_2, desc_3, ref_3, currency_flag, feature_length, r_usrn,
mcl_ref, usrn, lor_ref_1, lor_ref_2, lor_desc, lane_number,
speed_limit, rural_urban_id, street_classification, carriageway
FROM rdpoly
LEFT OUTER JOIN mcl
ON rdpoly.mcl_cref = mcl.mcl_ref
WHERE element = "FTWAY"
AND rdpoly.symbol IN (11, 12);"""
# Act
sql = wdm.create_sql_command("FTWAY")
# Assert
self.assertEqual(expected, sql)
def test_create_sql_command_with_length(self):
# Arrange
expected = """
SELECT AsBinary(rdpoly.geometry) AS geometry, rd_pol_id, element, hierarchy,
desc_2, desc_3, ref_3, currency_flag, GLength(mcl.geometry) AS feature_length, r_usrn,
mcl_ref, usrn, lor_ref_1, lor_ref_2, lor_desc, lane_number,
speed_limit, rural_urban_id, street_classification, carriageway
FROM rdpoly
LEFT OUTER JOIN mcl
ON rdpoly.mcl_cref = mcl.mcl_ref
WHERE element = "CGWAY"
AND rdpoly.symbol IN (11, 12);"""
# Act
sql = wdm.create_sql_command("CGWAY")
# Assert
self.assertEqual(expected, sql)
def get_ogr_output_feature_count(shapefile_path):
cmd = ["ogrinfo", shapefile_path, "-al"]
ogr_output = subprocess.check_output(cmd)
for line in ogr_output.split('\n'):
if line.startswith("Feature Count"):
count = line.split(':')[1]
count = count.strip()
return int(count)
raise RuntimeError('Feature Count line not found in {}'.format(shapefile_path))
if __name__ == '__main__':
unittest.main()<|fim▁end|>
|
# -*- coding: utf-8 -*-
from mock import patch
import os
|
<|file_name|>configure.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python<|fim▁hole|># coding: utf8
import os
import subprocess
from {% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}.commands.base import BaseCommand
from {% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %} import PROJECT_DIR
class Configure(BaseCommand):
def execute(self):
os.chdir(os.path.join(PROJECT_DIR, 'build'))
subprocess.run(['cmake', PROJECT_DIR])<|fim▁end|>
| |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Clawpack documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 25 12:07:14 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('./ext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx','plot_directive','only_directives',
'sphinx.ext.inheritance_diagram']
# extensions.append('sphinx.ext.jsmath')
extensions.append('sphinx.ext.pngmath')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clawpack'
copyright = u'2009, Randall J. LeVeque and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.6'
# The full version, including alpha/beta/rc tags.
release = '4.6.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['users']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = 'math'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# html_style = 'mpl.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'clawlogo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'clawicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clawpackdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Clawpack.tex', ur'Clawpack Documentation',
ur'RJL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/dev': None}
jsmath_path = 'jsmath/easy/load.js'<|fim▁hole|><|fim▁end|>
|
# jsmath_path = '_static/jsMath/easy/load.js'
keep_warnings = 'True'
|
<|file_name|>ModalContent.tsx<|end_file_name|><|fim▁begin|>/*
MIT License
Copyright (c) 2022 Looker Data Sciences, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import type {
CompatibleHTMLProps,
PaddingProps,
SpacingSizes,
} from '@looker/design-tokens'
import type { Ref } from 'react'
import React, { forwardRef } from 'react'
import styled from 'styled-components'
import { OverflowShadow, useOverflow } from '../../utils'
export type ModalContentProps = CompatibleHTMLProps<HTMLDivElement> &
PaddingProps & {
/**
* If the Modal does not have a footer use this property to manually render padding
* at the bottom of the ModalContent. (`hasFooter={false}`)<|fim▁hole|> * @default true
*/
hasFooter?: boolean
/**
* If the Modal does not have a header use this property to manually render padding
* at the top of the ModalContent. (`hasHeader={false}`)
* @default true
*/
hasHeader?: boolean
}
type ModalContentPropsInternal = ModalContentProps & {
/**
* Used for vertical `y` padding when content does not have overflow and does have
* an adjacent footer or header.
* @private
* @default 'xxxsmall'
*/
overflowVerticalPadding?: SpacingSizes
}
const ModalContentLayout = forwardRef(
(
{
children,
hasFooter,
hasHeader,
pb,
pt,
py,
p,
overflowVerticalPadding = 'u05',
...props
}: ModalContentPropsInternal,
forwardedRef: Ref<HTMLDivElement>
) => {
const [hasOverflow, ref] = useOverflow(forwardedRef)
return (
<OverflowShadow
hasOverflow={hasOverflow}
ref={ref}
p={p}
pb={hasFooter && !hasOverflow ? overflowVerticalPadding : pb || py || p}
pt={hasHeader && !hasOverflow ? overflowVerticalPadding : pt || py || p}
{...props}
>
{children}
</OverflowShadow>
)
}
)
ModalContentLayout.displayName = 'ModalContentLayout'
export const ModalContent = styled(
ModalContentLayout
)<ModalContentPropsInternal>`
flex: 1 1 auto;
overflow: auto;
`<|fim▁end|>
| |
<|file_name|>virtual_network_peering.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network.
:type remote_virtual_network: :class:`SubResource
<azure.mgmt.network.v2017_03_01.models.SubResource>`
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or :class:`VirtualNetworkPeeringState
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkPeeringState>`
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},<|fim▁hole|> }
def __init__(self, id=None, allow_virtual_network_access=None, allow_forwarded_traffic=None, allow_gateway_transit=None, use_remote_gateways=None, remote_virtual_network=None, peering_state=None, provisioning_state=None, name=None, etag=None):
super(VirtualNetworkPeering, self).__init__(id=id)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag<|fim▁end|>
|
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
|
<|file_name|>logout.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { CredentialsService } from './credentials.service';
import { AgriHub } from '../../global/agrihub';
@Component({<|fim▁hole|>export class LogoutComponent implements OnInit {
constructor(
private router: Router,
private credentialsService: CredentialsService
) {}
ngOnInit() {
this.credentialsService.deleteAll();
window.location.href = AgriHub.BASE_URL + 'login';
}
}<|fim▁end|>
|
selector: 'logout',
template: ''
})
|
<|file_name|>RemarksAPITest.cpp<|end_file_name|><|fim▁begin|>//===- unittest/Support/RemarksAPITest.cpp - C++ API tests ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Remarks/Remark.h"
#include "llvm/Remarks/RemarkStringTable.h"
#include "gtest/gtest.h"
using namespace llvm;
TEST(RemarksAPI, Comparison) {
remarks::Remark R;
R.RemarkType = remarks::Type::Missed;
R.PassName = "pass";
R.RemarkName = "name";
R.FunctionName = "func";
R.Loc = remarks::RemarkLocation{"path", 3, 4};
R.Hotness = 5;
R.Args.emplace_back();
R.Args.back().Key = "key";
R.Args.back().Val = "value";
R.Args.emplace_back();
R.Args.back().Key = "keydebug";
R.Args.back().Val = "valuedebug";
R.Args.back().Loc = remarks::RemarkLocation{"argpath", 6, 7};
// Check that == works.
EXPECT_EQ(R, R);
// Check that != works.
remarks::Remark R2 = R.clone();
R2.FunctionName = "func0";
EXPECT_NE(R, R2);
// Check that we iterate through all the arguments.
remarks::Remark R3 = R.clone();
R3.Args.back().Val = "not";
EXPECT_NE(R, R3);
}
TEST(RemarksAPI, Clone) {
remarks::Remark R;
R.RemarkType = remarks::Type::Missed;
R.PassName = "pass";
R.RemarkName = "name";
R.FunctionName = "func";
R.Loc = remarks::RemarkLocation{"path", 3, 4};
R.Hotness = 5;
R.Args.emplace_back();
R.Args.back().Key = "key";
R.Args.back().Val = "value";
R.Args.emplace_back();
R.Args.back().Key = "keydebug";
R.Args.back().Val = "valuedebug";
R.Args.back().Loc = remarks::RemarkLocation{"argpath", 6, 7};
// Check that clone works.
remarks::Remark R2 = R.clone();
EXPECT_EQ(R, R2);
}
TEST(RemarksAPI, ArgsAsMsg) {
remarks::Remark R;
R.RemarkType = remarks::Type::Missed;
R.Args.emplace_back();
R.Args.back().Key = "key";
R.Args.back().Val = "can not do this ";
R.Args.emplace_back();
R.Args.back().Key = "keydebug";
R.Args.back().Val = "because of that.";
R.Args.back().Loc = remarks::RemarkLocation{"argpath", 6, 7};
EXPECT_EQ(R.getArgsAsMsg(), "can not do this because of that.");
}
TEST(RemarksAPI, StringTableInternalize) {
remarks::StringTable StrTab;
// Empty table.
EXPECT_EQ(StrTab.SerializedSize, 0UL);
remarks::Remark R;
R.RemarkType = remarks::Type::Missed;
R.PassName = "pass";
R.RemarkName = "name";
R.FunctionName = "func";
R.Loc = remarks::RemarkLocation{"path", 3, 4};
R.Args.emplace_back();
R.Args.back().Key = "keydebug";
R.Args.back().Val = "valuedebug";
R.Args.back().Loc = remarks::RemarkLocation{"argpath", 6, 7};
// Check that internalize starts using the strings from the string table.
remarks::Remark R2 = R.clone();
StrTab.internalize(R2);
// Check that the pointers in the remarks are different.
EXPECT_NE(R.PassName.data(), R2.PassName.data());
EXPECT_NE(R.RemarkName.data(), R2.RemarkName.data());
EXPECT_NE(R.FunctionName.data(), R2.FunctionName.data());<|fim▁hole|> EXPECT_NE(R.Args.back().Val.data(), R2.Args.back().Val.data());
EXPECT_NE(R.Args.back().Loc->SourceFilePath.data(),
R2.Args.back().Loc->SourceFilePath.data());
// Check that the internalized remark is using the pointers from the string table.
EXPECT_EQ(StrTab.add(R.PassName).second.data(), R2.PassName.data());
EXPECT_EQ(StrTab.add(R.RemarkName).second.data(), R2.RemarkName.data());
EXPECT_EQ(StrTab.add(R.FunctionName).second.data(), R2.FunctionName.data());
EXPECT_EQ(StrTab.add(R.Loc->SourceFilePath).second.data(),
R2.Loc->SourceFilePath.data());
EXPECT_EQ(StrTab.add(R.Args.back().Key).second.data(),
R2.Args.back().Key.data());
EXPECT_EQ(StrTab.add(R.Args.back().Val).second.data(),
R2.Args.back().Val.data());
EXPECT_EQ(StrTab.add(R.Args.back().Loc->SourceFilePath).second.data(),
R2.Args.back().Loc->SourceFilePath.data());
}<|fim▁end|>
|
EXPECT_NE(R.Loc->SourceFilePath.data(), R2.Loc->SourceFilePath.data());
EXPECT_NE(R.Args.back().Key.data(), R2.Args.back().Key.data());
|
<|file_name|>linked_list.rs<|end_file_name|><|fim▁begin|>use core::alloc::{GlobalAlloc, Layout};
use core::ptr::{self, NonNull};
use linked_list_allocator::Heap;
use spin::Mutex;
use crate::paging::{ActivePageTable, TableKind};
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl GlobalAlloc for Allocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
while let Some(ref mut heap) = *HEAP.lock() {
match heap.allocate_first_fit(layout) {<|fim▁hole|> heap.extend(crate::KERNEL_HEAP_SIZE);
},
other => return other.ok().map_or(ptr::null_mut(), |allocation| allocation.as_ptr()),
}
}
panic!("__rust_allocate: heap not initialized");
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(NonNull::new_unchecked(ptr), layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
}<|fim▁end|>
|
Err(()) => {
let size = heap.size();
super::map_heap(&mut ActivePageTable::new(TableKind::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
|
<|file_name|>base.py<|end_file_name|><|fim▁begin|>"""
Provide basic components for groupby. These definitions
hold the allowlist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
from __future__ import annotations
import dataclasses
from typing import Hashable
@dataclasses.dataclass(order=True, frozen=True)
class OutputKey:
label: Hashable
position: int
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
plotting_methods = frozenset(["plot", "hist"])
common_apply_allowlist = (
frozenset(
[
"quantile",
"fillna",
"mad",
"take",
"idxmax",
"idxmin",
"tshift",
"skew",
"corr",
"cov",
"diff",
]
)
| plotting_methods
)
series_apply_allowlist: frozenset[str] = (
common_apply_allowlist
| frozenset(
{"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
)
) | frozenset(["dtype", "unique"])
dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset(
["dtypes", "corrwith"]
)
# cythonized transformations or canned "agg+broadcast", which do not
# require postprocessing of the result by transform.
cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
# List of aggregation/reduction functions.
# These map each group to a single numeric value
reduction_kernels = frozenset(
[
"all",
"any",
"corrwith",
"count",
"first",
"idxmax",
"idxmin",
"last",
"mad",
"max",
"mean",
"median",
"min",
"ngroup",
"nth",
"nunique",
"prod",
# as long as `quantile`'s signature accepts only
# a single quantile value, it's a reduction.
# GH#27526 might change that.
"quantile",
"sem",
"size",
"skew",
"std",
"sum",
"var",
]
)
# List of transformation functions.
# a transformation is a function that, for each group,
# produces a result that has the same shape as the group.
transformation_kernels = frozenset(
[
"backfill",
"bfill",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"diff",
"ffill",
"fillna",
"pad",
"pct_change",
"rank",
"shift",
"tshift",
]
)
# these are all the public methods on Grouper which don't belong
# in either of the above lists
groupby_other_methods = frozenset(
[
"agg",
"aggregate",
"apply",
"boxplot",
# corr and cov return ngroups*ncolumns rows, so they
# are neither a transformation nor a reduction
"corr",
"cov",
"describe",
"dtypes",
"expanding",
"ewm",
"filter",
"get_group",
"groups",
"head",
"hist",
"indices",
"ndim",
"ngroups",
"ohlc",
"pipe",
"plot",
"resample",
"rolling",
"tail",
"take",
"transform",<|fim▁hole|> "sample",
]
)
# Valid values of `name` for `groupby.transform(name)`
# NOTE: do NOT edit this directly. New additions should be inserted
# into the appropriate list above.
transform_kernel_allowlist = reduction_kernels | transformation_kernels<|fim▁end|>
| |
<|file_name|>subresources_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"context"
"fmt"
"math"
"reflect"
"sort"
"strings"
"testing"
"time"
autoscaling "k8s.io/api/autoscaling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/features"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
featuregatetesting "k8s.io/component-base/featuregate/testing"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
)
var labelSelectorPath = ".status.labelSelector"
var anotherLabelSelectorPath = ".status.anotherLabelSelector"
func NewNoxuSubresourcesCRDs(scope apiextensionsv1.ResourceScope) []*apiextensionsv1.CustomResourceDefinition {
return []*apiextensionsv1.CustomResourceDefinition{
// CRD that uses per-version subresources
{
ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"},
Spec: apiextensionsv1.CustomResourceDefinitionSpec{
Group: "mygroup.example.com",
Names: apiextensionsv1.CustomResourceDefinitionNames{
Plural: "noxus",
Singular: "nonenglishnoxu",
Kind: "WishIHadChosenNoxu",
ShortNames: []string{"foo", "bar", "abc", "def"},
ListKind: "NoxuItemList",
},
Scope: scope,
Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
{
Name: "v1beta1",
Served: true,
Storage: true,
Subresources: &apiextensionsv1.CustomResourceSubresources{
Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
Scale: &apiextensionsv1.CustomResourceSubresourceScale{
SpecReplicasPath: ".spec.replicas",
StatusReplicasPath: ".status.replicas",
LabelSelectorPath: &labelSelectorPath,
},
},
Schema: fixtures.AllowAllSchema(),
},
{
Name: "v1",
Served: true,
Storage: false,
Subresources: &apiextensionsv1.CustomResourceSubresources{
Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
Scale: &apiextensionsv1.CustomResourceSubresourceScale{
SpecReplicasPath: ".spec.replicas",
StatusReplicasPath: ".status.replicas",
LabelSelectorPath: &anotherLabelSelectorPath,
},
},
Schema: fixtures.AllowAllSchema(),
},
},
},
},
}
}
func NewNoxuSubresourceInstance(namespace, name, version string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": fmt.Sprintf("mygroup.example.com/%s", version),
"kind": "WishIHadChosenNoxu",
"metadata": map[string]interface{}{
"namespace": namespace,
"name": name,
},
"spec": map[string]interface{}{
"num": int64(10),
"replicas": int64(3),
},
"status": map[string]interface{}{
"replicas": int64(7),
},
},
}
}
func NewNoxuSubresourceInstanceWithReplicas(namespace, name, version, replicasField string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": fmt.Sprintf("mygroup.example.com/%s", version),
"kind": "WishIHadChosenNoxu",
"metadata": map[string]interface{}{
"namespace": namespace,
"name": name,
},
"spec": map[string]interface{}{
"num": int64(10),
replicasField: int64(3),
},
"status": map[string]interface{}{
"replicas": int64(7),
},
},
}
}
func TestStatusSubresource(t *testing.T) {
tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
ns := "not-the-default"
for _, v := range noxuDefinition.Spec.Versions {
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name)
_, err = instantiateVersionedCustomResource(t, NewNoxuSubresourceInstance(ns, "foo", v.Name), noxuResourceClient, noxuDefinition, v.Name)
if err != nil {
t.Fatalf("unable to create noxu instance: %v", err)
}
gottenNoxuInstance, err := noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// status should not be set after creation
if val, ok := gottenNoxuInstance.Object["status"]; ok {
t.Fatalf("status should not be set after creation, got %v", val)
}
// .status.num = 20
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(20), "status", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// .spec.num = 20
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(20), "spec", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// UpdateStatus should not update spec.
// Check that .spec.num = 10 and .status.num = 20
updatedStatusInstance, err := noxuResourceClient.UpdateStatus(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unable to update status: %v", err)
}
specNum, found, err := unstructured.NestedInt64(updatedStatusInstance.Object, "spec", "num")
if !found || err != nil {
t.Fatalf("unable to get .spec.num")
}
if specNum != int64(10) {
t.Fatalf(".spec.num: expected: %v, got: %v", int64(10), specNum)
}
statusNum, found, err := unstructured.NestedInt64(updatedStatusInstance.Object, "status", "num")
if !found || err != nil {
t.Fatalf("unable to get .status.num")
}
if statusNum != int64(20) {
t.Fatalf(".status.num: expected: %v, got: %v", int64(20), statusNum)
}
gottenNoxuInstance, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// .status.num = 40
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(40), "status", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// .spec.num = 40
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(40), "spec", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// Update should not update status.
// Check that .spec.num = 40 and .status.num = 20
updatedInstance, err := noxuResourceClient.Update(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unable to update instance: %v", err)
}
specNum, found, err = unstructured.NestedInt64(updatedInstance.Object, "spec", "num")
if !found || err != nil {
t.Fatalf("unable to get .spec.num")
}
if specNum != int64(40) {
t.Fatalf(".spec.num: expected: %v, got: %v", int64(40), specNum)
}
statusNum, found, err = unstructured.NestedInt64(updatedInstance.Object, "status", "num")
if !found || err != nil {
t.Fatalf("unable to get .status.num")
}
if statusNum != int64(20) {
t.Fatalf(".status.num: expected: %v, got: %v", int64(20), statusNum)
}
noxuResourceClient.Delete(context.TODO(), "foo", metav1.DeleteOptions{})
}
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}
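// Editorial sketch (not part of the upstream test file): the checks above exercise the
// status-subresource contract — a write through UpdateStatus persists only .status and
// silently keeps the stored .spec. This hypothetical helper restates that flow using only
// the client type and field paths already used in this file.
func sketchUpdateStatusKeepsSpec(client dynamic.ResourceInterface, name string) (*unstructured.Unstructured, error) {
	obj, err := client.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Mutate both fields locally; only the .status change is expected to survive the write.
	if err := unstructured.SetNestedField(obj.Object, int64(20), "status", "num"); err != nil {
		return nil, err
	}
	if err := unstructured.SetNestedField(obj.Object, int64(20), "spec", "num"); err != nil {
		return nil, err
	}
	return client.UpdateStatus(context.TODO(), obj, metav1.UpdateOptions{})
}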
func TestScaleSubresource(t *testing.T) {
groupResource := schema.GroupResource{
Group: "mygroup.example.com",
Resource: "noxus",
}
tearDown, config, _, err := fixtures.StartDefaultServer(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
for _, v := range noxuDefinition.Spec.Versions {
// Start with a new CRD, so that the object doesn't have resourceVersion
noxuDefinition := noxuDefinition.DeepCopy()
subresources, err := getSubresourcesForVersion(noxuDefinition, v.Name)
if err != nil {
t.Fatal(err)
}
// set invalid json path for specReplicasPath
subresources.Scale.SpecReplicasPath = "foo,bar"
_, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err == nil {
t.Fatalf("unexpected non-error: specReplicasPath should be a valid json path under .spec")
}
subresources.Scale.SpecReplicasPath = ".spec.replicas"
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
ns := "not-the-default"
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name)
_, err = instantiateVersionedCustomResource(t, NewNoxuSubresourceInstance(ns, "foo", v.Name), noxuResourceClient, noxuDefinition, v.Name)
if err != nil {
t.Fatalf("unable to create noxu instance: %v", err)
}
scaleClient, err := fixtures.CreateNewVersionedScaleClient(noxuDefinition, config, v.Name)
if err != nil {
t.Fatal(err)
}
// set .status.labelSelector = bar
gottenNoxuInstance, err := noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
err = unstructured.SetNestedField(gottenNoxuInstance.Object, "bar", strings.Split((*subresources.Scale.LabelSelectorPath)[1:], ".")...)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = noxuResourceClient.UpdateStatus(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unable to update status: %v", err)
}
// get the scale object
gottenScale, err := scaleClient.Scales("not-the-default").Get(context.TODO(), groupResource, "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if gottenScale.Spec.Replicas != 3 {
t.Fatalf("Scale.Spec.Replicas: expected: %v, got: %v", 3, gottenScale.Spec.Replicas)
}
if gottenScale.Status.Selector != "bar" {
t.Fatalf("Scale.Status.Selector: expected: %v, got: %v", "bar", gottenScale.Status.Selector)
}
if !utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {
// check self link
expectedSelfLink := fmt.Sprintf("/apis/mygroup.example.com/%s/namespaces/not-the-default/noxus/foo/scale", v.Name)
if gottenScale.GetSelfLink() != expectedSelfLink {
t.Fatalf("Scale.Metadata.SelfLink: expected: %v, got: %v", expectedSelfLink, gottenScale.GetSelfLink())
}
}
// update the scale object
// check that spec is updated, but status is not
gottenScale.Spec.Replicas = 5
gottenScale.Status.Selector = "baz"
updatedScale, err := scaleClient.Scales("not-the-default").Update(context.TODO(), groupResource, gottenScale, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
if updatedScale.Spec.Replicas != 5 {
t.Fatalf("replicas: expected: %v, got: %v", 5, updatedScale.Spec.Replicas)
}
if updatedScale.Status.Selector != "bar" {
t.Fatalf("scale should not update status: expected %v, got: %v", "bar", updatedScale.Status.Selector)
}
// check that .spec.replicas = 5, but status is not updated
updatedNoxuInstance, err := noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
specReplicas, found, err := unstructured.NestedInt64(updatedNoxuInstance.Object, "spec", "replicas")
if !found || err != nil {
t.Fatalf("unable to get .spec.replicas")
}
if specReplicas != 5 {
t.Fatalf("replicas: expected: %v, got: %v", 5, specReplicas)
}
statusLabelSelector, found, err := unstructured.NestedString(updatedNoxuInstance.Object, strings.Split((*subresources.Scale.LabelSelectorPath)[1:], ".")...)
if !found || err != nil {
t.Fatalf("unable to get %s", *subresources.Scale.LabelSelectorPath)
}
if statusLabelSelector != "bar" {
t.Fatalf("scale should not update status: expected %v, got: %v", "bar", statusLabelSelector)
}
// validate maximum value
// set .spec.replicas = math.MaxInt64
gottenNoxuInstance, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(math.MaxInt64), "spec", "replicas")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = noxuResourceClient.Update(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err == nil {
t.Fatalf("unexpected non-error: .spec.replicas should be less than 2147483647")
}
noxuResourceClient.Delete(context.TODO(), "foo", metav1.DeleteOptions{})
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}
}
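// Editorial sketch (not in the upstream file): the scale subresource is driven entirely by
// JSON paths declared on the CRD version; this hypothetical helper just restates the mapping
// used by the definitions above. Field names come from the apiextensions v1 API already
// imported here; the comments note how each path surfaces on the autoscaling/v1 Scale object.
func sketchScaleSubresource(labelSelectorPath string) *apiextensionsv1.CustomResourceSubresourceScale {
	return &apiextensionsv1.CustomResourceSubresourceScale{
		SpecReplicasPath:   ".spec.replicas",   // read/written as Scale.Spec.Replicas
		StatusReplicasPath: ".status.replicas", // read as Scale.Status.Replicas
		LabelSelectorPath:  &labelSelectorPath, // read as Scale.Status.Selector
	}
}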
func TestApplyScaleSubresource(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
tearDown, config, _, err := fixtures.StartDefaultServer(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()<|fim▁hole|> }
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)[0]
subresources, err := getSubresourcesForVersion(noxuDefinition, "v1beta1")
if err != nil {
t.Fatal(err)
}
subresources.Scale.SpecReplicasPath = ".spec.replicas[0]"
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
// Create a client for it.
ns := "not-the-default"
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1beta1")
obj := NewNoxuSubresourceInstanceWithReplicas(ns, "foo", "v1beta1", "replicas[0]")
obj, err = noxuResourceClient.Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
t.Logf("%#v", obj)
t.Fatalf("Failed to create CustomResource: %v", err)
}
noxuResourceClient = newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1")
patch := `{"metadata": {"name": "foo"}, "kind": "WishIHadChosenNoxu", "apiVersion": "mygroup.example.com/v1", "spec": {"replicas": 3}}`
obj, err = noxuResourceClient.Patch(context.TODO(), "foo", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{FieldManager: "applier"})
if err != nil {
t.Logf("%#v", obj)
t.Fatalf("Failed to Apply CustomResource: %v", err)
}
if got := len(obj.GetManagedFields()); got != 2 {
t.Fatalf("Expected 2 managed fields, got %v: %v", got, obj.GetManagedFields())
}
_, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, []byte(`{"spec": {"replicas": 5}}`), metav1.PatchOptions{FieldManager: "scaler"}, "scale")
if err != nil {
t.Fatal(err)
}
obj, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to Get CustomResource: %v", err)
}
// Managed fields should have 3 entries: one for scale, one for spec, and one for the rest of the fields
managedFields := obj.GetManagedFields()
if len(managedFields) != 3 {
t.Fatalf("Expected 3 managed fields, got %v: %v", len(managedFields), obj.GetManagedFields())
}
specEntry := managedFields[0]
if specEntry.Manager != "applier" || specEntry.APIVersion != "mygroup.example.com/v1" || specEntry.Operation != "Apply" || string(specEntry.FieldsV1.Raw) != `{"f:spec":{}}` || specEntry.Subresource != "" {
t.Fatalf("Unexpected entry: %v", specEntry)
}
scaleEntry := managedFields[1]
if scaleEntry.Manager != "scaler" || scaleEntry.APIVersion != "mygroup.example.com/v1" || scaleEntry.Operation != "Update" || string(scaleEntry.FieldsV1.Raw) != `{"f:spec":{"f:replicas":{}}}` || scaleEntry.Subresource != "scale" {
t.Fatalf("Unexpected entry: %v", scaleEntry)
}
restEntry := managedFields[2]
if restEntry.Manager != "integration.test" || restEntry.APIVersion != "mygroup.example.com/v1beta1" {
t.Fatalf("Unexpected entry: %v", restEntry)
}
}
func TestValidationSchemaWithStatus(t *testing.T) {
tearDown, config, _, err := fixtures.StartDefaultServer(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := newNoxuValidationCRDs()[0]
// make sure we are not restricting fields to properties even in subschemas
noxuDefinition.Spec.Versions[0].Schema.OpenAPIV3Schema = &apiextensionsv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"spec": {
Type: "object",
Description: "Validation for spec",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"replicas": {
Type: "integer",
},
},
},
},
Required: []string{"spec"},
Description: "This is a description at the root of the schema",
}
noxuDefinition.Spec.Versions[1].Schema.OpenAPIV3Schema = noxuDefinition.Spec.Versions[0].Schema.OpenAPIV3Schema
_, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatalf("unable to created crd %v: %v", noxuDefinition.Name, err)
}
}
func TestValidateOnlyStatus(t *testing.T) {
tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
// UpdateStatus should validate only status
// 1. create a crd with max value of .spec.num = 10 and .status.num = 10
// 2. create a cr with .spec.num = 10 and .status.num = 10 (valid)
// 3. update the spec of the cr with .spec.num = 15 (spec is invalid), expect no error
// 4. update the status of the cr with .status.num = 15 (status is invalid), expect error
// max value of spec.num = 10 and status.num = 10
schema := &apiextensionsv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"spec": {
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"num": {
Type: "integer",
Maximum: float64Ptr(10),
},
},
},
"status": {
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"num": {
Type: "integer",
Maximum: float64Ptr(10),
},
},
},
},
}
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
noxuDefinition.Spec.Versions[0].Schema = &apiextensionsv1.CustomResourceValidation{
OpenAPIV3Schema: schema.DeepCopy(),
}
noxuDefinition.Spec.Versions[1].Schema = &apiextensionsv1.CustomResourceValidation{
OpenAPIV3Schema: schema.DeepCopy(),
}
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
ns := "not-the-default"
for _, v := range noxuDefinition.Spec.Versions {
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name)
// set .spec.num = 10 and .status.num = 10
noxuInstance := NewNoxuSubresourceInstance(ns, "foo", v.Name)
err = unstructured.SetNestedField(noxuInstance.Object, int64(10), "status", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
createdNoxuInstance, err := instantiateVersionedCustomResource(t, noxuInstance, noxuResourceClient, noxuDefinition, v.Name)
if err != nil {
t.Fatalf("unable to create noxu instance: %v", err)
}
// update the spec with .spec.num = 15, expecting no error
err = unstructured.SetNestedField(createdNoxuInstance.Object, int64(15), "spec", "num")
if err != nil {
t.Fatalf("unexpected error setting .spec.num: %v", err)
}
createdNoxuInstance, err = noxuResourceClient.UpdateStatus(context.TODO(), createdNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// update with .status.num = 15, expecting an error
err = unstructured.SetNestedField(createdNoxuInstance.Object, int64(15), "status", "num")
if err != nil {
t.Fatalf("unexpected error setting .status.num: %v", err)
}
_, err = noxuResourceClient.UpdateStatus(context.TODO(), createdNoxuInstance, metav1.UpdateOptions{})
if err == nil {
t.Fatal("expected error, but got none")
}
statusError, isStatus := err.(*apierrors.StatusError)
if !isStatus || statusError == nil {
t.Fatalf("expected status error, got %T: %v", err, err)
}
if !strings.Contains(statusError.Error(), "Invalid value") {
t.Fatalf("expected 'Invalid value' in error, got: %v", err)
}
noxuResourceClient.Delete(context.TODO(), "foo", metav1.DeleteOptions{})
}
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}
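// Editorial sketch (not part of the upstream test): as exercised above, an out-of-range
// .spec value sent to UpdateStatus is not rejected — the spec change is simply dropped —
// while an out-of-range .status value fails validation. A minimal status-only constraint
// built from the same v1 schema types (float64Ptr is the package helper used above):
func sketchStatusOnlyConstraint(max float64) apiextensionsv1.JSONSchemaProps {
	return apiextensionsv1.JSONSchemaProps{
		Type: "object",
		Properties: map[string]apiextensionsv1.JSONSchemaProps{
			"status": {
				Type: "object",
				Properties: map[string]apiextensionsv1.JSONSchemaProps{
					"num": {Type: "integer", Maximum: float64Ptr(max)},
				},
			},
		},
	}
}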
func TestSubresourcesDiscovery(t *testing.T) {
tearDown, config, _, err := fixtures.StartDefaultServer(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
for _, v := range noxuDefinition.Spec.Versions {
group := "mygroup.example.com"
version := v.Name
resources, err := apiExtensionClient.Discovery().ServerResourcesForGroupVersion(group + "/" + version)
if err != nil {
t.Fatal(err)
}
if len(resources.APIResources) != 3 {
t.Fatalf("Expected exactly the resources \"noxus\", \"noxus/status\" and \"noxus/scale\" in group version %v/%v via discovery, got: %v", group, version, resources.APIResources)
}
// check discovery info for status
status := resources.APIResources[1]
if status.Name != "noxus/status" {
t.Fatalf("incorrect status via discovery: expected name: %v, got: %v", "noxus/status", status.Name)
}
if status.Namespaced != true {
t.Fatalf("incorrect status via discovery: expected namespace: %v, got: %v", true, status.Namespaced)
}
if status.Kind != "WishIHadChosenNoxu" {
t.Fatalf("incorrect status via discovery: expected kind: %v, got: %v", "WishIHadChosenNoxu", status.Kind)
}
expectedVerbs := []string{"get", "patch", "update"}
sort.Strings(status.Verbs)
if !reflect.DeepEqual([]string(status.Verbs), expectedVerbs) {
t.Fatalf("incorrect status via discovery: expected: %v, got: %v", expectedVerbs, status.Verbs)
}
// check discovery info for scale
scale := resources.APIResources[2]
if scale.Group != autoscaling.GroupName {
t.Fatalf("incorrect scale via discovery: expected group: %v, got: %v", autoscaling.GroupName, scale.Group)
}
if scale.Version != "v1" {
t.Fatalf("incorrect scale via discovery: expected version: %v, got %v", "v1", scale.Version)
}
if scale.Name != "noxus/scale" {
t.Fatalf("incorrect scale via discovery: expected name: %v, got: %v", "noxus/scale", scale.Name)
}
if scale.Namespaced != true {
t.Fatalf("incorrect scale via discovery: expected namespace: %v, got: %v", true, scale.Namespaced)
}
if scale.Kind != "Scale" {
t.Fatalf("incorrect scale via discovery: expected kind: %v, got: %v", "Scale", scale.Kind)
}
sort.Strings(scale.Verbs)
if !reflect.DeepEqual([]string(scale.Verbs), expectedVerbs) {
t.Fatalf("incorrect scale via discovery: expected: %v, got: %v", expectedVerbs, scale.Verbs)
}
}
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}
func TestGeneration(t *testing.T) {
tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
ns := "not-the-default"
for _, v := range noxuDefinition.Spec.Versions {
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name)
_, err = instantiateVersionedCustomResource(t, NewNoxuSubresourceInstance(ns, "foo", v.Name), noxuResourceClient, noxuDefinition, v.Name)
if err != nil {
t.Fatalf("unable to create noxu instance: %v", err)
}
// .metadata.generation = 1
gottenNoxuInstance, err := noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if gottenNoxuInstance.GetGeneration() != 1 {
t.Fatalf(".metadata.generation should be 1 after creation")
}
// .status.num = 20
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(20), "status", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// UpdateStatus does not increment generation
updatedStatusInstance, err := noxuResourceClient.UpdateStatus(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unable to update status: %v", err)
}
if updatedStatusInstance.GetGeneration() != 1 {
t.Fatalf("updating status should not increment .metadata.generation: expected: %v, got: %v", 1, updatedStatusInstance.GetGeneration())
}
gottenNoxuInstance, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// .spec.num = 20
err = unstructured.SetNestedField(gottenNoxuInstance.Object, int64(20), "spec", "num")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// Update increments generation
updatedInstance, err := noxuResourceClient.Update(context.TODO(), gottenNoxuInstance, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unable to update instance: %v", err)
}
if updatedInstance.GetGeneration() != 2 {
t.Fatalf("updating spec should increment .metadata.generation: expected: %v, got: %v", 2, updatedStatusInstance.GetGeneration())
}
noxuResourceClient.Delete(context.TODO(), "foo", metav1.DeleteOptions{})
}
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}
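// Editorial sketch (not in the upstream file): as the test above shows, .metadata.generation
// is bumped by spec-changing writes to the main resource but left alone by writes to the
// status subresource. This hypothetical helper reports whether a regular Update bumped it.
func sketchGenerationBumpedByUpdate(client dynamic.ResourceInterface, obj *unstructured.Unstructured) (bool, error) {
	before := obj.GetGeneration()
	updated, err := client.Update(context.TODO(), obj, metav1.UpdateOptions{})
	if err != nil {
		return false, err
	}
	return updated.GetGeneration() == before+1, nil
}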
func TestSubresourcePatch(t *testing.T) {
groupResource := schema.GroupResource{
Group: "mygroup.example.com",
Resource: "noxus",
}
tearDown, config, _, err := fixtures.StartDefaultServer(t)
if err != nil {
t.Fatal(err)
}
defer tearDown()
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinitions := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)
for _, noxuDefinition := range noxuDefinitions {
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
ns := "not-the-default"
for _, v := range noxuDefinition.Spec.Versions {
noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name)
t.Logf("Creating foo")
_, err = instantiateVersionedCustomResource(t, NewNoxuSubresourceInstance(ns, "foo", v.Name), noxuResourceClient, noxuDefinition, v.Name)
if err != nil {
t.Fatalf("unable to create noxu instance: %v", err)
}
scaleClient, err := fixtures.CreateNewVersionedScaleClient(noxuDefinition, config, v.Name)
if err != nil {
t.Fatal(err)
}
t.Logf("Patching .status.num to 999")
patch := []byte(`{"spec": {"num":999}, "status": {"num":999}}`)
patchedNoxuInstance, err := noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "status")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num") // .status.num should be 999
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num") // .spec.num should remain 10
// server-side-apply increments resourceVersion if the resource is unchanged for 1 second after the previous write,
// and by waiting a second we ensure that resourceVersion will be updated if the no-op patch increments resourceVersion
time.Sleep(time.Second)
// no-op patch
rv := patchedNoxuInstance.GetResourceVersion()
found := false
t.Logf("Patching .status.num again to 999")
patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "status")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// make sure no-op patch does not increment resourceVersion
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num")
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num")
expectString(t, patchedNoxuInstance.UnstructuredContent(), rv, "metadata", "resourceVersion")
// empty patch
t.Logf("Applying empty patch")
patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, []byte(`{}`), metav1.PatchOptions{}, "status")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// an empty patch is a no-op patch. make sure it does not increment resourceVersion
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num")
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num")
expectString(t, patchedNoxuInstance.UnstructuredContent(), rv, "metadata", "resourceVersion")
t.Logf("Patching .spec.replicas to 7")
patch = []byte(`{"spec": {"replicas":7}, "status": {"replicas":7}}`)
patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "scale")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 7, "spec", "replicas")
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 0, "status", "replicas") // .status.replicas should remain 0
rv, found, err = unstructured.NestedString(patchedNoxuInstance.UnstructuredContent(), "metadata", "resourceVersion")
if err != nil {
t.Fatal(err)
}
if !found {
t.Fatalf("metadata.resourceVersion not found")
}
// Scale.Spec.Replicas = 7 but Scale.Status.Replicas should remain 0
gottenScale, err := scaleClient.Scales("not-the-default").Get(context.TODO(), groupResource, "foo", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if gottenScale.Spec.Replicas != 7 {
t.Fatalf("Scale.Spec.Replicas: expected: %v, got: %v", 7, gottenScale.Spec.Replicas)
}
if gottenScale.Status.Replicas != 0 {
t.Fatalf("Scale.Status.Replicas: expected: %v, got: %v", 0, gottenScale.Spec.Replicas)
}
// no-op patch
t.Logf("Patching .spec.replicas again to 7")
patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "scale")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// make sure no-op patch does not increment resourceVersion
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 7, "spec", "replicas")
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 0, "status", "replicas")
expectString(t, patchedNoxuInstance.UnstructuredContent(), rv, "metadata", "resourceVersion")
// empty patch
t.Logf("Applying empty patch")
patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, []byte(`{}`), metav1.PatchOptions{}, "scale")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// an empty patch is a no-op patch. make sure it does not increment resourceVersion
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 7, "spec", "replicas")
expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 0, "status", "replicas")
expectString(t, patchedNoxuInstance.UnstructuredContent(), rv, "metadata", "resourceVersion")
// make sure strategic merge patch is not supported for both status and scale
_, err = noxuResourceClient.Patch(context.TODO(), "foo", types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
if err == nil {
t.Fatalf("unexpected non-error: strategic merge patch is not supported for custom resources")
}
_, err = noxuResourceClient.Patch(context.TODO(), "foo", types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "scale")
if err == nil {
t.Fatalf("unexpected non-error: strategic merge patch is not supported for custom resources")
}
noxuResourceClient.Delete(context.TODO(), "foo", metav1.DeleteOptions{})
}
if err := fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil {
t.Fatal(err)
}
}
}<|fim▁end|>
|
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url
from fundraiser_app import views
urlpatterns = [
url(r'^$', views.FMItemListView.as_view(), name='fmitem_list'),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^fmitem/(?P<pk>\d+)$', views.FMItemDetailView.as_view(), name='fmitem_detail'),
url(r'^fmitem/new$', views.FMItemCreateView.as_view(), name='fmitem_new'),
url(r'^fmitem/(?P<pk>\d+)/edit$', views.FMItemUpdateView.as_view(), name='fmitem_edit'),<|fim▁hole|><|fim▁end|>
|
url(r'^fmitem/(?P<pk>\d+)/remove$', views.FMItemDeleteView.as_view(), name='fmitem_remove'),
url(r'^fmitem/(?P<pk>\d+)/publish/$', views.fmitem_publish, name='fmitem_publish'),
]
|
<|file_name|>enchantingdialog.hpp<|end_file_name|><|fim▁begin|>#ifndef MWGUI_ENCHANTINGDIALOG_H
#define MWGUI_ENCHANTINGDIALOG_H
#include "spellcreationdialog.hpp"
#include "../mwbase/windowmanager.hpp"
#include "../mwmechanics/enchanting.hpp"
namespace MWGui
{
class ItemSelectionDialog;
class ItemWidget;
class EnchantingDialog : public WindowBase, public ReferenceInterface, public EffectEditorBase
{
public:
EnchantingDialog();
virtual ~EnchantingDialog();
virtual void open();
virtual void exit();
void setSoulGem (const MWWorld::Ptr& gem);
void setItem (const MWWorld::Ptr& item);
void startEnchanting(MWWorld::Ptr actor);
void startSelfEnchanting(MWWorld::Ptr soulgem);<|fim▁hole|>
protected:
virtual void onReferenceUnavailable();
virtual void notifyEffectsChanged ();
void onCancelButtonClicked(MyGUI::Widget* sender);
void onSelectItem (MyGUI::Widget* sender);
void onSelectSoul (MyGUI::Widget* sender);
void onItemSelected(MWWorld::Ptr item);
void onItemCancel();
void onSoulSelected(MWWorld::Ptr item);
void onSoulCancel();
void onBuyButtonClicked(MyGUI::Widget* sender);
void updateLabels();
void onTypeButtonClicked(MyGUI::Widget* sender);
ItemSelectionDialog* mItemSelectionDialog;
MyGUI::Button* mCancelButton;
ItemWidget* mItemBox;
ItemWidget* mSoulBox;
MyGUI::Button* mTypeButton;
MyGUI::Button* mBuyButton;
MyGUI::TextBox* mName;
MyGUI::TextBox* mEnchantmentPoints;
MyGUI::TextBox* mCastCost;
MyGUI::TextBox* mCharge;
MyGUI::TextBox* mPrice;
MyGUI::TextBox* mPriceText;
MWMechanics::Enchanting mEnchanting;
ESM::EffectList mEffectList;
};
}
#endif<|fim▁end|>
|
virtual void resetReference();
|
<|file_name|>unrecognizedgrouppb_test.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-gogo.
// source: unrecognizedgroup.proto
// DO NOT EDIT!
/*
Package unrecognizedgroup is a generated protocol buffer package.
It is generated from these files:
unrecognizedgroup.proto
It has these top-level messages:
NewNoGroup
A
OldWithGroup
*/
package unrecognizedgroup
import testing "testing"
import math_rand "math/rand"
import time "time"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
import fmt "fmt"
import go_parser "go/parser"
import proto "github.com/gogo/protobuf/proto"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func TestNewNoGroupProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &NewNoGroup{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestNewNoGroupMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &NewNoGroup{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestAProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &A{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {<|fim▁hole|> }
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestAMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &A{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroupProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestOldWithGroup_Group1Proto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group1(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup_Group1{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestOldWithGroup_Group2Proto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group2(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup_Group2{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestNewNoGroupJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &NewNoGroup{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestAJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &A{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestOldWithGroupJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group1JSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group1(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup_Group1{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group2JSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group2(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &OldWithGroup_Group2{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestNewNoGroupProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &NewNoGroup{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestNewNoGroupProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &NewNoGroup{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestAProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &A{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestAProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &A{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroupProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &OldWithGroup{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroupProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &OldWithGroup{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group1ProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group1(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &OldWithGroup_Group1{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group1ProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group1(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &OldWithGroup_Group1{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group2ProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group2(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &OldWithGroup_Group2{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOldWithGroup_Group2ProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedOldWithGroup_Group2(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &OldWithGroup_Group2{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestUnrecognizedgroupDescription(t *testing.T) {
UnrecognizedgroupDescription()
}
func TestNewNoGroupVerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedNewNoGroup(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &NewNoGroup{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestAVerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedA(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &A{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestOldWithGroupVerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &OldWithGroup{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestOldWithGroup_Group1VerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group1(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &OldWithGroup_Group1{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestOldWithGroup_Group2VerboseEqual(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group2(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &OldWithGroup_Group2{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestNewNoGroupGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedNewNoGroup(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestAGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedA(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestOldWithGroupGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestOldWithGroup_Group1GoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group1(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestOldWithGroup_Group2GoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group2(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestNewNoGroupSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedNewNoGroup(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func TestASize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedA(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func TestNewNoGroupStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedNewNoGroup(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestAStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedA(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestOldWithGroupStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestOldWithGroup_Group1Stringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group1(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestOldWithGroup_Group2Stringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedOldWithGroup_Group2(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
//These tests are generated by github.com/gogo/protobuf/plugin/testgen<|fim▁end|>
|
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
<|file_name|>tyencode.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type encoding
#![allow(unused_must_use)] // as with encoding, everything is a no-fail MemWriter
#![allow(non_camel_case_types)]<|fim▁hole|>
use std::cell::RefCell;
use std::collections::HashMap;
use middle::subst;
use middle::subst::VecPerParamSpace;
use middle::ty::ParamTy;
use middle::ty;
use syntax::abi::Abi;
use syntax::ast;
use syntax::ast::*;
use syntax::diagnostic::SpanHandler;
use syntax::parse::token;
use rbml::io::SeekableMemWriter;
macro_rules! mywrite( ($($arg:tt)*) => ({ write!($($arg)*); }) )
pub struct ctxt<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
// Def -> str Callback:
pub ds: fn(DefId) -> String,
// The type context.
pub tcx: &'a ty::ctxt<'tcx>,
pub abbrevs: &'a abbrev_map
}
// Compact string representation for ty.t values. API ty_str & parse_from_str.
// Extra parameters are for converting to/from def_ids in the string rep.
// Whatever format you choose should not contain pipe characters.
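// Illustrative example (assumed offsets, not from the original source): a type
// whose encoding starts at byte offset 0x2a and spans 0x10 bytes would later be
// re-emitted as the abbreviation "#2a:10#" by enc_ty below.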
pub struct ty_abbrev {
s: String
}
pub type abbrev_map = RefCell<HashMap<ty::t, ty_abbrev>>;
pub fn enc_ty(w: &mut SeekableMemWriter, cx: &ctxt, t: ty::t) {
match cx.abbrevs.borrow_mut().find(&t) {
Some(a) => { w.write(a.s.as_bytes()); return; }
None => {}
}
let pos = w.tell().unwrap();
enc_sty(w, cx, &ty::get(t).sty);
let end = w.tell().unwrap();
let len = end - pos;
fn estimate_sz(u: u64) -> u64 {
let mut n = u;
let mut len = 0;
while n != 0 { len += 1; n = n >> 4; }
return len;
}
let abbrev_len = 3 + estimate_sz(pos) + estimate_sz(len);
if abbrev_len < len {
// I.e. it's actually an abbreviation.
cx.abbrevs.borrow_mut().insert(t, ty_abbrev {
s: format!("#{:x}:{:x}#", pos, len)
});
}
}
fn enc_mutability(w: &mut SeekableMemWriter, mt: ast::Mutability) {
match mt {
MutImmutable => (),
MutMutable => mywrite!(w, "m"),
}
}
fn enc_mt(w: &mut SeekableMemWriter, cx: &ctxt, mt: ty::mt) {
enc_mutability(w, mt.mutbl);
enc_ty(w, cx, mt.ty);
}
fn enc_opt<T>(w: &mut SeekableMemWriter, t: Option<T>, enc_f: |&mut SeekableMemWriter, T|) {
match t {
None => mywrite!(w, "n"),
Some(v) => {
mywrite!(w, "s");
enc_f(w, v);
}
}
}
fn enc_vec_per_param_space<T>(w: &mut SeekableMemWriter,
cx: &ctxt,
v: &VecPerParamSpace<T>,
op: |&mut SeekableMemWriter, &ctxt, &T|) {
for &space in subst::ParamSpace::all().iter() {
mywrite!(w, "[");
for t in v.get_slice(space).iter() {
op(w, cx, t);
}
mywrite!(w, "]");
}
}
pub fn enc_substs(w: &mut SeekableMemWriter, cx: &ctxt, substs: &subst::Substs) {
enc_region_substs(w, cx, &substs.regions);
enc_vec_per_param_space(w, cx, &substs.types,
|w, cx, &ty| enc_ty(w, cx, ty));
}
fn enc_region_substs(w: &mut SeekableMemWriter, cx: &ctxt, substs: &subst::RegionSubsts) {
match *substs {
subst::ErasedRegions => {
mywrite!(w, "e");
}
subst::NonerasedRegions(ref regions) => {
mywrite!(w, "n");
enc_vec_per_param_space(w, cx, regions,
|w, cx, &r| enc_region(w, cx, r));
}
}
}
pub fn enc_region(w: &mut SeekableMemWriter, cx: &ctxt, r: ty::Region) {
match r {
ty::ReLateBound(id, br) => {
mywrite!(w, "b[{}|", id);
enc_bound_region(w, cx, br);
mywrite!(w, "]");
}
ty::ReEarlyBound(node_id, space, index, name) => {
mywrite!(w, "B[{}|{}|{}|{}]",
node_id,
space.to_uint(),
index,
token::get_name(name));
}
ty::ReFree(ref fr) => {
mywrite!(w, "f[{}|", fr.scope_id);
enc_bound_region(w, cx, fr.bound_region);
mywrite!(w, "]");
}
ty::ReScope(nid) => {
mywrite!(w, "s{}|", nid);
}
ty::ReStatic => {
mywrite!(w, "t");
}
ty::ReEmpty => {
mywrite!(w, "e");
}
ty::ReInfer(_) => {
// these should not crop up after typeck
cx.diag.handler().bug("cannot encode region variables");
}
}
}
fn enc_bound_region(w: &mut SeekableMemWriter, cx: &ctxt, br: ty::BoundRegion) {
match br {
ty::BrAnon(idx) => {
mywrite!(w, "a{}|", idx);
}
ty::BrNamed(d, name) => {
mywrite!(w, "[{}|{}]",
(cx.ds)(d),
token::get_name(name));
}
ty::BrFresh(id) => {
mywrite!(w, "f{}|", id);
}
}
}
pub fn enc_trait_ref(w: &mut SeekableMemWriter, cx: &ctxt, s: &ty::TraitRef) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
enc_substs(w, cx, &s.substs);
}
pub fn enc_trait_store(w: &mut SeekableMemWriter, cx: &ctxt, s: ty::TraitStore) {
match s {
ty::UniqTraitStore => mywrite!(w, "~"),
ty::RegionTraitStore(re, m) => {
mywrite!(w, "&");
enc_region(w, cx, re);
enc_mutability(w, m);
}
}
}
fn enc_sty(w: &mut SeekableMemWriter, cx: &ctxt, st: &ty::sty) {
match *st {
ty::ty_nil => mywrite!(w, "n"),
ty::ty_bot => mywrite!(w, "z"),
ty::ty_bool => mywrite!(w, "b"),
ty::ty_char => mywrite!(w, "c"),
ty::ty_int(t) => {
match t {
TyI => mywrite!(w, "i"),
TyI8 => mywrite!(w, "MB"),
TyI16 => mywrite!(w, "MW"),
TyI32 => mywrite!(w, "ML"),
TyI64 => mywrite!(w, "MD")
}
}
ty::ty_uint(t) => {
match t {
TyU => mywrite!(w, "u"),
TyU8 => mywrite!(w, "Mb"),
TyU16 => mywrite!(w, "Mw"),
TyU32 => mywrite!(w, "Ml"),
TyU64 => mywrite!(w, "Md")
}
}
ty::ty_float(t) => {
match t {
TyF32 => mywrite!(w, "Mf"),
TyF64 => mywrite!(w, "MF"),
}
}
ty::ty_enum(def, ref substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
ref bounds
}) => {
mywrite!(w, "x[{}|", (cx.ds)(def_id));
enc_substs(w, cx, substs);
enc_existential_bounds(w, cx, bounds);
mywrite!(w, "]");
}
ty::ty_tup(ref ts) => {
mywrite!(w, "T[");
for t in ts.iter() { enc_ty(w, cx, *t); }
mywrite!(w, "]");
}
ty::ty_box(typ) => { mywrite!(w, "@"); enc_ty(w, cx, typ); }
ty::ty_uniq(typ) => { mywrite!(w, "~"); enc_ty(w, cx, typ); }
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
enc_region(w, cx, r);
enc_mt(w, cx, mt);
}
ty::ty_vec(t, sz) => {
mywrite!(w, "V");
enc_ty(w, cx, t);
mywrite!(w, "/");
match sz {
Some(n) => mywrite!(w, "{}|", n),
None => mywrite!(w, "|"),
}
}
ty::ty_str => {
mywrite!(w, "v");
}
ty::ty_closure(ref f) => {
mywrite!(w, "f");
enc_closure_ty(w, cx, &**f);
}
ty::ty_bare_fn(ref f) => {
mywrite!(w, "F");
enc_bare_fn_ty(w, cx, f);
}
ty::ty_infer(_) => {
cx.diag.handler().bug("cannot encode inference variable types");
}
ty::ty_param(ParamTy {space, idx: id, def_id: did}) => {
mywrite!(w, "p{}|{}|{}|", (cx.ds)(did), id, space.to_uint())
}
ty::ty_struct(def, ref substs) => {
mywrite!(w, "a[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_unboxed_closure(def, region) => {
mywrite!(w, "k{}", (cx.ds)(def));
enc_region(w, cx, region);
}
ty::ty_err => {
mywrite!(w, "e");
}
ty::ty_open(_) => {
cx.diag.handler().bug("unexpected type in enc_sty (ty_open)");
}
}
}
fn enc_fn_style(w: &mut SeekableMemWriter, p: FnStyle) {
match p {
NormalFn => mywrite!(w, "n"),
UnsafeFn => mywrite!(w, "u"),
}
}
fn enc_abi(w: &mut SeekableMemWriter, abi: Abi) {
mywrite!(w, "[");
mywrite!(w, "{}", abi.name());
mywrite!(w, "]")
}
fn enc_onceness(w: &mut SeekableMemWriter, o: Onceness) {
match o {
Once => mywrite!(w, "o"),
Many => mywrite!(w, "m")
}
}
pub fn enc_bare_fn_ty(w: &mut SeekableMemWriter, cx: &ctxt, ft: &ty::BareFnTy) {
enc_fn_style(w, ft.fn_style);
enc_abi(w, ft.abi);
enc_fn_sig(w, cx, &ft.sig);
}
pub fn enc_closure_ty(w: &mut SeekableMemWriter, cx: &ctxt, ft: &ty::ClosureTy) {
enc_fn_style(w, ft.fn_style);
enc_onceness(w, ft.onceness);
enc_trait_store(w, cx, ft.store);
enc_existential_bounds(w, cx, &ft.bounds);
enc_fn_sig(w, cx, &ft.sig);
enc_abi(w, ft.abi);
}
fn enc_fn_sig(w: &mut SeekableMemWriter, cx: &ctxt, fsig: &ty::FnSig) {
mywrite!(w, "[{}|", fsig.binder_id);
for ty in fsig.inputs.iter() {
enc_ty(w, cx, *ty);
}
mywrite!(w, "]");
if fsig.variadic {
mywrite!(w, "V");
} else {
mywrite!(w, "N");
}
enc_ty(w, cx, fsig.output);
}
pub fn enc_builtin_bounds(w: &mut SeekableMemWriter, _cx: &ctxt, bs: &ty::BuiltinBounds) {
for bound in bs.iter() {
match bound {
ty::BoundSend => mywrite!(w, "S"),
ty::BoundSized => mywrite!(w, "Z"),
ty::BoundCopy => mywrite!(w, "P"),
ty::BoundSync => mywrite!(w, "T"),
}
}
mywrite!(w, ".");
}
pub fn enc_existential_bounds(w: &mut SeekableMemWriter, cx: &ctxt, bs: &ty::ExistentialBounds) {
enc_region(w, cx, bs.region_bound);
enc_builtin_bounds(w, cx, &bs.builtin_bounds);
}
pub fn enc_bounds(w: &mut SeekableMemWriter, cx: &ctxt, bs: &ty::ParamBounds) {
enc_builtin_bounds(w, cx, &bs.builtin_bounds);
for &r in bs.region_bounds.iter() {
mywrite!(w, "R");
enc_region(w, cx, r);
}
for tp in bs.trait_bounds.iter() {
mywrite!(w, "I");
enc_trait_ref(w, cx, &**tp);
}
mywrite!(w, ".");
}
pub fn enc_type_param_def(w: &mut SeekableMemWriter, cx: &ctxt, v: &ty::TypeParameterDef) {
mywrite!(w, "{}:{}|{}|{}|",
token::get_ident(v.ident), (cx.ds)(v.def_id),
v.space.to_uint(), v.index);
enc_opt(w, v.associated_with, |w, did| mywrite!(w, "{}", (cx.ds)(did)));
mywrite!(w, "|");
enc_bounds(w, cx, &v.bounds);
enc_opt(w, v.default, |w, t| enc_ty(w, cx, t));
}<|fim▁end|>
| |
<|file_name|>condvar.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use libc::{self, DWORD};
use sys::c;
use sys::mutex::{self, Mutex};
use sys::os;
use time::Duration;
pub struct Condvar { inner: UnsafeCell<c::CONDITION_VARIABLE> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
impl Condvar {
pub const fn new() -> Condvar {
Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
let r = c::SleepConditionVariableSRW(self.inner.get(),
mutex::raw(mutex),
libc::INFINITE,
0);
debug_assert!(r != 0);
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
let r = c::SleepConditionVariableSRW(self.inner.get(),
mutex::raw(mutex),
super::dur2timeout(dur),
0);
if r == 0 {
const ERROR_TIMEOUT: DWORD = 0x5B4;
debug_assert_eq!(os::errno() as usize, ERROR_TIMEOUT as usize);
false
} else {
true
}<|fim▁hole|> }
#[inline]
pub unsafe fn notify_one(&self) {
c::WakeConditionVariable(self.inner.get())
}
#[inline]
pub unsafe fn notify_all(&self) {
c::WakeAllConditionVariable(self.inner.get())
}
pub unsafe fn destroy(&self) {
// ...
}
}<|fim▁end|>
| |
<|file_name|>dc_algorithm_rebeca.hpp<|end_file_name|><|fim▁begin|>////////////////////////////////////////////////////////////////////////////////
// Author: Thomas Arndt [email protected]
// This file is part of CYTHAR Sequenzer
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
////////////////////////////////////////////////////////////////////////////////
#ifndef DC_ALGROITHM_REBECA_HPP_INCLUDED
#define DC_ALGROITHM_REBECA_HPP_INCLUDED
#include <dc_objectstructure.h>
#include <dc_objectobserver.h>
#include <stddef.h>
#include <vector>
#ifdef DEBUG
#include <iostream>
#endif // DEBUG
namespace dc_algorithm{
using namespace dc_objects;
static inline int calc_one_repeat_step(int const left_hand_basevalue__,
OPERATOR const _operator_,
int const operand__) noexcept
{
switch (_operator_)
{
case OPERATOR::NOT:
return left_hand_basevalue__;
case OPERATOR::PLUS:
return left_hand_basevalue__+operand__;
case OPERATOR::MINUS:
return left_hand_basevalue__-operand__ ;
case OPERATOR::MULTI:
if(operand__ != 0)
return left_hand_basevalue__*operand__;
/*else*/
return left_hand_basevalue__;
case OPERATOR::DIV:
if(operand__ != 0)
return left_hand_basevalue__/operand__;
/*else*/
return left_hand_basevalue__;
case OPERATOR::MODULA:
if(operand__ != 0)
return left_hand_basevalue__%operand__;
/*else*/
return left_hand_basevalue__;
default:
return 0;
}
}
typedef std::vector<int> TYPE_series;
//! Computes a simple series
static inline TYPE_series calc_a_series(int const left_hand_basevalue__,
OPERATOR const _operator_,
int const operand__,
size_t const repeat_steps__) noexcept
{
TYPE_series results_serie{};
if(repeat_steps__ == 0)
return results_serie;
int repeat_step_result_or_base{left_hand_basevalue__};
for(size_t repeat_step{0} ; repeat_step != repeat_steps__ ; ++repeat_step)
{
repeat_step_result_or_base = calc_one_repeat_step(repeat_step_result_or_base,
_operator_,
operand__);
results_serie.push_back(repeat_step_result_or_base);
}
return results_serie;
}
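// Illustrative example (assumed values, not from the original source):
// calc_a_series(1, OPERATOR::PLUS, 2, 4) produces the series {3, 5, 7, 9},
// since each repeat step applies the operator to the previous step's result.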
//! Combines two series element-by-element with _operator_ into a new series
inline static TYPE_series calc_two_series(TYPE_series const& left_hand_serie__,
OPERATOR const _operator_,
TYPE_series const& operand_serie__) noexcept
{
#ifdef DEBUG
if(left_hand_serie__.size() != operand_serie__.size())
std::cout << "BUG: Series incompatible!" << std::endl;
#endif // DEBUG
TYPE_series results_serie{};
for(size_t repeat_step{0};
repeat_step != left_hand_serie__.size();
++repeat_step)
{
results_serie.push_back(calc_one_repeat_step(left_hand_serie__[repeat_step],
_operator_,
operand_serie__[repeat_step]));
}
return std::move(results_serie);
}
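// Illustrative example (assumed values, not from the original source):
// calc_two_series({1, 2, 3}, OPERATOR::PLUS, {10, 20, 30}) yields {11, 22, 33};
// both input series are expected to have the same length (see the DEBUG check).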
//! Combines a series with its entry index (= repeat step)
inline static TYPE_series calc_serie_by_index(TYPE_series const& left_hand_serie__,
OPERATOR const _operator_) noexcept
{
TYPE_series results_serie{};
for(size_t repeat_step{0};
repeat_step != left_hand_serie__.size();
++repeat_step)
{
results_serie.push_back(calc_one_repeat_step(left_hand_serie__[repeat_step],
_operator_,<|fim▁hole|> }
//! Rebeca calculation for the interval
static inline TYPE_series calc_rebeca_for_interval(dc_objects::Step const* step__)
{
//auto delay(step__->get_property<StepPropertiesIndex::DELAY>());
auto repeat_steps(step__->get_property<StepPropertiesIndex::REPEAT_TIMES>());
auto interval(step__->get_property<StepPropertiesIndex::INTERVAL>());
auto operator_(step__->get_property<StepPropertiesIndex::INTERVAL_OPERATOR>());
auto operand_(step__->get_property<StepPropertiesIndex::INTERVAL_OPERAND>());
return calc_a_series(interval,
static_cast<OPERATOR>(operator_),
operand_,
repeat_steps);
}
//! Rebeca calculation for a simple offset series
static inline TYPE_series calc_rebeca_for_offset(dc_objects::Step const* step__)
{
//auto delay(step__->get_property<StepPropertiesIndex::DELAY>());
auto repeat_steps(step__->get_property<StepPropertiesIndex::REPEAT_TIMES>());
auto offset(step__->get_property<StepPropertiesIndex::OFFSET>());
auto operator_(step__->get_property<StepPropertiesIndex::OFFSET_OPERATOR>());
auto operand_(step__->get_property<StepPropertiesIndex::OFFSET_OPERAND>());
return calc_a_series(offset,
static_cast<OPERATOR>(operator_),
operand_,
repeat_steps);
}
//! Rebeca calculation for a simple velocity series
static inline TYPE_series calc_rebeca_for_velocity(dc_objects::Step const* step__)
{
//auto delay(step__->get_property<StepPropertiesIndex::DELAY>());
auto repeat_steps(step__->get_property<StepPropertiesIndex::REPEAT_TIMES>());
auto velocity(step__->get_property<StepPropertiesIndex::VELOCITY>());
auto operator_(step__->get_property<StepPropertiesIndex::VELOCITY_OPERATOR>());
auto operand_(step__->get_property<StepPropertiesIndex::VELOCITY_OPERAND>());
return calc_a_series(velocity,
static_cast<OPERATOR>(operator_),
operand_,
repeat_steps);
}
//! Rebeca calculation for the length
static inline TYPE_series calc_rebeca_for_length(dc_objects::Step const* step__)
{
//auto delay(step__->get_property<StepPropertiesIndex::DELAY>());
auto repeat_steps(step__->get_property<StepPropertiesIndex::REPEAT_TIMES>());
auto length(step__->get_property<StepPropertiesIndex::LENGTH>());
auto operator_(step__->get_property<StepPropertiesIndex::LENGTH_OPERATOR>());
auto operand_(step__->get_property<StepPropertiesIndex::LENGTH_OPERAND>());
return calc_a_series(length,
static_cast<OPERATOR>(operator_),
operand_,
repeat_steps);
}
template<int MIN, int MAX>
static inline TYPE_series fit_series_to_range(TYPE_series serie__) noexcept
{
for(size_t i{0} ; i!= serie__.size() ; ++i)
{
if(serie__[i] < MIN)
{
serie__[i] = MIN;
continue;
}
if(serie__[i] > MAX)
serie__[i] = MAX;
}
return std::move(serie__);
}
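// Illustrative example (assumed range bounds, not from the original source):
// fit_series_to_range<0, 127>({-5, 60, 200}) returns {0, 60, 127}; the callers
// below instantiate it with the OFFSET_/VELOCITY_/LENGTH_ range constants.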
//! Combines the offset-interval series with the repeat series
static inline TYPE_series calc_rebeca_for_offet_complete(dc_objects::Step const* step__)
{
// Intermediate calculation
auto offset2interval_serie(calc_two_series(calc_rebeca_for_offset(step__), //! OPERATOR::NOT passes the offset through unchanged.
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::OFFSET_2_INTERVAL_OPERATOR>()),
calc_rebeca_for_interval(step__)));
// Final Result
auto offset_series(calc_serie_by_index(offset2interval_serie,
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::OFFSET_INTERVAL_2_REPEAT_OPERATOR>())));
// Clip to the valid range?
if(step__->get_property<StepPropertiesIndex::OFFSET_FIT_TO_RANGE>() == true)
offset_series = fit_series_to_range<OFFSET_MIN,OFFSET_MAX>(std::move(offset_series));
return offset_series;
}
//! Combines the velocity-interval series with the repeat series
static inline TYPE_series calc_rebeca_for_velocity_complete(dc_objects::Step const* step__)
{
// Intermediate calculation
auto velocity2interval_serie(calc_two_series(calc_rebeca_for_velocity(step__), //! OPERATOR::NOT passes the velocity through unchanged.
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::VELOCITY_2_INTERVAL_OPERATOR>()),
calc_rebeca_for_interval(step__)));
// Final Result
auto velocity_series(calc_serie_by_index(velocity2interval_serie,
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::VELOCITY_INTERVAL_2_REPEAT_OPERATOR>())));
// Clip to the valid range?
if(step__->get_property<StepPropertiesIndex::VELOCITY_FIT_TO_RANGE>() == true)
velocity_series = fit_series_to_range<VELOCITY_MIN,VELOCITY_MAX>(std::move(velocity_series));
return velocity_series;
}
//! Combines the length-interval series with the repeat series
static inline TYPE_series calc_rebeca_for_length_complete(dc_objects::Step const* step__)
{
// Intermediate calculation
auto length2interval_serie(calc_two_series(calc_rebeca_for_length(step__), //! OPERATOR::NOT passes the length through unchanged.
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::LENGTH_2_INTERVAL_OPERATOR>()),
calc_rebeca_for_interval(step__)));
// Final Result
auto length_series(calc_serie_by_index(length2interval_serie,
static_cast<OPERATOR>(step__->get_property<StepPropertiesIndex::LENGTH_INTERVAL_2_REPEAT_OPERATOR>())));
// Clip to the valid range?
if(step__->get_property<StepPropertiesIndex::LENGTH_FIT_TO_RANGE>() == true)
length_series = fit_series_to_range<LENGTH_MIN,LENGTH_MAX>(std::move(length_series));
return length_series;
}
}
#endif // DC_ALGROITHM_REBECA_HPP_INCLUDED<|fim▁end|>
|
repeat_step));
}
return std::move(results_serie);
|
<|file_name|>dir_72c031272133aec1d916095cf903ecf1.js<|end_file_name|><|fim▁begin|>var dir_72c031272133aec1d916095cf903ecf1 =
[<|fim▁hole|> [ "HTTP", "dir_7e5fd1ff9265fa651882e3ad4d93cc88.html", "dir_7e5fd1ff9265fa651882e3ad4d93cc88" ],
[ "Module", "dir_10705d0e5b4538c0e815cbe3b6497638.html", "dir_10705d0e5b4538c0e815cbe3b6497638" ]
];<|fim▁end|>
| |
<|file_name|>test_inbound.py<|end_file_name|><|fim▁begin|>##
# Copyright (c) 2008-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.internet import reactor
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.config import ConfigDict
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.imip.inbound import IMIPReplyWork
from txdav.caldav.datastore.scheduling.imip.inbound import MailReceiver
from txdav.caldav.datastore.scheduling.imip.inbound import MailRetriever
from txdav.caldav.datastore.scheduling.imip.inbound import injectMessage
from txdav.caldav.datastore.scheduling.imip.inbound import shouldDeleteAllMail
from txdav.caldav.datastore.scheduling.imip.inbound import IMAP4DownloadProtocol
from txdav.common.datastore.test.util import CommonCommonTests
from twext.enterprise.jobqueue import JobItem
import email
class InboundTests(CommonCommonTests, unittest.TestCase):
@inlineCallbacks
def setUp(self):
super(InboundTests, self).setUp()
yield self.buildStoreAndDirectory()
self.receiver = MailReceiver(self.store, self.directory)
self.retriever = MailRetriever(
self.store, self.directory,
ConfigDict({
"Type" : "pop",
"UseSSL" : False,
"Server" : "example.com",
"Port" : 123,
"Username" : "xyzzy",
})
)
def decorateTransaction(txn):
txn._mailRetriever = self.retriever
self.store.callWithNewTransactions(decorateTransaction)
module = getModule(__name__)
self.dataPath = module.filePath.sibling("data")
def dataFile(self, name):
"""
Get the contents of a given data file from the 'data/mail' test
fixtures directory.
"""
return self.dataPath.child(name).getContent()
def test_checkDSNFailure(self):
data = {
'good_reply' : (False, None, None),
'dsn_failure_no_original' : (True, 'failed', None),
'dsn_failure_no_ics' : (True, 'failed', None),
'dsn_failure_with_ics' : (True, 'failed', '''BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;[email protected]:mailto:[email protected]
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+8e16b897-d544-4217-88e9-a363d08
[email protected]
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
'''),
}
for filename, expected in data.iteritems():
msg = email.message_from_string(self.dataFile(filename))
self.assertEquals(self.receiver.checkDSN(msg), expected)
@inlineCallbacks
def test_processDSN(self):
template = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;[email protected]:mailto:[email protected]
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+%[email protected]
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
"""
# Make sure an unknown token is not processed
calBody = template % "bogus_token"
self.assertEquals(
(yield self.receiver.processDSN(calBody, "xyzzy")),
MailReceiver.UNKNOWN_TOKEN
)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
record = (yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:[email protected]",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C"
))
yield txn.commit()
calBody = template % record.token
result = (yield self.receiver.processDSN(calBody, "xyzzy"))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReply(self):
# Make sure an unknown token in an older email is deleted
msg = email.message_from_string(self.dataFile('good_reply_past'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN_OLD)
# Make sure an unknown token is not processed
msg = email.message_from_string(self.dataFile('good_reply_future'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:[email protected]",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingOrganizer(self):
msg = email.message_from_string(self.dataFile('reply_missing_organizer'))
# stick the token in the database first
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:[email protected]",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingAttendee(self):
msg = email.message_from_string(self.dataFile('reply_missing_attendee'))
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:[email protected]",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingAttachment(self):
msg = email.message_from_string(
self.dataFile('reply_missing_attachment')
)
# stick the token in the database first
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:[email protected]",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.REPLY_FORWARDED_TO_ORGANIZER)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_injectMessage(self):
calendar = Component.fromString("""BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:user01
ATTENDEE:mailto:[email protected];PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
""")
txn = self.store.newTransaction()
result = (yield injectMessage(
txn,
"urn:x-uid:user01",
"mailto:[email protected]",
calendar
))
yield txn.commit()
self.assertEquals(
"1.2;Scheduling message has been delivered",
result.responses[0].reqstatus.toString()
)
@inlineCallbacks
def test_injectMessageWithError(self):
calendar = Component.fromString("""BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:unknown_user
ATTENDEE:mailto:[email protected];PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
""")
txn = self.store.newTransaction()
result = (yield injectMessage(
txn,
"urn:x-uid:unknown_user",
"mailto:[email protected]",
calendar
))
yield txn.commit()
self.assertEquals(
"3.7;Invalid Calendar User",
result.responses[0].reqstatus.toString()
)
@inlineCallbacks
def test_work(self):<|fim▁hole|>
calendar = """BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:user01
ATTENDEE:mailto:[email protected];PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
"""
txn = self.store.newTransaction()
yield txn.enqueue(
IMIPReplyWork,
organizer="urn:x-uid:user01",
attendee="mailto:[email protected]",
icalendarText=calendar
)
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_shouldDeleteAllMail(self):
# Delete if the mail server is on the same host and using our
# dedicated account:
self.assertTrue(shouldDeleteAllMail(
"calendar.example.com",
"calendar.example.com",
"com.apple.calendarserver"
))
self.assertTrue(shouldDeleteAllMail(
"calendar.example.com",
"localhost",
"com.apple.calendarserver"
))
# Don't delete all otherwise:
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"calendar.example.com",
"not_ours"
))
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"localhost",
"not_ours"
))
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"mail.example.com",
"com.apple.calendarserver"
))
@inlineCallbacks
def test_deletion(self):
"""
Verify the IMAP protocol will delete messages only when the right
conditions are met. Either:
A) We've been told to delete all mail
B) We've not been told to delete all mail, but it was a message
we processed
"""
def stubFetchNextMessage():
pass
def stubCbFlagDeleted(result):
self.flagDeletedResult = result
return succeed(None)
proto = IMAP4DownloadProtocol()
self.patch(proto, "fetchNextMessage", stubFetchNextMessage)
self.patch(proto, "cbFlagDeleted", stubCbFlagDeleted)
results = {
"ignored" : (
{
"RFC822" : "a message"
}
)
}
# Delete all mail = False; action taken = submitted; result = deletion
proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, False)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
# Delete all mail = False; action taken = not submitted; result = no deletion
proto.factory = StubFactory(MailReceiver.NO_TOKEN, False)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, None)
# Delete all mail = True; action taken = submitted; result = deletion
proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, True)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
# Delete all mail = True; action taken = not submitted; result = deletion
proto.factory = StubFactory(MailReceiver.NO_TOKEN, True)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
@inlineCallbacks
def test_missingIMAPMessages(self):
"""
Make sure L{IMAP4DownloadProtocol.cbGotMessage} can deal with missing messages.
"""
class DummyResult(object):
def __init__(self):
self._values = []
def values(self):
return self._values
noResult = DummyResult()
missingKey = DummyResult()
missingKey.values().append({})
imap4 = IMAP4DownloadProtocol()
imap4.messageUIDs = []
imap4.fetchNextMessage = lambda : None
result = yield imap4.cbGotMessage(noResult, [])
self.assertTrue(result is None)
result = yield imap4.cbGotMessage(missingKey, [])
self.assertTrue(result is None)
class StubFactory(object):
def __init__(self, actionTaken, deleteAllMail):
self.actionTaken = actionTaken
self.deleteAllMail = deleteAllMail
def handleMessage(self, messageData):
return succeed(self.actionTaken)<|fim▁end|>
| |
<|file_name|>mocks.py<|end_file_name|><|fim▁begin|>""" test """
import logging
import os
import tempfile
import sys
import random
from bzt.engine import Engine, Configuration, FileLister
from bzt.utils import load_class
from bzt.engine import Provisioning, ScenarioExecutor, Reporter, AggregatorListener
from bzt.modules.aggregator import ResultsReader
from tests import random_sample
try:
from exceptions import KeyboardInterrupt
except ImportError:
from builtins import KeyboardInterrupt
class EngineEmul(Engine):
"""
emulating engine
"""
def __init__(self):
Engine.__init__(self, logging.getLogger(''))
self.artifacts_base_dir = os.path.dirname(__file__) + "/../build/test"
self._create_artifacts_dir()
self.finalize_exc = None
self.was_finalize = False
def _shutdown(self):
return super(EngineEmul, self)._shutdown()
def dump_config(self):
""" test """
fname = tempfile.mkstemp()[1]
self.config.dump(fname, Configuration.JSON)
with open(fname) as fh:
logging.debug("JSON:\n%s", fh.read())
class ModuleMock(ScenarioExecutor, Provisioning, Reporter, FileLister):
""" mock """
def __init__(self):
super(ModuleMock, self).__init__()
self.postproc_exc = None
self.check_exc = None
self.prepare_exc = None
self.startup_exc = None
self.shutdown_exc = None
self.check_iterations = sys.maxsize
self.was_shutdown = False
self.was_startup = False
self.was_prepare = False
self.was_check = False
self.was_postproc = False
def prepare(self):
"""
:raise self.prepare_exc:
"""
self.log.info("Preparing mock")
self.was_prepare = True
self.check_iterations = int(self.settings.get('check_iterations', "2"))
self.postproc_exc = self.get_exc("postproc")
self.check_exc = self.get_exc("check")
self.prepare_exc = self.get_exc("prepare")
self.startup_exc = self.get_exc("startup")
self.shutdown_exc = self.get_exc("shutdown")
if isinstance(self.engine.aggregator, ResultsReader):
reader = MockReader()
for num in range(0, self.check_iterations):
for quan in range(0, int(random.random() * 10)):
reader.data.append(random_sample(num))
self.engine.aggregator.add_reader(reader)
if self.prepare_exc:
raise self.prepare_exc
<|fim▁hole|> :raise self.startup_exc:
"""
self.log.info("Startup mock")
self.was_startup = True
if self.startup_exc:
raise self.startup_exc
def check(self):
"""
:return: :raise self.check_exc:
"""
self.was_check = True
self.log.info("Checks remaining: %s", self.check_iterations)
self.check_iterations -= 1
if not self.check_iterations:
if self.check_exc:
raise self.check_exc
else:
return True
return False
def shutdown(self):
"""
:raise self.shutdown_exc:
"""
self.log.info("Shutdown mock")
self.was_shutdown = True
if self.shutdown_exc:
raise self.shutdown_exc
def post_process(self):
"""
:raise self.postproc_exc:
"""
self.log.info("Postproc mock")
self.was_postproc = True
if self.postproc_exc:
raise self.postproc_exc
def get_exc(self, param):
"""
:type param: str
:return:
"""
name = self.settings.get(param, "")
if name:
cls = load_class(name)
return cls()
return None
def resource_files(self):
"""
:return:
"""
return [__file__]
class MockReader(ResultsReader, AggregatorListener):
"""
test
"""
def __init__(self):
super(MockReader, self).__init__()
self.results = []
self.data = []
self.add_listener(self)
self.track_percentiles = [0, 50, 90, 99, 99.5, 100]
def _read(self, final_pass=False):
"""
Emulating read samples
:type final_pass: bool
:return:
"""
while self.data:
# logging.debug("Emul read: %s", self.data[0])
yield self.data.pop(0)
def aggregated_second(self, data):
"""
Store and assert aggregate sequence
:type data: dict
:raise AssertionError:
"""
if self.results:
if self.results[-1]["ts"] >= data["ts"]:
raise AssertionError("TS sequence wrong: %s>=%s" % (self.results[-1]["ts"], data["ts"]))
logging.info("Data: %s", data)
self.results.append(data)
def download_progress_mock(blocknum, blocksize, totalsize):
pass
class ResultChecker(AggregatorListener):
def __init__(self, callback):
super(ResultChecker, self).__init__()
self.callback = callback
def aggregated_second(self, data):
self.callback(data)<|fim▁end|>
|
def startup(self):
"""
|
<|file_name|>test_mapper.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import unittest
import mock
import openerp.tests.common as common
from openerp.addons.connector.unit.mapper import (
Mapper,
ImportMapper,
ExportMapper,
ImportMapChild,
MappingDefinition,
changed_by,
only_create,
convert,
follow_m2o_relations,
m2o_to_backend,
backend_to_m2o,
none,
MapOptions,
mapping)
from openerp.addons.connector.backend import Backend
from openerp.addons.connector.connector import ConnectorEnvironment
from openerp.addons.connector.session import ConnectorSession
class test_mapper(unittest.TestCase):
""" Test Mapper """
def test_mapping_decorator(self):
class KifKrokerMapper(Mapper):
_model_name = 'res.users'
@changed_by('name', 'city')
@mapping
@only_create
def name(self):
pass
@changed_by('email')
@mapping
def email(self):
pass
@changed_by('street')
@mapping
def street(self):
pass
def no_decorator(self):
pass
self.maxDiff = None
name_def = MappingDefinition(changed_by=set(('name', 'city')),
only_create=True)
email_def = MappingDefinition(changed_by=set(('email',)),
only_create=False)
street_def = MappingDefinition(changed_by=set(('street',)),
only_create=False)
self.assertEqual(KifKrokerMapper._map_methods,
{'name': name_def,
'email': email_def,
'street': street_def,
})
def test_mapping_decorator_cross_classes(self):
""" Mappings should not propagate to other classes"""
class MomMapper(Mapper):
_model_name = 'res.users'
@changed_by('name', 'city')
@mapping
def name(self):
pass
class ZappMapper(Mapper):
_model_name = 'res.users'
@changed_by('email')
@only_create
@mapping
def email(self):
pass
mom_def = MappingDefinition(changed_by=set(('name', 'city')),
only_create=False)
zapp_def = MappingDefinition(changed_by=set(('email',)),
only_create=True)
self.assertEqual(MomMapper._map_methods,
{'name': mom_def})
self.assertEqual(ZappMapper._map_methods,
{'email': zapp_def})
def test_mapping_decorator_cumul(self):
""" Mappings should cumulate the ``super`` mappings
and the local mappings."""
class FryMapper(Mapper):
_model_name = 'res.users'
@changed_by('name', 'city')
@mapping
def name(self):
pass
class FarnsworthMapper(FryMapper):
_model_name = 'res.users'
@changed_by('email')
@mapping
def email(self):
pass
name_def = MappingDefinition(changed_by=set(('name', 'city')),
only_create=False)
email_def = MappingDefinition(changed_by=set(('email',)),
only_create=False)
self.assertEqual(FarnsworthMapper._map_methods,
{'name': name_def,
'email': email_def})
def test_mapping_decorator_cumul_changed_by(self):
""" Mappings should cumulate the changed_by fields of the
``super`` mappings and the local mappings """
class FryMapper(Mapper):
_model_name = 'res.users'
@changed_by('name', 'city')
@mapping
def name(self):
pass
class FarnsworthMapper(FryMapper):
_model_name = 'res.users'
@changed_by('email')
@mapping
def name(self):
pass
class ThirdMapper(FarnsworthMapper):
_model_name = 'res.users'
@changed_by('email', 'street')
@mapping
def name(self):
pass
name_def = MappingDefinition(changed_by=set(('name', 'city', 'email')),
only_create=False)
self.assertEqual(FarnsworthMapper._map_methods,
{'name': name_def})
name_def = MappingDefinition(changed_by=set(('name', 'city',
'email', 'street')),
only_create=False)
self.assertEqual(ThirdMapper._map_methods,
{'name': name_def})
def test_several_bases_cumul(self):
class FryMapper(Mapper):
_model_name = 'res.users'
@changed_by('name', 'city')
@mapping
def name(self):
pass
@only_create
@mapping
def street(self):
pass
@only_create
@mapping
def zip(self):
pass
class FarnsworthMapper(Mapper):
_model_name = 'res.users'
@changed_by('email')
@mapping
def name(self):
pass
@changed_by('street')
@mapping
def city(self):
pass
@mapping
def zip(self):
pass
class ThirdMapper(FryMapper, FarnsworthMapper):
_model_name = 'res.users'
@changed_by('email', 'street')
@mapping
def name(self):
pass
@mapping
def email(self):
pass
name_def = MappingDefinition(changed_by=set(('name', 'city',
'email', 'street')),
only_create=False)
street_def = MappingDefinition(changed_by=set([]),
only_create=True)
city_def = MappingDefinition(changed_by=set(('street',)),
only_create=False)
email_def = MappingDefinition(changed_by=set([]),
only_create=False)
zip_def = MappingDefinition(changed_by=set([]),
only_create=True)
self.assertEqual(ThirdMapper._map_methods['name'], name_def)
self.assertEqual(ThirdMapper._map_methods['street'], street_def)
self.assertEqual(ThirdMapper._map_methods['city'], city_def)
self.assertEqual(ThirdMapper._map_methods['email'], email_def)
self.assertEqual(ThirdMapper._map_methods['zip'], zip_def)
def test_mapping_record(self):
""" Map a record and check the result """
class MyMapper(ImportMapper):
direct = [('name', 'out_name')]
@mapping
def street(self, record):
return {'out_street': record['street'].upper()}
env = mock.MagicMock()
record = {'name': 'Guewen',
'street': 'street'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 'Guewen',
'out_street': 'STREET'}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_record_on_create(self):
""" Map a record and check the result for creation of record """
class MyMapper(ImportMapper):
direct = [('name', 'out_name')]
@mapping
def street(self, record):
return {'out_street': record['street'].upper()}
@only_create
@mapping
def city(self, record):
return {'out_city': 'city'}
env = mock.MagicMock()
record = {'name': 'Guewen',
'street': 'street'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 'Guewen',
'out_street': 'STREET'}
self.assertEqual(map_record.values(), expected)
expected = {'out_name': 'Guewen',
'out_street': 'STREET',
'out_city': 'city'}
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_update(self):
""" Force values on a map record """
class MyMapper(ImportMapper):
direct = [('name', 'out_name')]
@mapping
def street(self, record):
return {'out_street': record['street'].upper()}
@only_create
@mapping
def city(self, record):
return {'out_city': 'city'}
env = mock.MagicMock()
record = {'name': 'Guewen',
'street': 'street'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
map_record.update({'test': 1}, out_city='forced')
expected = {'out_name': 'Guewen',
'out_street': 'STREET',
'out_city': 'forced',
'test': 1}
self.assertEqual(map_record.values(), expected)
expected = {'out_name': 'Guewen',
'out_street': 'STREET',
'out_city': 'forced',
'test': 1}
self.assertEqual(map_record.values(for_create=True), expected)
def test_finalize(self):
""" Inherit finalize to modify values """
class MyMapper(ImportMapper):
direct = [('name', 'out_name')]
def finalize(self, record, values):
result = super(MyMapper, self).finalize(record, values)
result['test'] = 'abc'
return result
env = mock.MagicMock()
record = {'name': 'Guewen',
'street': 'street'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 'Guewen',
'test': 'abc'}
self.assertEqual(map_record.values(), expected)
expected = {'out_name': 'Guewen',
'test': 'abc'}
self.assertEqual(map_record.values(for_create=True), expected)
def test_some_fields(self):
""" Map only a selection of fields """
class MyMapper(ImportMapper):
direct = [('name', 'out_name'),
('street', 'out_street'),
]
@changed_by('country')
@mapping
def country(self, record):
return {'country': 'country'}
env = mock.MagicMock()
record = {'name': 'Guewen',
'street': 'street',
'country': 'country'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 'Guewen',
'country': 'country'}
self.assertEqual(map_record.values(fields=['name', 'country']),
expected)
expected = {'out_name': 'Guewen',
'country': 'country'}
self.assertEqual(map_record.values(for_create=True,
fields=['name', 'country']),
expected)
def test_mapping_modifier(self):
""" Map a direct record with a modifier function """
def do_nothing(field):
def transform(self, record, to_attr):
return record[field]
return transform
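        # Note (illustrative, specific to these tests): direct-mapping
        # modifiers follow this factory pattern -- the outer function captures
        # the source field name and returns a transform(self, record, to_attr)
        # callable, the same shape produced by convert(), none() and
        # m2o_to_backend() used elsewhere in this file.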
class MyMapper(ImportMapper):
direct = [(do_nothing('name'), 'out_name')]
env = mock.MagicMock()
record = {'name': 'Guewen'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 'Guewen'}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_convert(self):
""" Map a direct record with the convert modifier function """
class MyMapper(ImportMapper):
direct = [(convert('name', int), 'out_name')]
env = mock.MagicMock()
record = {'name': '300'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 300}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_modifier_none(self):
""" Pipeline of modifiers """
class MyMapper(ImportMapper):
direct = [(none('in_f'), 'out_f'),
(none('in_t'), 'out_t')]
env = mock.MagicMock()
record = {'in_f': False, 'in_t': True}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_f': None, 'out_t': True}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_modifier_pipeline(self):
""" Pipeline of modifiers """
class MyMapper(ImportMapper):
direct = [(none(convert('in_f', bool)), 'out_f'),
(none(convert('in_t', bool)), 'out_t')]
env = mock.MagicMock()
record = {'in_f': 0, 'in_t': 1}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_f': None, 'out_t': True}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_custom_option(self):
""" Usage of custom options in mappings """
class MyMapper(ImportMapper):
@mapping
def any(self, record):
if self.options.custom:
res = True
else:
res = False
return {'res': res}
env = mock.MagicMock()
record = {}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'res': True}
self.assertEqual(map_record.values(custom=True), expected)
def test_mapping_custom_option_not_defined(self):
""" Usage of custom options not defined raise AttributeError """
class MyMapper(ImportMapper):
@mapping
def any(self, record):
if self.options.custom is None:
res = True
else:
res = False
return {'res': res}
env = mock.MagicMock()
record = {}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'res': True}
self.assertEqual(map_record.values(), expected)
def test_map_options(self):
""" Test MapOptions """
options = MapOptions({'xyz': 'abc'}, k=1)
options.l = 2
self.assertEqual(options['xyz'], 'abc')
self.assertEqual(options['k'], 1)
self.assertEqual(options['l'], 2)
self.assertEqual(options.xyz, 'abc')
self.assertEqual(options.k, 1)
self.assertEqual(options.l, 2)
self.assertEqual(options['undefined'], None)
self.assertEqual(options.undefined, None)
def test_changed_by_fields(self):
""" Test attribute ``_changed_by_fields`` on Mapper."""
class MyExportMapper(ExportMapper):
direct = [('street', 'out_street'),
(none('in_t'), 'out_t'),
(none(convert('in_f', bool)), 'out_f')]
@changed_by('name', 'city')
@mapping
def name(self):
pass
@changed_by('email')
@mapping
def email(self):
pass
def no_decorator(self):
pass
self.assertEqual(
MyExportMapper._changed_by_fields,
set(['street', 'in_t', 'in_f', 'name', 'city', 'email']))
class test_mapper_recordsets(common.TransactionCase):
""" Test mapper with "real" records instead of mocks """
def setUp(self):
super(test_mapper_recordsets, self).setUp()
self.session = ConnectorSession(self.cr, self.uid)
self.backend = mock.Mock(wraps=Backend('x', version='y'),
name='backend')
backend_record = mock.Mock()
backend_record.get_backend.return_value = self.backend
self.connector_env = ConnectorEnvironment(
backend_record, self.session, 'res.partner')
def test_mapping_modifier_follow_m2o_relations(self):
""" Map with the follow_m2o_relations modifier """
class MyMapper(ImportMapper):
direct = [
(follow_m2o_relations('parent_id.name'), 'parent_name'),
]
partner = self.browse_ref('base.res_partner_address_4')
mapper = MyMapper(self.connector_env)
map_record = mapper.map_record(partner)
expected = {'parent_name': 'Agrolait'}
self.assertEqual(map_record.values(), expected)
self.assertEqual(map_record.values(for_create=True), expected)
class test_mapper_binding(common.TransactionCase):
""" Test Mapper with Bindings"""
def setUp(self):
super(test_mapper_binding, self).setUp()
self.session = ConnectorSession(self.cr, self.uid)
self.backend = mock.Mock(wraps=Backend('x', version='y'),
name='backend')
backend_record = mock.Mock()
backend_record.get_backend.return_value = self.backend
self.connector_env = ConnectorEnvironment(
backend_record, self.session, 'res.partner')
self.country_binder = mock.Mock(name='country_binder')
self.country_binder.return_value = self.country_binder
self.backend.get_class.return_value = self.country_binder
def test_mapping_m2o_to_backend(self):
""" Map a direct record with the m2o_to_backend modifier function """
class MyMapper(ImportMapper):
_model_name = 'res.partner'
direct = [(m2o_to_backend('country_id'), 'country')]
partner = self.env.ref('base.main_partner')
partner.write({'country_id': self.env.ref('base.ch').id})
self.country_binder.to_backend.return_value = 10
mapper = MyMapper(self.connector_env)
map_record = mapper.map_record(partner)
self.assertEqual(map_record.values(), {'country': 10})
self.country_binder.to_backend.assert_called_once_with(
partner.country_id.id, wrap=False)
def test_mapping_backend_to_m2o(self):
""" Map a direct record with the backend_to_m2o modifier function """
class MyMapper(ImportMapper):
_model_name = 'res.partner'
direct = [(backend_to_m2o('country'), 'country_id')]
record = {'country': 10}
ch = self.env.ref('base.ch')
self.country_binder.to_openerp.return_value = ch
mapper = MyMapper(self.connector_env)
map_record = mapper.map_record(record)
self.assertEqual(map_record.values(), {'country_id': ch.id})
self.country_binder.to_openerp.assert_called_once_with(
10, unwrap=False)
def test_mapping_record_children_no_map_child(self):
""" Map a record with children, using default MapChild """
backend = Backend('backend', '42')
@backend
class LineMapper(ImportMapper):
_model_name = 'res.currency.rate'
direct = [('name', 'name')]
@mapping
def price(self, record):
return {'rate': record['rate'] * 2}
@only_create
@mapping
def discount(self, record):
return {'test': .5}
@backend
class ObjectMapper(ImportMapper):
_model_name = 'res.currency'
direct = [('name', 'name')]
children = [('lines', 'line_ids', 'res.currency.rate')]
backend_record = mock.Mock()
backend_record.get_backend.side_effect = lambda *a: backend
env = ConnectorEnvironment(backend_record, self.session,
'res.currency')
record = {'name': 'SO1',
'lines': [{'name': '2013-11-07',
'rate': 10},
{'name': '2013-11-08',
'rate': 20}]}
mapper = ObjectMapper(env)
map_record = mapper.map_record(record)
expected = {'name': 'SO1',
'line_ids': [(0, 0, {'name': '2013-11-07',
'rate': 20}),
(0, 0, {'name': '2013-11-08',
'rate': 40})]
}
self.assertEqual(map_record.values(), expected)
expected = {'name': 'SO1',
'line_ids': [(0, 0, {'name': '2013-11-07',
'rate': 20,
'test': .5}),
(0, 0, {'name': '2013-11-08',
'rate': 40,
'test': .5})]
}
self.assertEqual(map_record.values(for_create=True), expected)
def test_mapping_record_children(self):
""" Map a record with children, using defined MapChild """
backend = Backend('backend', '42')
@backend
class LineMapper(ImportMapper):
_model_name = 'res.currency.rate'
direct = [('name', 'name')]
@mapping
def price(self, record):
return {'rate': record['rate'] * 2}
@only_create
@mapping
def discount(self, record):
return {'test': .5}
@backend
class SaleLineImportMapChild(ImportMapChild):
_model_name = 'res.currency.rate'
def format_items(self, items_values):
return [('ABC', values) for values in items_values]
@backend
class ObjectMapper(ImportMapper):
_model_name = 'res.currency'
direct = [('name', 'name')]
children = [('lines', 'line_ids', 'res.currency.rate')]
backend_record = mock.Mock()
backend_record.get_backend.side_effect = lambda *a: backend
env = ConnectorEnvironment(backend_record, self.session,
'res.currency')
record = {'name': 'SO1',
'lines': [{'name': '2013-11-07',
'rate': 10},
{'name': '2013-11-08',
'rate': 20}]}
mapper = ObjectMapper(env)
map_record = mapper.map_record(record)
expected = {'name': 'SO1',
'line_ids': [('ABC', {'name': '2013-11-07',
'rate': 20}),
('ABC', {'name': '2013-11-08',
'rate': 40})]
}
self.assertEqual(map_record.values(), expected)
expected = {'name': 'SO1',
'line_ids': [('ABC', {'name': '2013-11-07',
'rate': 20,
'test': .5}),
('ABC', {'name': '2013-11-08',
'rate': 40,
'test': .5})]
}
self.assertEqual(map_record.values(for_create=True), expected)
def test_modifier_import_filter_field(self):
""" A direct mapping with a modifier must still be considered
from the list of fields
"""
class MyMapper(ImportMapper):
direct = [('field', 'field2'),
('no_field', 'no_field2'),
(convert('name', int), 'out_name')]
env = mock.MagicMock()
record = {'name': '300', 'field': 'value', 'no_field': 'no_value'}
mapper = MyMapper(env)
map_record = mapper.map_record(record)
expected = {'out_name': 300, 'field2': 'value'}
self.assertEqual(map_record.values(fields=['field', 'name']), expected)
self.assertEqual(map_record.values(for_create=True,
fields=['field', 'name']), expected)
def test_modifier_export_filter_field(self):
""" A direct mapping with a modifier on an export mapping """
class MyMapper(ExportMapper):
direct = [('field', 'field2'),
('no_field', 'no_field2'),
(convert('name', int), 'out_name')]
env = mock.MagicMock()
record = {'name': '300', 'field': 'value', 'no_field': 'no_value'}
mapper = MyMapper(env)<|fim▁hole|> fields=['field', 'name']), expected)<|fim▁end|>
|
map_record = mapper.map_record(record)
expected = {'out_name': 300, 'field2': 'value'}
self.assertEqual(map_record.values(fields=['field', 'name']), expected)
self.assertEqual(map_record.values(for_create=True,
|
<|file_name|>accession.js<|end_file_name|><|fim▁begin|>/*
Copyright 2017 The BioBricks Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
var fs = require('fs')
var methodNotAllowed = require('./method-not-allowed')
var notFound = require('./not-found')
var path = require('path')
var BYTES_PER_LINE = require('../bytes-per-accession')
module.exports = function (request, response, configuration) {
if (request.method === 'GET') {
var number = parseInt(request.params.number)
var file = path.join(configuration.directory, 'accessions')
// Get a file descriptor.
fs.open(file, 'r', function (error, fd) {
/* istanbul ignore if */
if (error) {
request.log.error(error)
response.statusCode = 500
response.end()
} else {
// Calculate the offset where the requested accession's line
// begins, if we have it.
var offset = BYTES_PER_LINE * (number - 1)
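        // Worked example (illustrative numbers only -- the real value comes
        // from ../bytes-per-accession): if BYTES_PER_LINE were 33, accession
        // number 3 would start at offset 33 * (3 - 1) = 66, and the size
        // check below would require the file to be at least 66 + 33 = 99
        // bytes long before attempting the read.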
// Stat the file and compare its size to the offset for the
// accession number requested.
fs.fstat(fd, function (error, stats) {
/* istanbul ignore if */
if (error) {
request.log.error(error)
response.statusCode = 500
response.end()
fs.close(fd)
} else {
// If the accessions file is too small to have the requested
// accession number, respond 404.
if (stats.size < (offset + BYTES_PER_LINE)) {
notFound(request, response, configuration)
// Otherwise, read the line for the accession from the file,
// starting at the calculated offset.
} else {
var buffer = Buffer.alloc(BYTES_PER_LINE - 1)
fs.read(
fd, buffer, 0, buffer.byteLength, offset,<|fim▁hole|> if (error) {
request.log.error(error)
response.statusCode = 500
response.end()
} else {
// Redirect the client to the publication path.
var split = buffer
.toString()
.split(',')
response.statusCode = 303
response.setHeader(
'Location', configuration.base + 'publications/' + split[1]
)
response.end()
}
fs.close(fd, function () {
// pass
})
}
)
}
}
})
}
})
} else {
methodNotAllowed(response)
}
}<|fim▁end|>
|
function (error) {
/* istanbul ignore if */
|
<|file_name|>reparent.cpp<|end_file_name|><|fim▁begin|>/////////////////////////////////////////////////////////////////////////////
// Name: src/x11/reparent.cpp
// Purpose: wxWindow
// Author: Julian Smart
// Modified by:
// Created: 2002-03-09
// Copyright: (c) Julian Smart
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// for compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
// ============================================================================
// declarations
// ============================================================================
// ----------------------------------------------------------------------------
// headers
// ----------------------------------------------------------------------------
#if !wxUSE_NANOX
#include "wx/x11/reparent.h"
#ifndef WX_PRECOMP
#include "wx/log.h"
#include "wx/app.h"
#include "wx/timer.h"
#endif
#include "wx/evtloop.h"
#include "wx/x11/private.h"
#include "X11/Xatom.h"
#include "wx/generic/private/timer.h"
/*
* wxAdoptedWindow
*/
wxAdoptedWindow::wxAdoptedWindow()
{
}
wxAdoptedWindow::wxAdoptedWindow(WXWindow window)
{
m_mainWindow = window;
}
wxAdoptedWindow::~wxAdoptedWindow()
{
}
/*
* wxReparenter
*/
static bool Xerror;
static Atom WM_STATE = 0;
bool wxReparenter::sm_done = false;
wxAdoptedWindow* wxReparenter::sm_toReparent = NULL;
wxWindow* wxReparenter::sm_newParent = NULL;
wxString wxReparenter::sm_name;
bool wxReparenter::sm_exactMatch = false;
static int ErrorHandler(Display* WXUNUSED(dpy), XErrorEvent* WXUNUSED(event))
{
Xerror = True;
return False;
}
// We assume that toReparent has had its X window set
// appropriately.
bool wxReparenter::Reparent(wxWindow* newParent, wxAdoptedWindow* toReparent)
{
XWindowAttributes xwa;
Window *children;
unsigned int numchildren, each;
Window returnroot, returnparent;
XErrorHandler old;
int parentOffset = 0;
old = XSetErrorHandler(ErrorHandler);
XReparentWindow( wxGlobalDisplay(),
(Window) toReparent->X11GetMainWindow(),
(Window) newParent->X11GetMainWindow(),
0, 0);
if (!XQueryTree( wxGlobalDisplay(),
(Window) toReparent->X11GetMainWindow(),
&returnroot, &returnparent,
&children, &numchildren) || Xerror)
{
XSetErrorHandler(old);
return true;
}
if (numchildren > 0)
{
        // TEST: see if we can get away with reparenting just
        // the first one
if (numchildren > 1)
{
wxLogDebug(wxT("Found %d, but only reparenting 1 child."), numchildren);
numchildren = 1;
}
wxLogDebug(wxT("Reparenting %d children."), numchildren);
/* Stacking order is preserved since XQueryTree returns its children in
bottommost to topmost order
*/
for (each=0; each<numchildren; each++)
{
XGetWindowAttributes( wxGlobalDisplay(),
children[each], &xwa);
fprintf(stderr,
"Reparenting child at offset %d and position %d, %d.\n",
parentOffset, parentOffset+xwa.x, parentOffset+xwa.y);
XReparentWindow( wxGlobalDisplay(),
children[each], (Window) newParent->X11GetMainWindow(),
xwa.x, xwa.y);
}
}
XSetErrorHandler(old);
return true;
}
// Wait for an appropriate window to be created.
// If exactMatch is false, a substring match is OK.
// If windowName is empty, then wait for the next overrideRedirect window.
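// Illustrative usage (hypothetical names, not taken from this file): given a
// wxReparenter rep, a container window and an adopted window, a call such as
//     rep.WaitAndReparent(container, adopted, wxT("xterm"), false);
// spins a local event loop until a client window whose name contains "xterm"
// is mapped, then reparents it into the container.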
bool wxReparenter::WaitAndReparent(wxWindow* newParent, wxAdoptedWindow* toReparent,
const wxString& windowName,
bool exactMatch)
{
sm_newParent = newParent;
sm_toReparent = toReparent;
sm_exactMatch = exactMatch;
sm_name = windowName;
Display* display = wxGlobalDisplay();
XSelectInput(display,
RootWindowOfScreen(DefaultScreenOfDisplay(display)),
SubstructureNotifyMask);
if (!WM_STATE)
WM_STATE = XInternAtom(display, "WM_STATE", False);
sm_done = false;
wxEventLoop eventLoop;
while (!sm_done)
{
if (eventLoop.Pending())
{
XEvent xevent;
XNextEvent(display, & xevent);
if (!wxTheApp->ProcessXEvent((WXEvent*) & xevent))
{
// Do the local event processing
ProcessXEvent((WXEvent*) & xevent);
}
}
else
{
#if wxUSE_TIMER
wxGenericTimerImpl::NotifyTimers();
wxTheApp->ProcessIdle();
#endif
}
}
return true;
}
bool wxReparenter::ProcessXEvent(WXEvent* event)
{
XEvent* xevent = (XEvent*) event;
Window client;
if (!sm_done)
{
if (xevent->type == MapNotify)
{
wxLogDebug(wxT("Window was mapped"));
}
if (xevent->type == MapNotify && !xevent->xmap.override_redirect &&
(client = (Window) FindAClientWindow((WXWindow) xevent->xmap.window, sm_name)))
{
wxLogDebug(wxT("Found a client window, about to reparent"));
wxASSERT(sm_toReparent->GetParent() == NULL);
sm_toReparent->SetHandle((WXWindow) client);
sm_newParent->AddChild(sm_toReparent);
sm_done = Reparent(sm_newParent, sm_toReparent);
return sm_done;
} else if (xevent->type == MapNotify &&
xevent->xmap.override_redirect &&
xevent->xmap.window)
{
wxLogDebug(wxT("Found an override redirect window, about to reparent"));
sm_toReparent->SetHandle((WXWindow) xevent->xmap.window);
sm_newParent->AddChild(sm_toReparent);
wxASSERT(sm_toReparent->GetParent() == NULL);
sm_done = Reparent(sm_newParent, sm_toReparent);
return sm_done;
}
}
return false;
}
WXWindow wxReparenter::FindAClientWindow(WXWindow window, const wxString& name)
{
int rvalue, i;
Atom actualtype;
int actualformat;
unsigned long nitems, bytesafter;<|fim▁hole|> unsigned int numchildren;
Window returnroot, returnparent;
Window result = 0;
XErrorHandler old;
char *clientName;
Xerror = False;
old = XSetErrorHandler(ErrorHandler);
rvalue = XGetWindowProperty((Display*) wxGetDisplay(),
(Window) window, WM_STATE,
0, 1, False,
AnyPropertyType, &actualtype, &actualformat,
&nitems, &bytesafter, &propreturn);
XSetErrorHandler(old);
if (!Xerror && rvalue == Success && actualtype != None)
{
if (rvalue == Success)
{
XFree((char *) propreturn);
}
XFetchName((Display*) wxGetDisplay(), (Window) window, &clientName);
wxString str1(name);
wxString str2 = wxString::FromAscii(clientName);
str1.Lower();
str2.Lower();
bool matches;
if (sm_exactMatch)
matches = (name == wxString::FromAscii(clientName));
else
matches = (str1.Contains(str2) || str2.Contains(str1));
XFree(clientName);
if (matches)
return (WXWindow) window;
else
return NULL;
}
old = XSetErrorHandler(ErrorHandler);
if (!XQueryTree((Display*) wxGetDisplay(), (Window) window, &returnroot, &returnparent,
&children, &numchildren) || Xerror)
{
XSetErrorHandler(old);
return NULL;
}
XSetErrorHandler(old);
result = 0;
for (i=0; i<(int)numchildren && !result ;i++) {
result = (Window) FindAClientWindow((WXWindow) children[i], name);
}
if (numchildren) {
XFree((char *) children);
} return (WXWindow) result;
}
#endif // !wxUSE_NANOX<|fim▁end|>
|
unsigned char *propreturn;
Window *children;
|
<|file_name|>ASMCode.java<|end_file_name|><|fim▁begin|>import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;<|fim▁hole|>//this code contains the output assembly code that the program outputs.
//will have at least three functions:
//add(string, string, string, string) <- adds an assembly code line
//optimise(int) <- optimises the output code, wherever possible
//write(string) <- writes the code to the desired filename
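//Illustrative example (not part of the original notes): a call such as
//  add("addi", "sp", "sp", "4");
//appends the line "\taddi\tsp, sp, 4" to the code of the current function.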
public class ASMCode {
LinkedList<String> lines = new LinkedList<String>();
LinkedList<String> data = new LinkedList<String>();
HashMap<String, String> stringMap = new HashMap<String, String>();
LinkedList<lineWrapper> functionLines = new LinkedList<lineWrapper>();
boolean hasFCall;
private interface lineWrapper {
String compile(int stacksize, boolean funcCall);
}
private class stdLineWrapper implements lineWrapper {
private String line;
@Override
public String compile(int stacksize, boolean funcCall) {
return line;
}
public stdLineWrapper(String s) {
line = s;
}
}
private class functionReturnWrapper implements lineWrapper {
@Override
public String compile(int stacksize, boolean funcCall) {
StringBuilder s = new StringBuilder();
if(stacksize != 0) {
s.append("\tmov\tsp, fp");
s.append(System.getProperty("line.separator"));//system independent newline
s.append("\tpop\tfp");
s.append(System.getProperty("line.separator"));
}
if(hasFCall) {
s.append("\tpop\tra");
s.append(System.getProperty("line.separator"));
}
s.append("\tjmpr\tra");
return s.toString();
}
}
public void add(String inst, String reg1, String reg2, String reg3) {
if(deadCode) return;
String newInst = "\t"+inst;
if(reg1 != null) {
newInst = newInst + "\t" + reg1;
if(reg2 != null) {
newInst = newInst + ", " + reg2;
if(reg3 != null) {
newInst = newInst + ", " + reg3;
}
}
}
functionLines.addLast(new stdLineWrapper(newInst));
}
public void add(String inst, String reg1, String reg2) {
add(inst, reg1, reg2, null);
}
public void add(String inst, String reg1) {
add(inst, reg1, null, null);
}
public void add(String inst) {
add(inst, null, null, null);
}
int labIndex = 0;
public String addString(String s) {
//makes sure we don't have duplicate strings in memory
if(stringMap.containsKey(s)) return stringMap.get(s);
//generate a label
String label = "string" + labIndex++;
data.addLast(label+":");
data.addLast("#string " +s);
stringMap.put(s, label);
return label;
}
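	//Illustrative example (assumed first use): addString("hello") emits
	//  string0:
	//  #string hello
	//into the data section and returns the label "string0"; a second call
	//with the same string reuses that label instead of emitting it again.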
public void addGlobal(String data, String label) {
//generate a label
this.data.addLast(label+":");
this.data.addLast(data);
}
public void put(String s) {
if(!deadCode)
functionLines.addLast(new stdLineWrapper(s));
}
private String fname;
public void beginFunction(String name) {
functionLines = new LinkedList<lineWrapper>();
fname = name;
hasFCall = false;
}
public void endFunction(int varCount) {
lines.addLast("#global " + fname);
lines.addLast(fname+":");
if(hasFCall) {
lines.addLast("\tpush\tra");
}
if(varCount != 0) {
lines.addLast("\tpush\tfp");
lines.addLast("\tmov\tfp, sp");
lines.addLast("\taddi\tsp, sp, " + varCount);
}
for(lineWrapper w : functionLines) {
lines.addLast(w.compile(varCount, hasFCall));
}
}
public void setHasFCall() {
if(deadCode) return;
hasFCall = true;
}
public void functionReturn() {
if(deadCode) return;
functionLines.addLast(new functionReturnWrapper());
}
public void write(String filename) {
//System.out.println(".text");
//for(String s : lines) {
// System.out.println(s);
//}
//System.out.println(".data");
//for(String s : data) {
// System.out.println(s);
//}
System.out.println("Compilation successful!");
System.out.println("Writing...");
try {
PrintWriter out = new PrintWriter(new FileWriter(filename+".asm"));
out.println(".text");
for(String s : lines) {
out.println(s);
}
out.println(".data");
for(String s : data) {
out.println(s);
}
out.close();
} catch(IOException e) {
System.out.println("Writing failed");
return;
}
System.out.println("Program created!");
}
boolean deadCode;
public void deadCode(boolean codeIsDead) {
deadCode = codeIsDead;
}
}<|fim▁end|>
|
import java.util.LinkedList;
|
<|file_name|>wanderer.js<|end_file_name|><|fim▁begin|>var name = "Wanderer";
var collection_type = 0;
var is_secret = 0;
var desc = "Visited 503 new locations.";
var status_text = "Gosh, where HAVEN'T you traveled? Your peregrinations have earned you this footworn-but-carefree Wanderer badge.";
var last_published = 1348803094;
var is_shareworthy = 1;
var url = "wanderer";
var category = "exploring";
var url_swf = "\/c2.glitch.bz\/achievements\/2011-09-18\/wanderer_1316414516.swf";
var url_img_180 = "\/c2.glitch.bz\/achievements\/2011-09-18\/wanderer_1316414516_180.png";
var url_img_60 = "\/c2.glitch.bz\/achievements\/2011-09-18\/wanderer_1316414516_60.png";
var url_img_40 = "\/c2.glitch.bz\/achievements\/2011-09-18\/wanderer_1316414516_40.png";
function on_apply(pc){
}
var conditions = {
8 : {
type : "group_count",
group : "locations_visited",
value : "503"
},
};
function onComplete(pc){ // generated from rewards
var multiplier = pc.buffs_has('gift_of_gab') ? 1.2 : pc.buffs_has('silvertongue') ? 1.05 : 1.0;
multiplier += pc.imagination_get_achievement_modifier();
if (/completist/i.exec(this.name)) {
var level = pc.stats_get_level();
if (level > 4) {
multiplier *= (pc.stats_get_level()/4);
}
}
pc.stats_add_xp(round_to_5(1000 * multiplier), true);
pc.stats_add_favor_points("lem", round_to_5(200 * multiplier));
if(pc.buffs_has('gift_of_gab')) {
pc.buffs_remove('gift_of_gab');
}
else if(pc.buffs_has('silvertongue')) {
pc.buffs_remove('silvertongue');
}<|fim▁hole|>}
var rewards = {
"xp" : 1000,
"favor" : {
"giant" : "lem",
"points" : 200
}
};
// generated ok (NO DATE)<|fim▁end|>
| |
<|file_name|>saltkeys.go<|end_file_name|><|fim▁begin|>// Obdi - a REST interface and GUI for deploying software
// Copyright (C) 2014 Mark Clarkson
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"fmt"
"net"
"net/rpc"
"os"
)
type PostedData struct {
Hostname string
}
// ***************************************************************************
// GO RPC PLUGIN
// ***************************************************************************
func (t *Plugin) GetRequest(args *Args, response *[]byte) error {
// List all keys
if len(args.QueryString["env_id"]) == 0 {
ReturnError("'env_id' must be set", response)
return nil
}
sa := ScriptArgs{
ScriptName: "saltkey-showkeys.sh",
CmdArgs: "",
EnvVars: "",
EnvCapDesc: "SALT_WORKER",
Type: 2,
}
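	// Illustrative note (behaviour inferred from the fields above): this asks
	// a worker with the SALT_WORKER capability to run saltkey-showkeys.sh with
	// no arguments; RunScript returns the job id that is wrapped in the Reply
	// below.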
var jobid int64
var err error
if jobid, err = t.RunScript(args, sa, response); err != nil {
// RunScript wrote the error
return nil
}
reply := Reply{jobid, "", SUCCESS, ""}
jsondata, err := json.Marshal(reply)
if err != nil {
ReturnError("Marshal error: "+err.Error(), response)
return nil
}
*response = jsondata
return nil
}
func (t *Plugin) PostRequest(args *Args, response *[]byte) error {
// Check for required query string entries
if len(args.QueryString["hostname"]) == 0 {
ReturnError("'type' must be set", response)
return nil
}
if len(args.QueryString["type"]) == 0 {
ReturnError("'type' must be set", response)
return nil
}
if len(args.QueryString["env_id"]) == 0 {
ReturnError("'env_id' must be set", response)
return nil
}
	// Choose the script to run (its ScriptId is looked up in the scripts table):
scriptName := ""
if args.QueryString["type"][0] == "accept" {
scriptName = "saltkey-acceptkeys.sh"
} else {
scriptName = "saltkey-rejectkeys.sh"
}
var jobid int64
sa := ScriptArgs{
ScriptName: scriptName,
CmdArgs: args.QueryString["hostname"][0],
EnvVars: "",
EnvCapDesc: "SALT_WORKER",
Type: 2,
}
var err error
if jobid, err = t.RunScript(args, sa, response); err != nil {
// RunScript wrote the error
return nil
}
reply := Reply{jobid, "", SUCCESS, ""}
jsondata, err := json.Marshal(reply)
if err != nil {
ReturnError("Marshal error: "+err.Error(), response)
return nil
}
*response = jsondata
return nil
}
func (t *Plugin) DeleteRequest(args *Args, response *[]byte) error {
// Check for required query string entries
if len(args.QueryString["env_id"]) == 0 {
ReturnError("'env_id' must be set", response)
return nil
}
var jobid int64
sa := ScriptArgs{
ScriptName: "saltkey-deletekeys.sh",
CmdArgs: args.PathParams["id"],
EnvVars: "",
EnvCapDesc: "SALT_WORKER",
Type: 2,
}
var err error
if jobid, err = t.RunScript(args, sa, response); err != nil {
// RunScript wrote the error
return nil
}
reply := Reply{jobid, "", SUCCESS, ""}
jsondata, err := json.Marshal(reply)
if err != nil {
ReturnError("Marshal error: "+err.Error(), response)
return nil
}
*response = jsondata
return nil
}
func (t *Plugin) HandleRequest(args *Args, response *[]byte) error {
// All plugins must have this.
if len(args.QueryType) > 0 {
switch args.QueryType {
case "GET":
t.GetRequest(args, response)
return nil
case "POST":
t.PostRequest(args, response)<|fim▁hole|> }
ReturnError("Internal error: Invalid HTTP request type for this plugin "+
args.QueryType, response)
return nil
} else {
ReturnError("Internal error: HTTP request type was not set", response)
return nil
}
}
func main() {
//logit("Plugin starting")
plugin := new(Plugin)
rpc.Register(plugin)
listener, err := net.Listen("tcp", ":"+os.Args[1])
if err != nil {
txt := fmt.Sprintf("Listen error. ", err)
logit(txt)
}
//logit("Plugin listening on port " + os.Args[1])
if conn, err := listener.Accept(); err != nil {
txt := fmt.Sprintf("Accept error. ", err)
logit(txt)
} else {
//logit("New connection established")
rpc.ServeConn(conn)
}
}<|fim▁end|>
|
return nil
case "DELETE":
t.DeleteRequest(args, response)
return nil
|
<|file_name|>test.js<|end_file_name|><|fim▁begin|>/*
* @flow
*/
import type {Suite} from "flow-dev-tools/src/test/Suite";
const { suite, test } = require("flow-dev-tools/src/test/Tester");
module.exports = (suite(({ addFile, addFiles, addCode }) => [
test("BigInt invalid decimal type literal", [
addCode(`
type InvalidDecimal = 1.0n;
`).newErrors(
`
test.js:4
4: type InvalidDecimal = 1.0n;
^^^^ A bigint literal must be an integer
`,
)
]),
test("BigInt invalid negative decimal type literal", [
addCode(`
type InvalidNegDecimal = -1.0n;
`).newErrors(
`
test.js:4
4: type InvalidNegDecimal = -1.0n;
^^^^^ A bigint literal must be an integer
`,
)
]),
test("BigInt invalid decimal literal", [
addCode(`
const invalid_decimal = 1.0n;
`).newErrors(
`
test.js:4
4: const invalid_decimal = 1.0n;
^^^^ A bigint literal must be an integer
`,
)
]),
test("BigInt invalid negative decimal literal", [
addCode(`
const invalid_neg_decimal = -1.0n;
`).newErrors(
`
test.js:4
4: const invalid_neg_decimal = -1.0n;
^^^^ A bigint literal must be an integer
`,
)
]),
test("BigInt invalid scientific type literal", [
addCode(`
type InvalidE = 2e9n;
`).newErrors(
`
test.js:4
4: type InvalidE = 2e9n;
^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid negative scientific type literal", [
addCode(`
type InvalidNegE = -2e9n;
`).newErrors(
`
test.js:4
4: type InvalidNegE = -2e9n;
^^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid scientific decimal type literal", [
addCode(`
type InvalidNegDecimalE = 2.0e9n;
`).newErrors(
`
test.js:4
4: type InvalidNegDecimalE = 2.0e9n;
^^^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid negative scientific decimal type literal", [
addCode(`
type InvalidNegDecimalE = -2.0e9n;
`).newErrors(
`
test.js:4
4: type InvalidNegDecimalE = -2.0e9n;
^^^^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid scientific literal", [
addCode(`
const invalid_e = 2e9n;
`).newErrors(
`
test.js:4
4: const invalid_e = 2e9n;
^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid negative scientific literal", [
addCode(`
const invalid_neg_e = -2e9n;
`).newErrors(
`
test.js:4
4: const invalid_neg_e = -2e9n;
^^^^ A bigint literal cannot use exponential notation
`,
)
]),
test("BigInt invalid octal legacy type literal", [
addCode(`
type InvalidOctalLegacy = 016432n;
`).newErrors(
`
test.js:4
4: type InvalidOctalLegacy = 016432n;
^^^^^^^ Unexpected token ILLEGAL
`,
)
]),
test("BigInt invalid negative octal legacy type literal", [
addCode(`
type InvalidNegOctalLegacy = -016432n;
`).newErrors(
`
test.js:4
4: type InvalidNegOctalLegacy = -016432n;
^^^^^^^^ Unexpected token ILLEGAL
`,
)
]),<|fim▁hole|>
test("BigInt invalid octal legacy literal", [
addCode(`
const invalid_octal_legacy = 016432n;
`).newErrors(
`
test.js:4
4: const invalid_octal_legacy = 016432n;
^^^^^^^ Unexpected token ILLEGAL
`,
)
]),
test("BigInt invalid negative octal legacy literal", [
addCode(`
const invalid_neg_octal_legacy = -016432n;
`).newErrors(
`
test.js:4
4: const invalid_neg_octal_legacy = -016432n;
^^^^^^^ Unexpected token ILLEGAL
`,
)
]),
test("BigInt is not supported yet", [
addCode(`
type S = bigint;
const valid_binary = 0b101011101n;
const valid_neg_binary = -0b101011101n;
type ValidBinary = 0b101011101n;
type ValidNegBinary = -0b101011101n;
const valid_hex = 0xfff123n;
const valid_neg_hex = -0xfff123n;
type ValidHex = 0xfff123n;
type ValidNegHex = -0xfff123n;
const valid_large = 9223372036854775807n;
const valid_neg_large = -9223372036854775807n;
type ValidLarge = 9223372036854775807n;
type ValidNegLarge = -9223372036854775807n;
const valid_octal_new = 0o16432n;
const valid_neg_octal_new = -0o16432n;
type ValidOctalNew = 0o16432n;
type ValidNegOctalNew = -0o16432n;
const valid_small = 100n;
const valid_neg_small = -100n;
type ValidSmall = 100n;
type ValidNegSmall = -1n;
`).newErrors(
`
test.js:4
4: type S = bigint;
^^^^^^ BigInt bigint [1] is not yet supported. [bigint-unsupported]
References:
4: type S = bigint;
^^^^^^ [1]
test.js:6
6: const valid_binary = 0b101011101n;
^^^^^^^^^^^^ BigInt bigint literal \`0b101011101n\` [1] is not yet supported. [bigint-unsupported]
References:
6: const valid_binary = 0b101011101n;
^^^^^^^^^^^^ [1]
test.js:7
7: const valid_neg_binary = -0b101011101n;
^^^^^^^^^^^^ BigInt bigint literal \`0b101011101n\` [1] is not yet supported. [bigint-unsupported]
References:
7: const valid_neg_binary = -0b101011101n;
^^^^^^^^^^^^ [1]
test.js:8
8: type ValidBinary = 0b101011101n;
^^^^^^^^^^^^ BigInt bigint literal \`0b101011101n\` [1] is not yet supported. [bigint-unsupported]
References:
8: type ValidBinary = 0b101011101n;
^^^^^^^^^^^^ [1]
test.js:9
9: type ValidNegBinary = -0b101011101n;
^^^^^^^^^^^^^ BigInt bigint literal \`-0b101011101n\` [1] is not yet supported. [bigint-unsupported]
References:
9: type ValidNegBinary = -0b101011101n;
^^^^^^^^^^^^^ [1]
test.js:11
11: const valid_hex = 0xfff123n;
^^^^^^^^^ BigInt bigint literal \`0xfff123n\` [1] is not yet supported. [bigint-unsupported]
References:
11: const valid_hex = 0xfff123n;
^^^^^^^^^ [1]
test.js:12
12: const valid_neg_hex = -0xfff123n;
^^^^^^^^^ BigInt bigint literal \`0xfff123n\` [1] is not yet supported. [bigint-unsupported]
References:
12: const valid_neg_hex = -0xfff123n;
^^^^^^^^^ [1]
test.js:13
13: type ValidHex = 0xfff123n;
^^^^^^^^^ BigInt bigint literal \`0xfff123n\` [1] is not yet supported. [bigint-unsupported]
References:
13: type ValidHex = 0xfff123n;
^^^^^^^^^ [1]
test.js:14
14: type ValidNegHex = -0xfff123n;
^^^^^^^^^^ BigInt bigint literal \`-0xfff123n\` [1] is not yet supported. [bigint-unsupported]
References:
14: type ValidNegHex = -0xfff123n;
^^^^^^^^^^ [1]
test.js:16
16: const valid_large = 9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ BigInt bigint literal \`9223372036854775807n\` [1] is not yet supported. [bigint-unsupported]
References:
16: const valid_large = 9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ [1]
test.js:17
17: const valid_neg_large = -9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ BigInt bigint literal \`9223372036854775807n\` [1] is not yet supported. [bigint-unsupported]
References:
17: const valid_neg_large = -9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ [1]
test.js:18
18: type ValidLarge = 9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ BigInt bigint literal \`9223372036854775807n\` [1] is not yet supported. [bigint-unsupported]
References:
18: type ValidLarge = 9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^ [1]
test.js:19
19: type ValidNegLarge = -9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^^ BigInt bigint literal \`-9223372036854775807n\` [1] is not yet supported. [bigint-unsupported]
References:
19: type ValidNegLarge = -9223372036854775807n;
^^^^^^^^^^^^^^^^^^^^^ [1]
test.js:21
21: const valid_octal_new = 0o16432n;
^^^^^^^^ BigInt bigint literal \`0o16432n\` [1] is not yet supported. [bigint-unsupported]
References:
21: const valid_octal_new = 0o16432n;
^^^^^^^^ [1]
test.js:22
22: const valid_neg_octal_new = -0o16432n;
^^^^^^^^ BigInt bigint literal \`0o16432n\` [1] is not yet supported. [bigint-unsupported]
References:
22: const valid_neg_octal_new = -0o16432n;
^^^^^^^^ [1]
test.js:23
23: type ValidOctalNew = 0o16432n;
^^^^^^^^ BigInt bigint literal \`0o16432n\` [1] is not yet supported. [bigint-unsupported]
References:
23: type ValidOctalNew = 0o16432n;
^^^^^^^^ [1]
test.js:24
24: type ValidNegOctalNew = -0o16432n;
^^^^^^^^^ BigInt bigint literal \`-0o16432n\` [1] is not yet supported. [bigint-unsupported]
References:
24: type ValidNegOctalNew = -0o16432n;
^^^^^^^^^ [1]
test.js:26
26: const valid_small = 100n;
^^^^ BigInt bigint literal \`100n\` [1] is not yet supported. [bigint-unsupported]
References:
26: const valid_small = 100n;
^^^^ [1]
test.js:27
27: const valid_neg_small = -100n;
^^^^ BigInt bigint literal \`100n\` [1] is not yet supported. [bigint-unsupported]
References:
27: const valid_neg_small = -100n;
^^^^ [1]
test.js:28
28: type ValidSmall = 100n;
^^^^ BigInt bigint literal \`100n\` [1] is not yet supported. [bigint-unsupported]
References:
28: type ValidSmall = 100n;
^^^^ [1]
test.js:29
29: type ValidNegSmall = -1n;
^^^ BigInt bigint literal \`-1n\` [1] is not yet supported. [bigint-unsupported]
References:
29: type ValidNegSmall = -1n;
^^^ [1]
`,
)
]),
test("BigInt can be suppressed", [
addCode(`
//$FlowFixMe
type S = bigint;
//$FlowFixMe
type A = 1n;
//$FlowFixMe
const valid_binary = 0b101011101n;
//$FlowFixMe
const valid_hex = 0xfff123n;
//$FlowFixMe
const valid_large = 9223372036854775807n;
//$FlowFixMe
const valid_octal_new = 0o16432n;
//$FlowFixMe
const valid_small = 100n;
`).noNewErrors()
])
]): Suite);<|fim▁end|>
| |
<|file_name|>event.py<|end_file_name|><|fim▁begin|>import asyncio
import inspect
import logging
from typing import List, Tuple, Callable, NamedTuple
from lightbus.schema.schema import Parameter
from lightbus.message import EventMessage
from lightbus.client.subclients.base import BaseSubClient
from lightbus.client.utilities import validate_event_or_rpc_name, queue_exception_checker, OnError
from lightbus.client.validator import validate_outgoing, validate_incoming
from lightbus.exceptions import (
UnknownApi,
EventNotFound,
InvalidEventArguments,
InvalidEventListener,
ListenersAlreadyStarted,
DuplicateListenerName,
)
from lightbus.log import L, Bold
from lightbus.client.commands import (
SendEventCommand,
AcknowledgeEventCommand,
ConsumeEventsCommand,
CloseCommand,
)
from lightbus.utilities.async_tools import run_user_provided_callable, cancel_and_log_exceptions
from lightbus.utilities.internal_queue import InternalQueue
from lightbus.utilities.casting import cast_to_signature
from lightbus.utilities.deforming import deform_to_bus
from lightbus.utilities.singledispatch import singledispatchmethod
logger = logging.getLogger(__name__)
class EventClient(BaseSubClient):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._event_listeners: List[Listener] = []
self._event_listener_tasks = set()
self._listeners_started = False
async def fire_event(
self, api_name, name, kwargs: dict = None, options: dict = None
) -> EventMessage:
kwargs = kwargs or {}
try:
api = self.api_registry.get(api_name)
except UnknownApi:
raise UnknownApi(
"Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name}"
" was found in the registry. An API being in the registry implies you are an"
" authority on that API. Therefore, Lightbus requires the API to be in the registry"
" as it is a bad idea to fire events on behalf of remote APIs. However, this could"
" also be caused by a typo in the API name or event name, or be because the API"
" class has not been registered using bus.client.register_api(). ".format(
**locals()
)
)
validate_event_or_rpc_name(api_name, "event", name)
try:
event = api.get_event(name)
except EventNotFound:
raise EventNotFound(
"Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does"
" not seem to contain an event named {name}. You may need to define the event, you"
" may also be using the incorrect API. Also check for typos.".format(**locals())
)
p: Parameter
parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}
required_parameter_names = {
p.name if isinstance(p, Parameter) else p
for p in event.parameters
if getattr(p, "is_required", True)
}
if required_parameter_names and not required_parameter_names.issubset(set(kwargs.keys())):
raise InvalidEventArguments(
"Missing required arguments when firing event {}.{}. Attempted to fire event with "
"{} arguments: {}. Event requires {}: {}".format(
api_name,
name,
len(kwargs),
sorted(kwargs.keys()),
len(parameter_names),
sorted(parameter_names),
)
)
extra_arguments = set(kwargs.keys()) - parameter_names
if extra_arguments:
raise InvalidEventArguments(
"Unexpected argument supplied when firing event {}.{}. Attempted to fire event with"
" {} arguments: {}. Unexpected argument(s): {}".format(
api_name, name, len(kwargs), sorted(kwargs.keys()), sorted(extra_arguments),
)
)
kwargs = deform_to_bus(kwargs)
event_message = EventMessage(
api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
)
validate_outgoing(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_sent", event_message=event_message)
logger.info(L("📤 Sending event {}.{}".format(Bold(api_name), Bold(name))))
await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()
await self.hook_registry.execute("after_event_sent", event_message=event_message)
return event_message
def listen(
self,
events: List[Tuple[str, str]],
listener: Callable,
listener_name: str,
options: dict = None,
on_error: OnError = OnError.SHUTDOWN,
):
if self._listeners_started:
# We are actually technically able to support starting listeners after worker
# startup, but it seems like it is a bad idea and a bit of an edge case.
# We may revisit this if sufficient demand arises.
raise ListenersAlreadyStarted(
"You are trying to register a new listener after the worker has started running."
" Listeners should be setup in your @bus.client.on_start() hook, in your bus.py"
" file."
)
sanity_check_listener(listener)
for listener_api_name, _ in events:
duplicate_listener = self.get_event_listener(listener_api_name, listener_name)
if duplicate_listener:
raise DuplicateListenerName(
f"A listener with name '{listener_name}' is already registered for API"
f" '{listener_api_name}'. You cannot have multiple listeners with the same name"
" for a given API. Rename one of your listeners to resolve this problem."
)
for api_name, name in events:
validate_event_or_rpc_name(api_name, "event", name)
self._event_listeners.append(
Listener(
callable=listener,
options=options or {},
events=events,
name=listener_name,
on_error=on_error,
)
)
def get_event_listener(self, api_name: str, listener_name: str):
for listener in self._event_listeners:
if listener.name == listener_name:
for listener_api_name, _ in listener.events:
if listener_api_name == api_name:
return listener
return None
<|fim▁hole|>
# TODO: Check events match those requested
logger.info(
L(
"📩 Received event {}.{} with ID {}".format(
Bold(event_message.api_name), Bold(event_message.event_name), event_message.id
)
)
)
validate_incoming(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_execution", event_message=event_message)
if self.config.api(event_message.api_name).cast_values:
parameters = cast_to_signature(parameters=event_message.kwargs, callable=listener)
else:
parameters = event_message.kwargs
# Call the listener.
# Pass the event message as a positional argument,
# thereby allowing listeners to have flexibility in the argument names.
# (And therefore allowing listeners to use the `event` parameter themselves)
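        # e.g. (illustrative signature only):
        #     async def my_listener(event, user_id=None, email=None): ...
        # receives this EventMessage as `event` and the event kwargs as
        # keyword arguments.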
if on_error == OnError.SHUTDOWN:
# Run the callback in the queue_exception_checker(). This will
# put any errors into Lightbus' error queue, and therefore
# cause a shutdown
await queue_exception_checker(
run_user_provided_callable(listener, args=[event_message], kwargs=parameters),
self.error_queue,
help=(
f"An error occurred while {listener} was handling an event. Lightbus will now"
" shutdown. If you wish to continue you can use the on_error parameter when"
" setting up your event. For example:\n\n bus.my_api.my_event.listen(fn,"
" listener_name='example', on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG)"
),
)
elif on_error == on_error.ACKNOWLEDGE_AND_LOG:
try:
await listener(event_message, **parameters)
except asyncio.CancelledError:
raise
except Exception as e:
# Log here. Acknowledgement will follow in below
logger.exception(e)
# Acknowledge the successfully processed message
await self.producer.send(
AcknowledgeEventCommand(message=event_message, options=options)
).wait()
await self.hook_registry.execute("after_event_execution", event_message=event_message)
async def close(self):
await super().close()
await cancel_and_log_exceptions(*self._event_listener_tasks)
await self.producer.send(CloseCommand()).wait()
await self.consumer.close()
await self.producer.close()
@singledispatchmethod
async def handle(self, command):
raise NotImplementedError(f"Did not recognise command {command.__class__.__name__}")
async def start_registered_listeners(self):
"""Start all listeners which have been previously registered via listen()"""
self._listeners_started = True
for listener in self._event_listeners:
await self._start_listener(listener)
async def _start_listener(self, listener: "Listener"):
# Setting the maxsize to 1 ensures the transport cannot load
# messages faster than we can consume them
queue: InternalQueue[EventMessage] = InternalQueue(maxsize=1)
async def consume_events():
while True:
logger.debug("Event listener now waiting for event on the internal queue")
event_message = await queue.get()
logger.debug(
"Event listener has now received an event on the internal queue, processing now"
)
await self._on_message(
event_message=event_message,
listener=listener.callable,
options=listener.options,
on_error=listener.on_error,
)
queue.task_done()
# Start the consume_events() consumer running
task = asyncio.ensure_future(queue_exception_checker(consume_events(), self.error_queue))
self._event_listener_tasks.add(task)
await self.producer.send(
ConsumeEventsCommand(
events=listener.events,
destination_queue=queue,
listener_name=listener.name,
options=listener.options,
)
).wait()
class Listener(NamedTuple):
callable: Callable
options: dict
events: List[Tuple[str, str]]
name: str
on_error: OnError
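# Illustrative only (hypothetical API and event names): a call like
#     client.listen([("auth", "user_registered")], listener=send_welcome,
#                   listener_name="welcome_email")
# is stored as Listener(callable=send_welcome, options={},
# events=[("auth", "user_registered")], name="welcome_email",
# on_error=OnError.SHUTDOWN).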
def sanity_check_listener(listener):
if not callable(listener):
raise InvalidEventListener(
f"The specified event listener {listener} is not callable. Perhaps you called the"
" function rather than passing the function itself?"
)
total_positional_args = 0
has_variable_positional_args = False # Eg: *args
for parameter in inspect.signature(listener).parameters.values():
if parameter.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
total_positional_args += 1
elif parameter.kind == inspect.Parameter.VAR_POSITIONAL:
has_variable_positional_args = True
if has_variable_positional_args:
return
if not total_positional_args:
raise InvalidEventListener(
f"The specified event listener {listener} must take at one positional argument. "
"This will be the event message. For example: "
"my_listener(event, other, ...)"
)<|fim▁end|>
|
async def _on_message(
self, event_message: EventMessage, listener: Callable, options: dict, on_error: OnError
):
|
<|file_name|>project.js<|end_file_name|><|fim▁begin|>'use strict';
var page = 'projects';
module.exports = {
renderPage: function(req, res) {
if (!req.user) {
res.redirect('/login');
} else {
res.render(page, {
helpers: {
activeClass: function(section) {<|fim▁hole|> return '';
}
}
},
user: req.user ? req.user.toJSON() : null
});
}
}
}<|fim▁end|>
|
if (section === 'projects') {
return 'active';
} else {
|
<|file_name|>about_scoring_project.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
<|fim▁hole|>def score(dice):
'''
    Calculate the score for the results of up to five dice rolls
'''
return sum((score_of_three(k) * (v//3) + score_of_one(k) * (v%3) for k, v in Counter(dice).items()))
def score_of_three(num):
'''
Calculate score for set of three
'''
if num == 1:
return 1000
else:
return num*100
def score_of_one(num):
'''
Calculate score for a roll not in a set of three
'''
if num == 1:
return 100
elif num == 5:
return 50
else:
return 0
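# Worked example of the decomposition above (matching the examples in the
# intro comment): for score([1, 1, 1, 5, 1]), Counter gives {1: 4, 5: 1}.
# The four 1s yield one triple (1000) plus one leftover 1 (100); the single 5
# adds 50; total 1150.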
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
self.assertEqual(1150, score([1,1,1,5,1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1,2,2,2]))
self.assertEqual(350, score([1,5,2,2,2]))<|fim▁end|>
|
from collections import Counter
|
<|file_name|>serialwin32.py<|end_file_name|><|fim▁begin|>#! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# serial driver for win32
# see __init__.py
#
# (C) 2001-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Initial patch to use ctypes by Giovanni Bajo <[email protected]>
import ctypes
import time
from serial import win32
import serial
from serial.serialutil import SerialBase, SerialException, to_bytes, portNotOpenError, writeTimeoutError
class Serial(SerialBase):
"""Serial port implementation for Win32 based on ctypes."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def __init__(self, *args, **kwargs):
super(SerialBase, self).__init__()
self._port_handle = None
self._overlapped_read = None
self._overlapped_write = None
SerialBase.__init__(self, *args, **kwargs)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
# the "\\.\COMx" format is required for devices other than COM1-COM8
# not all versions of windows seem to support this properly
# so that the first few ports are used with the DOS device name
port = self.name
try:
if port.upper().startswith('COM') and int(port[3:]) > 8:
port = '\\\\.\\' + port
except ValueError:
                # handle non-numeric port names like "COMnotanumber"
pass
self._port_handle = win32.CreateFile(
port,
win32.GENERIC_READ | win32.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32.OPEN_EXISTING,
win32.FILE_ATTRIBUTE_NORMAL | win32.FILE_FLAG_OVERLAPPED,
0)
if self._port_handle == win32.INVALID_HANDLE_VALUE:
self._port_handle = None # 'cause __del__ is called anyway
raise SerialException("could not open port %r: %r" % (self.portstr, ctypes.WinError()))
try:
self._overlapped_read = win32.OVERLAPPED()
self._overlapped_read.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlapped_write = win32.OVERLAPPED()
#~ self._overlapped_write.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlapped_write.hEvent = win32.CreateEvent(None, 0, 0, None)
# Setup a 4k buffer
win32.SetupComm(self._port_handle, 4096, 4096)
# Save original timeout values:
self._orgTimeouts = win32.COMMTIMEOUTS()
win32.GetCommTimeouts(self._port_handle, ctypes.byref(self._orgTimeouts))
self._reconfigure_port()
# Clear buffers:
# Remove anything that was there
win32.PurgeComm(
self._port_handle,
win32.PURGE_TXCLEAR | win32.PURGE_TXABORT |
win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
except:
try:
self._close()
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self._port_handle = None
raise
else:
self.is_open = True
def _reconfigure_port(self):
"""Set communication parameters on opened port."""
if not self._port_handle:
raise SerialException("Can only operate on a valid port handle")
# Set Windows timeout values
# timeouts is a tuple with the following items:
# (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
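# For example (illustrative): timeout=1.5 becomes ReadTotalTimeoutConstant=1500 ms
# below, timeout=0 selects non-blocking reads via MAXDWORD, and timeout=None keeps
# the all-zero defaults, i.e. fully blocking reads.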
timeouts = win32.COMMTIMEOUTS()
if self._timeout is None:
pass # default of all zeros is OK
elif self._timeout == 0:
timeouts.ReadIntervalTimeout = win32.MAXDWORD
else:
timeouts.ReadTotalTimeoutConstant = max(int(self._timeout * 1000), 1)
if self._timeout != 0 and self._inter_byte_timeout is not None:
timeouts.ReadIntervalTimeout = max(int(self._inter_byte_timeout * 1000), 1)
if self._write_timeout is None:
pass
elif self._write_timeout == 0:
timeouts.WriteTotalTimeoutConstant = win32.MAXDWORD
else:
timeouts.WriteTotalTimeoutConstant = max(int(self._write_timeout * 1000), 1)
win32.SetCommTimeouts(self._port_handle, ctypes.byref(timeouts))
win32.SetCommMask(self._port_handle, win32.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32.DCB()
win32.GetCommState(self._port_handle, ctypes.byref(comDCB))
comDCB.BaudRate = self._baudrate
if self._bytesize == serial.FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == serial.SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == serial.SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == serial.EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == serial.PARITY_NONE:
comDCB.Parity = win32.NOPARITY
comDCB.fParity = 0 # Disable Parity Check
elif self._parity == serial.PARITY_EVEN:
comDCB.Parity = win32.EVENPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_ODD:
comDCB.Parity = win32.ODDPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_MARK:
comDCB.Parity = win32.MARKPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_SPACE:
comDCB.Parity = win32.SPACEPARITY
comDCB.fParity = 1 # Enable Parity Check
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == serial.STOPBITS_ONE:
comDCB.StopBits = win32.ONESTOPBIT
elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
comDCB.StopBits = win32.ONE5STOPBITS
elif self._stopbits == serial.STOPBITS_TWO:
comDCB.StopBits = win32.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
comDCB.fBinary = 1 # Enable Binary Transmission
# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
if self._rs485_mode is None:
if self._rtscts:
comDCB.fRtsControl = win32.RTS_CONTROL_HANDSHAKE
else:
comDCB.fRtsControl = win32.RTS_CONTROL_ENABLE if self._rts_state else win32.RTS_CONTROL_DISABLE
comDCB.fOutxCtsFlow = self._rtscts
else:
# checks for unsupported settings
# XXX verify if platform really does not have a setting for those
if not self._rs485_mode.rts_level_for_tx:
raise ValueError(
'Unsupported value for RS485Settings.rts_level_for_tx: %r' % (
self._rs485_mode.rts_level_for_tx,))
if self._rs485_mode.rts_level_for_rx:
raise ValueError(
'Unsupported value for RS485Settings.rts_level_for_rx: %r' % (
self._rs485_mode.rts_level_for_rx,))
if self._rs485_mode.delay_before_tx is not None:
raise ValueError(
'Unsupported value for RS485Settings.delay_before_tx: %r' % (
self._rs485_mode.delay_before_tx,))
if self._rs485_mode.delay_before_rx is not None:
raise ValueError(
'Unsupported value for RS485Settings.delay_before_rx: %r' % (
self._rs485_mode.delay_before_rx,))
if self._rs485_mode.loopback:
raise ValueError(
'Unsupported value for RS485Settings.loopback: %r' % (
self._rs485_mode.loopback,))
comDCB.fRtsControl = win32.RTS_CONTROL_TOGGLE
comDCB.fOutxCtsFlow = 0
if self._dsrdtr:
comDCB.fDtrControl = win32.DTR_CONTROL_HANDSHAKE
else:
comDCB.fDtrControl = win32.DTR_CONTROL_ENABLE if self._dtr_state else win32.DTR_CONTROL_DISABLE
comDCB.fOutxDsrFlow = self._dsrdtr
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
comDCB.XonChar = serial.XON
comDCB.XoffChar = serial.XOFF
if not win32.SetCommState(self._port_handle, ctypes.byref(comDCB)):
raise ValueError("Cannot configure port, some setting was wrong. Original message: %r" % ctypes.WinError())
#~ def __del__(self):
#~ self.close()
def _close(self):
"""internal close port helper"""
if self._port_handle:
# Restore original timeout values:
win32.SetCommTimeouts(self._port_handle, self._orgTimeouts)
# Close COM-Port:
win32.CloseHandle(self._port_handle)
if self._overlapped_read is not None:
win32.CloseHandle(self._overlapped_read.hEvent)
self._overlapped_read = None
if self._overlapped_write is not None:
win32.CloseHandle(self._overlapped_write.hEvent)
self._overlapped_write = None
self._port_handle = None
def close(self):
"""Close port"""
if self.is_open:
self._close()
self.is_open = False
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbInQue
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read."""
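# Reads below use overlapped (asynchronous) ReadFile: with timeout == 0 only the
# bytes already reported by ClearCommError are requested, otherwise the call waits
# in GetOverlappedResult for the driver to enforce the configured timeouts.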
if not self._port_handle:
raise portNotOpenError
if size > 0:
win32.ResetEvent(self._overlapped_read.hEvent)
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
if self.timeout == 0:
n = min(comstat.cbInQue, size)
if n > 0:
buf = ctypes.create_string_buffer(n)
rc = win32.DWORD()
read_ok = win32.ReadFile(self._port_handle, buf, n, ctypes.byref(rc), ctypes.byref(self._overlapped_read))
if not read_ok and win32.GetLastError() not in (win32.ERROR_SUCCESS, win32.ERROR_IO_PENDING):
raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
win32.WaitForSingleObject(self._overlapped_read.hEvent, win32.INFINITE)
read = buf.raw[:rc.value]
else:
read = bytes()
else:
buf = ctypes.create_string_buffer(size)
rc = win32.DWORD()
read_ok = win32.ReadFile(self._port_handle, buf, size, ctypes.byref(rc), ctypes.byref(self._overlapped_read))
if not read_ok and win32.GetLastError() not in (win32.ERROR_SUCCESS, win32.ERROR_IO_PENDING):
raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
win32.GetOverlappedResult(self._port_handle, ctypes.byref(self._overlapped_read), ctypes.byref(rc), True)
read = buf.raw[:rc.value]
else:
read = bytes()<|fim▁hole|> if not self._port_handle:
raise portNotOpenError
#~ if not isinstance(data, (bytes, bytearray)):
#~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
# convert data (needed in case of memoryview instance: Py 3.1 io lib), ctypes doesn't like memoryview
data = to_bytes(data)
if data:
#~ win32event.ResetEvent(self._overlapped_write.hEvent)
n = win32.DWORD()
err = win32.WriteFile(self._port_handle, data, len(data), ctypes.byref(n), self._overlapped_write)
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("WriteFile failed (%r)" % ctypes.WinError())
if self._write_timeout != 0: # if blocking (None) or w/ write timeout (>0)
# Wait for the write to complete.
#~ win32.WaitForSingleObject(self._overlapped_write.hEvent, win32.INFINITE)
err = win32.GetOverlappedResult(self._port_handle, self._overlapped_write, ctypes.byref(n), True)
if n.value != len(data):
raise writeTimeoutError
return n.value
else:
return 0
def flush(self):
"""\
Flush of file like objects. In this case, wait until all data
is written.
"""
while self.out_waiting:
time.sleep(0.05)
# XXX could also use WaitCommEvent with mask EV_TXEMPTY, but it would
# require overlapped IO and it's also only possible to set a single mask
# on the port.
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._port_handle:
raise portNotOpenError
win32.PurgeComm(self._port_handle, win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and discarding all
that is in the buffer.
"""
if not self._port_handle:
raise portNotOpenError
win32.PurgeComm(self._port_handle, win32.PURGE_TXCLEAR | win32.PURGE_TXABORT)
def _update_break_state(self):
"""Set break: Controls TXD. When active, to transmitting is possible."""
if not self._port_handle:
raise portNotOpenError
if self._break_state:
win32.SetCommBreak(self._port_handle)
else:
win32.ClearCommBreak(self._port_handle)
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self._rts_state:
win32.EscapeCommFunction(self._port_handle, win32.SETRTS)
else:
win32.EscapeCommFunction(self._port_handle, win32.CLRRTS)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if self._dtr_state:
win32.EscapeCommFunction(self._port_handle, win32.SETDTR)
else:
win32.EscapeCommFunction(self._port_handle, win32.CLRDTR)
def _GetCommModemStatus(self):
if not self._port_handle:
raise portNotOpenError
stat = win32.DWORD()
win32.GetCommModemStatus(self._port_handle, ctypes.byref(stat))
return stat.value
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
return win32.MS_CTS_ON & self._GetCommModemStatus() != 0
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
return win32.MS_DSR_ON & self._GetCommModemStatus() != 0
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
return win32.MS_RING_ON & self._GetCommModemStatus() != 0
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
return win32.MS_RLSD_ON & self._GetCommModemStatus() != 0
# - - platform specific - - - -
def set_buffer_size(self, rx_size=4096, tx_size=None):
"""\
Recommend a buffer size to the driver (device driver can ignore this
value). Must be called before the port is opened.
"""
if tx_size is None:
tx_size = rx_size
win32.SetupComm(self._port_handle, rx_size, tx_size)
def set_output_flow_control(self, enable=True):
"""\
Manually control flow - when software flow control is enabled.
This will do the same as if XON (true) or XOFF (false) are received
from the other device and control the transmission accordingly.
WARNING: this function is not portable to different platforms!
"""
if not self._port_handle:
raise portNotOpenError
if enable:
win32.EscapeCommFunction(self._port_handle, win32.SETXON)
else:
win32.EscapeCommFunction(self._port_handle, win32.SETXOFF)
@property
def out_waiting(self):
"""Return how many bytes the in the outgoing buffer"""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbOutQue
# Test function only!!
if __name__ == '__main__':
import sys
s = Serial(0)
sys.stdout.write("%s\n" % s)
s = Serial()
sys.stdout.write("%s\n" % s)
s.baudrate = 19200
s.databits = 7
s.close()
s.port = 0
s.open()
sys.stdout.write("%s\n" % s)<|fim▁end|>
|
return bytes(read)
def write(self, data):
"""Output the given byte string over the serial port."""
|
<|file_name|>ntddscsi.rs<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! Constants and types for accessing SCSI port adapters.
use shared::basetsd::{ULONG32, ULONG_PTR};
use shared::minwindef::{UCHAR, ULONG, USHORT};
use shared::ntdef::{LARGE_INTEGER, LONG, LONGLONG, PVOID, ULONGLONG, VOID, WCHAR};
use um::winioctl::{
DEVICE_TYPE, FILE_ANY_ACCESS, FILE_DEVICE_CONTROLLER, FILE_READ_ACCESS,
FILE_WRITE_ACCESS, METHOD_BUFFERED
};
use um::winnt::{ANYSIZE_ARRAY, BOOLEAN, PBOOLEAN};
DEFINE_GUID!{ScsiRawInterfaceGuid,
0x53f56309, 0xb6bf, 0x11d0, 0x94, 0xf2, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b}
DEFINE_GUID!{WmiScsiAddressGuid,
0x53f5630f, 0xb6bf, 0x11d0, 0x94, 0xf2, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b}
pub const IOCTL_SCSI_BASE: DEVICE_TYPE = FILE_DEVICE_CONTROLLER;
pub const FILE_DEVICE_SCSI: ULONG = 0x0000001;
pub const DD_SCSI_DEVICE_NAME: &'static str = "\\Device\\ScsiPort";
pub const IOCTL_SCSI_PASS_THROUGH: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0401, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_MINIPORT: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0402, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_GET_INQUIRY_DATA: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0403, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_SCSI_GET_CAPABILITIES: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0404, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_SCSI_PASS_THROUGH_DIRECT: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0405, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_GET_ADDRESS: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0406, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_SCSI_RESCAN_BUS: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0407, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_SCSI_GET_DUMP_POINTERS: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0408, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_SCSI_FREE_DUMP_POINTERS: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0409, METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const IOCTL_IDE_PASS_THROUGH: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040a, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_ATA_PASS_THROUGH: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040b, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_ATA_PASS_THROUGH_DIRECT: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040c, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_ATA_MINIPORT: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040d, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_MINIPORT_PROCESS_SERVICE_IRP: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040e, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_MPIO_PASS_THROUGH_PATH: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x040f, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_MPIO_PASS_THROUGH_PATH_DIRECT: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0410, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_PASS_THROUGH_EX: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0411, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_PASS_THROUGH_DIRECT_EX: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0412, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_MPIO_PASS_THROUGH_PATH_EX: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0413, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_MPIO_PASS_THROUGH_PATH_DIRECT_EX: ULONG =
CTL_CODE!(IOCTL_SCSI_BASE, 0x0414, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS);
pub const IOCTL_SCSI_MINIPORT_NVCACHE: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0600;
pub const IOCTL_SCSI_MINIPORT_HYBRID: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0620;
pub const IOCTL_SCSI_MINIPORT_FIRMWARE: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0780;
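// Usage sketch (an assumption, not part of the original bindings): these control
// codes are normally passed to DeviceIoControl with the matching structure below
// as the input/output buffer, e.g. IOCTL_SCSI_GET_ADDRESS filling a SCSI_ADDRESS
// or IOCTL_SCSI_PASS_THROUGH wrapping a SCSI_PASS_THROUGH plus sense/data buffers.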
STRUCT!{struct SCSI_PASS_THROUGH {
Length: USHORT,
ScsiStatus: UCHAR,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
CdbLength: UCHAR,
SenseInfoLength: UCHAR,
DataIn: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
DataBufferOffset: ULONG_PTR,
SenseInfoOffset: ULONG,
Cdb: [UCHAR; 16],
}}
pub type PSCSI_PASS_THROUGH = *mut SCSI_PASS_THROUGH;
STRUCT!{struct SCSI_PASS_THROUGH_DIRECT {
Length: USHORT,
ScsiStatus: UCHAR,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
CdbLength: UCHAR,
SenseInfoLength: UCHAR,
DataIn: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
DataBuffer: PVOID,
SenseInfoOffset: ULONG,
Cdb: [UCHAR; 16],
}}
pub type PSCSI_PASS_THROUGH_DIRECT = *mut SCSI_PASS_THROUGH_DIRECT;
STRUCT!{struct SCSI_PASS_THROUGH32 {
Length: USHORT,
ScsiStatus: UCHAR,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
CdbLength: UCHAR,
SenseInfoLength: UCHAR,
DataIn: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
DataBufferOffset: ULONG32,
SenseInfoOffset: ULONG,
Cdb: [UCHAR; 16],
}}
#[cfg(target_arch = "x86_64")]
IFDEF!{
pub type PSCSI_PASS_THROUGH32 = *mut SCSI_PASS_THROUGH32;
STRUCT!{struct SCSI_PASS_THROUGH_DIRECT32 {
Length: USHORT,
ScsiStatus: UCHAR,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
CdbLength: UCHAR,
SenseInfoLength: UCHAR,
DataIn: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
DataBuffer: ULONG32, // Rust doesn't have anything like __ptr32
SenseInfoOffset: ULONG,
Cdb: [UCHAR; 16],
}}
pub type PSCSI_PASS_THROUGH_DIRECT32 = *mut SCSI_PASS_THROUGH_DIRECT32;
}
STRUCT!{struct SCSI_PASS_THROUGH_EX {
Version: ULONG,
Length: ULONG,
CdbLength: ULONG,
StorAddressLength: ULONG,
ScsiStatus: UCHAR,
SenseInfolength: UCHAR,
DataDirection: UCHAR,
Reserved: UCHAR,
TimeOutValue: ULONG,
StorAddressOffset: ULONG,
SenseInfoOffset: ULONG,
DataOutTransferLength: ULONG,
DataInTransferLength: ULONG,
DataOutBufferOffset: ULONG_PTR,
DataInBufferOffset: ULONG_PTR,
Cdb: [UCHAR; ANYSIZE_ARRAY],
}}
pub type PSCSI_PASS_THROUGH_EX = *mut SCSI_PASS_THROUGH_EX;
STRUCT!{struct SCSI_PASS_THROUGH_DIRECT_EX {
Version: ULONG,
Length: ULONG,
CdbLength: ULONG,
StorAddressLength: ULONG,
ScsiStatus: UCHAR,
SenseInfolength: UCHAR,
DataDirection: UCHAR,
Reserved: UCHAR,
TimeOutValue: ULONG,
StorAddressOffset: ULONG,
SenseInfoOffset: ULONG,
DataOutTransferLength: ULONG,
DataInTransferLength: ULONG,
DataOutBuffer: *mut VOID,
DataInBuffer: *mut VOID,
Cdb: [UCHAR; ANYSIZE_ARRAY],
}}
pub type PSCSI_PASS_THROUGH_DIRECT_EX = *mut SCSI_PASS_THROUGH_DIRECT_EX;
#[cfg(target_arch = "x86_64")]
IFDEF!{
STRUCT!{struct SCSI_PASS_THROUGH32_EX {
Version: ULONG,
Length: ULONG,
CdbLength: ULONG,
StorAddressLength: ULONG,
ScsiStatus: UCHAR,
SenseInfolength: UCHAR,
DataDirection: UCHAR,
Reserved: UCHAR,
TimeOutValue: ULONG,
StorAddressOffset: ULONG,
SenseInfoOffset: ULONG,
DataOutTransferLength: ULONG,
DataInTransferLength: ULONG,
DataOutBufferOffset: ULONG32,
DataInBufferOffset: ULONG32,
Cdb: [UCHAR; ANYSIZE_ARRAY],
}}
pub type PSCSI_PASS_THROUGH32_EX = *mut SCSI_PASS_THROUGH32_EX;
STRUCT!{struct SCSI_PASS_THROUGH_DIRECT32_EX {
Version: ULONG,
Length: ULONG,
CdbLength: ULONG,
StorAddressLength: ULONG,
ScsiStatus: UCHAR,
SenseInfolength: UCHAR,
DataDirection: UCHAR,
Reserved: UCHAR,
TimeOutValue: ULONG,
StorAddressOffset: ULONG,
SenseInfoOffset: ULONG,
DataOutTransferLength: ULONG,
DataInTransferLength: ULONG,
DataOutBuffer: ULONG32,
DataInBuffer: ULONG32,
Cdb: [UCHAR; ANYSIZE_ARRAY],
}}
pub type PSCSI_PASS_THROUGH_DIRECT32_EX = *mut SCSI_PASS_THROUGH_DIRECT32_EX;
}
STRUCT!{struct ATA_PASS_THROUGH_EX {
Length: USHORT,
AtaFlags: USHORT,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
ReservedAsUchar: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
ReservedAsUlong: ULONG,
DataBufferOffset: ULONG_PTR,
PreviousTaskFile: [UCHAR; 8],
CurrentTaskFile: [UCHAR; 8],
}}
pub type PATA_PASS_THROUGH_EX = *mut ATA_PASS_THROUGH_EX;
STRUCT!{struct ATA_PASS_THROUGH_DIRECT {
Length: USHORT,
AtaFlags: USHORT,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
ReservedAsUchar: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
ReservedAsUlong: ULONG,
DataBuffer: PVOID,
PreviousTaskFile: [UCHAR; 8],
CurrentTaskFile: [UCHAR; 8],
}}
pub type PATA_PASS_THROUGH_DIRECT = *mut ATA_PASS_THROUGH_DIRECT;
#[cfg(target_arch = "x86_64")]
IFDEF!{
STRUCT!{struct ATA_PASS_THROUGH_EX32 {
Length: USHORT,
AtaFlags: USHORT,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
ReservedAsUchar: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
ReservedAsUlong: ULONG,
DataBufferOffset: ULONG32,
PreviousTaskFile: [UCHAR; 8],
CurrentTaskFile: [UCHAR; 8],
}}
pub type PATA_PASS_THROUGH_EX32 = *mut ATA_PASS_THROUGH_EX32;
STRUCT!{struct ATA_PASS_THROUGH_DIRECT32 {
Length: USHORT,
AtaFlags: USHORT,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
ReservedAsUchar: UCHAR,
DataTransferLength: ULONG,
TimeOutValue: ULONG,
ReservedAsUlong: ULONG,
DataBuffer: ULONG32,
PreviousTaskFile: [UCHAR; 8],
CurrentTaskFile: [UCHAR; 8],
}}
pub type PATA_PASS_THROUGH_DIRECT32 = *mut ATA_PASS_THROUGH_DIRECT32;
}
pub const ATA_FLAGS_DRDY_REQUIRED: USHORT = 1 << 0;
pub const ATA_FLAGS_DATA_IN: USHORT = 1 << 1;
pub const ATA_FLAGS_DATA_OUT: USHORT = 1 << 2;
pub const ATA_FLAGS_48BIT_COMMAND: USHORT = 1 << 3;
pub const ATA_FLAGS_USE_DMA: USHORT = 1 << 4;
pub const ATA_FLAGS_NO_MULTIPLE: USHORT = 1 << 5;
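// The ATA_FLAGS_* values are bit flags combined into ATA_PASS_THROUGH_EX.AtaFlags,
// e.g. ATA_FLAGS_DATA_IN | ATA_FLAGS_DRDY_REQUIRED for a typical read-style command
// (an illustrative combination, not taken from the original header).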
STRUCT!{struct IDE_IO_CONTROL {
HeaderLength: ULONG,
Signature: [UCHAR; 8],
Timeout: ULONG,
ControlCode: ULONG,
ReturnStatus: ULONG,
DataLength: ULONG,
}}
pub type PIDE_IO_CONTROL = *mut IDE_IO_CONTROL;
STRUCT!{struct MPIO_PASS_THROUGH_PATH {
PassThrough: SCSI_PASS_THROUGH,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH = *mut MPIO_PASS_THROUGH_PATH;
STRUCT!{struct MPIO_PASS_THROUGH_PATH_DIRECT {
PassThrough: SCSI_PASS_THROUGH_DIRECT,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH_DIRECT = *mut MPIO_PASS_THROUGH_PATH_DIRECT;
STRUCT!{struct MPIO_PASS_THROUGH_PATH_EX {
PassThroughOffset: ULONG,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH_EX = *mut MPIO_PASS_THROUGH_PATH_EX;
STRUCT!{struct MPIO_PASS_THROUGH_PATH_DIRECT_EX {
PassThroughOffset: ULONG,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH_DIRECT_EX = *mut MPIO_PASS_THROUGH_PATH_DIRECT_EX;
#[cfg(target_arch = "x86_64")]
IFDEF!{
STRUCT!{struct MPIO_PASS_THROUGH_PATH32 {
PassThrough: SCSI_PASS_THROUGH32,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH32 = *mut MPIO_PASS_THROUGH_PATH32;
STRUCT!{struct MPIO_PASS_THROUGH_PATH_DIRECT32 {
PassThrough: SCSI_PASS_THROUGH_DIRECT32,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH_DIRECT32 = *mut MPIO_PASS_THROUGH_PATH_DIRECT32;
STRUCT!{struct MPIO_PASS_THROUGH_PATH32_EX {
PassThroughOffset: ULONG,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH32_EX = *mut MPIO_PASS_THROUGH_PATH32_EX;
STRUCT!{struct MPIO_PASS_THROUGH_PATH_DIRECT32_EX {
PassThroughOffset: ULONG,
Version: ULONG,
Length: USHORT,
Flags: UCHAR,
PortNumber: UCHAR,
MpioPathId: ULONGLONG,
}}
pub type PMPIO_PASS_THROUGH_PATH_DIRECT32_EX = *mut MPIO_PASS_THROUGH_PATH_DIRECT32_EX;
}
STRUCT!{struct SCSI_BUS_DATA {
NumberOfLogicalUnits: UCHAR,
InitiatorBusId: UCHAR,
InquiryDataOffset: ULONG,
}}
pub type PSCSI_BUS_DATA = *mut SCSI_BUS_DATA;
STRUCT!{struct SCSI_ADAPTER_BUS_INFO {
NumberOfBuses: UCHAR,
BusData: [SCSI_BUS_DATA; 1],
}}
pub type PSCSI_ADAPTER_BUS_INFO = *mut SCSI_ADAPTER_BUS_INFO;
STRUCT!{struct SCSI_INQUIRY_DATA {
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
DeviceClaimed: BOOLEAN,
InquiryDataLength: ULONG,
NextInquiryDataOffset: ULONG,
InquiryData: [UCHAR; 1],
}}
pub type PSCSI_INQUIRY_DATA = *mut SCSI_INQUIRY_DATA;
pub const IOCTL_MINIPORT_SIGNATURE_SCSIDISK: &'static str = "SCSIDISK";
pub const IOCTL_MINIPORT_SIGNATURE_HYBRDISK: &'static str = "HYBRDISK";
pub const IOCTL_MINIPORT_SIGNATURE_DSM_NOTIFICATION: &'static str = "MPDSM ";
pub const IOCTL_MINIPORT_SIGNATURE_DSM_GENERAL: &'static str = "MPDSMGEN";
pub const IOCTL_MINIPORT_SIGNATURE_FIRMWARE: &'static str = "FIRMWARE";
pub const IOCTL_MINIPORT_SIGNATURE_QUERY_PROTOCOL: &'static str = "PROTOCOL";
pub const IOCTL_MINIPORT_SIGNATURE_QUERY_TEMPERATURE: &'static str = "TEMPERAT";
pub const IOCTL_MINIPORT_SIGNATURE_SET_TEMPERATURE_THRESHOLD: &'static str = "SETTEMPT";
pub const IOCTL_MINIPORT_SIGNATURE_QUERY_PHYSICAL_TOPOLOGY: &'static str = "TOPOLOGY";
STRUCT!{struct SRB_IO_CONTROL {
HeaderLength: ULONG,
Signature: [UCHAR; 8],
Timeout: ULONG,
ControlCode: ULONG,
ReturnCode: ULONG,
Length: ULONG,
}}
pub type PSRB_IO_CONTROL = *mut SRB_IO_CONTROL;
STRUCT!{struct NVCACHE_REQUEST_BLOCK {
NRBSize: ULONG,
Function: USHORT,
NRBFlags: ULONG,
NRBStatus: ULONG,
Count: ULONG,
LBA: ULONGLONG,
DataBufSize: ULONG,
NVCacheStatus: ULONG,
NVCacheSubStatus: ULONG,
}}
pub type PNVCACHE_REQUEST_BLOCK = *mut NVCACHE_REQUEST_BLOCK;
pub const NRB_FUNCTION_NVCACHE_INFO: USHORT = 0xEC;
pub const NRB_FUNCTION_SPINDLE_STATUS: USHORT = 0xE5;
pub const NRB_FUNCTION_NVCACHE_POWER_MODE_SET: USHORT = 0x00;
pub const NRB_FUNCTION_NVCACHE_POWER_MODE_RETURN: USHORT = 0x01;
pub const NRB_FUNCTION_FLUSH_NVCACHE: USHORT = 0x14;
pub const NRB_FUNCTION_QUERY_PINNED_SET: USHORT = 0x12;
pub const NRB_FUNCTION_QUERY_CACHE_MISS: USHORT = 0x13;
pub const NRB_FUNCTION_ADD_LBAS_PINNED_SET: USHORT = 0x10;
pub const NRB_FUNCTION_REMOVE_LBAS_PINNED_SET: USHORT = 0x11;
pub const NRB_FUNCTION_QUERY_ASCENDER_STATUS: USHORT = 0xD0;
pub const NRB_FUNCTION_QUERY_HYBRID_DISK_STATUS: USHORT = 0xD1;
pub const NRB_FUNCTION_PASS_HINT_PAYLOAD: USHORT = 0xE0;
pub const NRB_FUNCTION_NVSEPARATED_INFO: USHORT = 0xc0;
pub const NRB_FUNCTION_NVSEPARATED_FLUSH: USHORT = 0xc1;
pub const NRB_FUNCTION_NVSEPARATED_WB_DISABLE: USHORT = 0xc2;
pub const NRB_FUNCTION_NVSEPARATED_WB_REVERT_DEFAULT: USHORT = 0xc3;
pub const NRB_SUCCESS: ULONG = 0;
pub const NRB_ILLEGAL_REQUEST: ULONG = 1;
pub const NRB_INVALID_PARAMETER: ULONG = 2;
pub const NRB_INPUT_DATA_OVERRUN: ULONG = 3;
pub const NRB_INPUT_DATA_UNDERRUN: ULONG = 4;
pub const NRB_OUTPUT_DATA_OVERRUN: ULONG = 5;
pub const NRB_OUTPUT_DATA_UNDERRUN: ULONG = 6;
STRUCT!{struct NV_FEATURE_PARAMETER {
NVPowerModeEnabled: USHORT,
NVParameterReserv1: USHORT,
NVCmdEnabled: USHORT,
NVParameterReserv2: USHORT,
NVPowerModeVer: USHORT,
NVCmdVer: USHORT,
NVSize: ULONG,
NVReadSpeed: USHORT,
NVWrtSpeed: USHORT,
DeviceSpinUpTime: ULONG,
}}
pub type PNV_FEATURE_PARAMETER = *mut NV_FEATURE_PARAMETER;
STRUCT!{struct NVCACHE_HINT_PAYLOAD {
Command: UCHAR,
Feature7_0: UCHAR,
Feature15_8: UCHAR,
Count15_8: UCHAR,
LBA7_0: UCHAR,
LBA15_8: UCHAR,
LBA23_16: UCHAR,
LBA31_24: UCHAR,
LBA39_32: UCHAR,
LBA47_40: UCHAR,
Auxiliary7_0: UCHAR,
Auxiliary23_16: UCHAR,
Reserved: [UCHAR; 4],
}}
pub type PNVCACHE_HINT_PAYLOAD = *mut NVCACHE_HINT_PAYLOAD;
STRUCT!{struct NV_SEP_CACHE_PARAMETER {
Version: ULONG,
Size: ULONG,
Flags: NV_SEP_CACHE_PARAMETER_Flags,
WriteCacheType: UCHAR,
WriteCacheTypeEffective: UCHAR,
ParameterReserve1: [UCHAR; 3],
}}
pub type PNV_SEP_CACHE_PARAMETER = *mut NV_SEP_CACHE_PARAMETER;
UNION!{union NV_SEP_CACHE_PARAMETER_Flags {
[u8; 1],
CacheFlags CacheFlags_mut: NV_SEP_CACHE_PARAMETER_Flags_CacheFlags,
CacheFlagsSet CacheFlagsSet_mut: UCHAR,
}}
STRUCT!{struct NV_SEP_CACHE_PARAMETER_Flags_CacheFlags {
Bitfield: UCHAR,
}}
BITFIELD!{NV_SEP_CACHE_PARAMETER_Flags_CacheFlags Bitfield: UCHAR [
WriteCacheEnabled set_WriteCacheEnabled[0..1],
WriteCacheChangeable set_WriteCacheChangeable[1..2],
WriteThroughIOSupported set_WriteThroughIOSupported[2..3],
FlushCacheSupported set_FlushCacheSupported[3..4],
ReservedBits set_ReservedBits[4..8],
]}
pub const NV_SEP_CACHE_PARAMETER_VERSION_1: ULONG = 1;
pub const NV_SEP_CACHE_PARAMETER_VERSION: ULONG = NV_SEP_CACHE_PARAMETER_VERSION_1;
ENUM!{enum NV_SEP_WRITE_CACHE_TYPE {
NVSEPWriteCacheTypeUnknown = 0,
NVSEPWriteCacheTypeNone = 1,
NVSEPWriteCacheTypeWriteBack = 2,
NVSEPWriteCacheTypeWriteThrough = 3,
}}
pub type PNV_SEP_WRITE_CACHE_TYPE = *mut NV_SEP_WRITE_CACHE_TYPE;
STRUCT!{struct MP_DEVICE_DATA_SET_RANGE {
StartingOffset: LONGLONG,
LengthInBytes: ULONGLONG,
}}
pub type PMP_DEVICE_DATA_SET_RANGE = *mut MP_DEVICE_DATA_SET_RANGE;
STRUCT!{struct DSM_NOTIFICATION_REQUEST_BLOCK {
Size: ULONG,
Version: ULONG,
NotifyFlags: ULONG,
DataSetProfile: ULONG,
Reserved: [ULONG; 3],
DataSetRangesCount: ULONG,
DataSetRanges: [MP_DEVICE_DATA_SET_RANGE; ANYSIZE_ARRAY],
}}
pub type PDSM_NOTIFICATION_REQUEST_BLOCK = *mut DSM_NOTIFICATION_REQUEST_BLOCK;
pub const MINIPORT_DSM_NOTIFICATION_VERSION_1: ULONG = 1;
pub const MINIPORT_DSM_NOTIFICATION_VERSION: ULONG = MINIPORT_DSM_NOTIFICATION_VERSION_1;
pub const MINIPORT_DSM_PROFILE_UNKNOWN: ULONG = 0;
pub const MINIPORT_DSM_PROFILE_PAGE_FILE: ULONG = 1;
pub const MINIPORT_DSM_PROFILE_HIBERNATION_FILE: ULONG = 2;
pub const MINIPORT_DSM_PROFILE_CRASHDUMP_FILE: ULONG = 3;
pub const MINIPORT_DSM_NOTIFY_FLAG_BEGIN: ULONG = 0x00000001;
pub const MINIPORT_DSM_NOTIFY_FLAG_END: ULONG = 0x00000002;
pub const HYBRID_FUNCTION_GET_INFO: ULONG = 0x01;
pub const HYBRID_FUNCTION_DISABLE_CACHING_MEDIUM: ULONG = 0x10;
pub const HYBRID_FUNCTION_ENABLE_CACHING_MEDIUM: ULONG = 0x11;
pub const HYBRID_FUNCTION_SET_DIRTY_THRESHOLD: ULONG = 0x12;
pub const HYBRID_FUNCTION_DEMOTE_BY_SIZE: ULONG = 0x13;
pub const HYBRID_STATUS_SUCCESS: ULONG = 0x0;
pub const HYBRID_STATUS_ILLEGAL_REQUEST: ULONG = 0x1;
pub const HYBRID_STATUS_INVALID_PARAMETER: ULONG = 0x2;
pub const HYBRID_STATUS_OUTPUT_BUFFER_TOO_SMALL: ULONG = 0x3;
pub const HYBRID_STATUS_ENABLE_REFCOUNT_HOLD: ULONG = 0x10;
pub const HYBRID_REQUEST_BLOCK_STRUCTURE_VERSION: ULONG = 0x1;
STRUCT!{struct HYBRID_REQUEST_BLOCK {
Version: ULONG,
Size: ULONG,
Function: ULONG,
Flags: ULONG,
DataBufferOffset: ULONG,
DataBufferLength: ULONG,
}}
pub type PHYBRID_REQUEST_BLOCK = *mut HYBRID_REQUEST_BLOCK;
ENUM!{enum NVCACHE_TYPE {
NvCacheTypeUnknown = 0,
NvCacheTypeNone = 1,
NvCacheTypeWriteBack = 2,
NvCacheTypeWriteThrough = 3,
}}
ENUM!{enum NVCACHE_STATUS {
NvCacheStatusUnknown = 0,
NvCacheStatusDisabling = 1,
NvCacheStatusDisabled = 2,
NvCacheStatusEnabled = 3,
}}
STRUCT!{struct NVCACHE_PRIORITY_LEVEL_DESCRIPTOR {
PriorityLevel: UCHAR,
Reserved0: [UCHAR; 3],
ConsumedNVMSizeFraction: ULONG,
ConsumedMappingResourcesFraction: ULONG,
ConsumedNVMSizeForDirtyDataFraction: ULONG,
ConsumedMappingResourcesForDirtyDataFraction: ULONG,
Reserved1: ULONG,
}}
pub type PNVCACHE_PRIORITY_LEVEL_DESCRIPTOR = *mut NVCACHE_PRIORITY_LEVEL_DESCRIPTOR;
pub const HYBRID_REQUEST_INFO_STRUCTURE_VERSION: ULONG = 1;
STRUCT!{struct HYBRID_INFORMATION {
Version: ULONG,
Size: ULONG,
HybridSupported: BOOLEAN,
Status: NVCACHE_STATUS,
CacheTypeEffective: NVCACHE_TYPE,
CacheTypeDefault: NVCACHE_TYPE,
FractionBase: ULONG,
CacheSize: ULONGLONG,
Attributes: HYBRID_INFORMATION_Attributes,
Priorities: HYBRID_INFORMATION_Priorities,
}}
pub type PHYBRID_INFORMATION = *mut HYBRID_INFORMATION;
STRUCT!{struct HYBRID_INFORMATION_Attributes {
Bitfield: ULONG,
}}
BITFIELD!{HYBRID_INFORMATION_Attributes Bitfield: ULONG [
WriteCacheChangeable set_WriteCacheChangeable[0..1],
WriteThroughIoSupported set_WriteThroughIoSupported[1..2],
FlushCacheSupported set_FlushCacheSupported[2..3],
Removable set_Removable[3..4],
ReservedBits set_ReservedBits[4..32],
]}
STRUCT!{struct HYBRID_INFORMATION_Priorities {
PriorityLevelCount: UCHAR,
MaxPriorityBehavior: BOOLEAN,
OptimalWriteGranularity: UCHAR,
Reserved: UCHAR,
DirtyThresholdLow: ULONG,
DirtyThresholdHigh: ULONG,
SupportedCommands: HYBRID_INFORMATION_Priorities_SupportedCommands,
Priority: [NVCACHE_PRIORITY_LEVEL_DESCRIPTOR; 0],
}}
STRUCT!{struct HYBRID_INFORMATION_Priorities_SupportedCommands {
Bitfield: ULONG,
MaxEvictCommands: ULONG,
MaxLbaRangeCountForEvict: ULONG,
MaxLbaRangeCountForChangeLba: ULONG,
}}
BITFIELD!{HYBRID_INFORMATION_Priorities_SupportedCommands Bitfield: ULONG [
CacheDisable set_CacheDisable[0..1],
SetDirtyThreshold set_SetDirtyThreshold[1..2],
PriorityDemoteBySize set_PriorityDemoteBySize[2..3],
PriorityChangeByLbaRange set_PriorityChangeByLbaRange[3..4],
Evict set_Evict[4..5],
ReservedBits set_ReservedBits[5..32],
]}
STRUCT!{struct HYBRID_DIRTY_THRESHOLDS {
Version: ULONG,
Size: ULONG,
DirtyLowThreshold: ULONG,
DirtyHighThreshold: ULONG,
}}
pub type PHYBRID_DIRTY_THRESHOLDS = *mut HYBRID_DIRTY_THRESHOLDS;
STRUCT!{struct HYBRID_DEMOTE_BY_SIZE {
Version: ULONG,
Size: ULONG,
SourcePriority: UCHAR,
TargetPriority: UCHAR,
Reserved0: USHORT,
Reserved1: ULONG,
LbaCount: ULONGLONG,
}}
pub type PHYBRID_DEMOTE_BY_SIZE = *mut HYBRID_DEMOTE_BY_SIZE;
pub const FIRMWARE_FUNCTION_GET_INFO: ULONG = 0x01;
pub const FIRMWARE_FUNCTION_DOWNLOAD: ULONG = 0x02;
pub const FIRMWARE_FUNCTION_ACTIVATE: ULONG = 0x03;
pub const FIRMWARE_STATUS_SUCCESS: ULONG = 0x0;
pub const FIRMWARE_STATUS_ERROR: ULONG = 0x1;
pub const FIRMWARE_STATUS_ILLEGAL_REQUEST: ULONG = 0x2;
pub const FIRMWARE_STATUS_INVALID_PARAMETER: ULONG = 0x3;
pub const FIRMWARE_STATUS_INPUT_BUFFER_TOO_BIG: ULONG = 0x4;
pub const FIRMWARE_STATUS_OUTPUT_BUFFER_TOO_SMALL: ULONG = 0x5;
pub const FIRMWARE_STATUS_INVALID_SLOT: ULONG = 0x6;
pub const FIRMWARE_STATUS_INVALID_IMAGE: ULONG = 0x7;
pub const FIRMWARE_STATUS_CONTROLLER_ERROR: ULONG = 0x10;
pub const FIRMWARE_STATUS_POWER_CYCLE_REQUIRED: ULONG = 0x20;
pub const FIRMWARE_STATUS_DEVICE_ERROR: ULONG = 0x40;
pub const FIRMWARE_STATUS_INTERFACE_CRC_ERROR: ULONG = 0x80;
pub const FIRMWARE_STATUS_UNCORRECTABLE_DATA_ERROR: ULONG = 0x81;
pub const FIRMWARE_STATUS_MEDIA_CHANGE: ULONG = 0x82;
pub const FIRMWARE_STATUS_ID_NOT_FOUND: ULONG = 0x83;
pub const FIRMWARE_STATUS_MEDIA_CHANGE_REQUEST: ULONG = 0x84;
pub const FIRMWARE_STATUS_COMMAND_ABORT: ULONG = 0x85;
pub const FIRMWARE_STATUS_END_OF_MEDIA: ULONG = 0x86;
pub const FIRMWARE_STATUS_ILLEGAL_LENGTH: ULONG = 0x87;<|fim▁hole|> Version: ULONG,
Size: ULONG,
Function: ULONG,
Flags: ULONG,
DataBufferOffset: ULONG,
DataBufferLength: ULONG,
}}
pub type PFIRMWARE_REQUEST_BLOCK = *mut FIRMWARE_REQUEST_BLOCK;
pub const FIRMWARE_REQUEST_FLAG_CONTROLLER: ULONG = 0x00000001;
pub const FIRMWARE_REQUEST_FLAG_LAST_SEGMENT: ULONG = 0x00000002;
pub const FIRMWARE_REQUEST_FLAG_SWITCH_TO_EXISTING_FIRMWARE: ULONG = 0x80000000;
pub const STORAGE_FIRMWARE_INFO_STRUCTURE_VERSION: ULONG = 0x1;
pub const STORAGE_FIRMWARE_INFO_STRUCTURE_VERSION_V2: ULONG = 0x2;
pub const STORAGE_FIRMWARE_INFO_INVALID_SLOT: UCHAR = 0xFF;
STRUCT!{struct STORAGE_FIRMWARE_SLOT_INFO {
SlotNumber: UCHAR,
ReadOnly: BOOLEAN,
Reserved: [UCHAR; 6],
Revision: STORAGE_FIRMWARE_SLOT_INFO_Revision,
}}
pub type PSTORAGE_FIRMWARE_SLOT_INFO = *mut STORAGE_FIRMWARE_SLOT_INFO;
UNION!{union STORAGE_FIRMWARE_SLOT_INFO_Revision {
[u64; 1],
Info Info_mut: [UCHAR; 8],
AsUlonglong AsUlonglong_mut: ULONGLONG,
}}
pub const STORAGE_FIRMWARE_SLOT_INFO_V2_REVISION_LENGTH: usize = 16;
STRUCT!{struct STORAGE_FIRMWARE_SLOT_INFO_V2 {
SlotNumber: UCHAR,
ReadOnly: BOOLEAN,
Reserved: [UCHAR; 6],
Revision: [UCHAR; STORAGE_FIRMWARE_SLOT_INFO_V2_REVISION_LENGTH],
}}
pub type PSTORAGE_FIRMWARE_SLOT_INFO_V2 = *mut STORAGE_FIRMWARE_SLOT_INFO_V2;
STRUCT!{struct STORAGE_FIRMWARE_INFO {
Version: ULONG,
Size: ULONG,
UpgradeSupport: BOOLEAN,
SlotCount: UCHAR,
ActiveSlot: UCHAR,
PendingActivateSlot: UCHAR,
Reserved: ULONG,
Slot: [STORAGE_FIRMWARE_SLOT_INFO; 0],
}}
pub type PSTORAGE_FIRMWARE_INFO = *mut STORAGE_FIRMWARE_INFO;
STRUCT!{struct STORAGE_FIRMWARE_INFO_V2 {
Version: ULONG,
Size: ULONG,
UpgradeSupport: BOOLEAN,
SlotCount: UCHAR,
ActiveSlot: UCHAR,
PendingActivateSlot: UCHAR,
FirmwareShared: BOOLEAN,
Reserved: [UCHAR; 3],
ImagePayloadAlignment: ULONG,
ImagePayloadMaxSize: ULONG,
Slot: [STORAGE_FIRMWARE_SLOT_INFO_V2; 0],
}}
pub type PSTORAGE_FIRMWARE_INFO_V2 = *mut STORAGE_FIRMWARE_INFO_V2;
pub const STORAGE_FIRMWARE_DOWNLOAD_STRUCTURE_VERSION: ULONG = 0x1;
pub const STORAGE_FIRMWARE_DOWNLOAD_STRUCTURE_VERSION_V2: ULONG = 0x2;
STRUCT!{struct STORAGE_FIRMWARE_DOWNLOAD {
Version: ULONG,
Size: ULONG,
Offset: ULONGLONG,
BufferSize: ULONGLONG,
ImageBuffer: [UCHAR; 0],
}}
pub type PSTORAGE_FIRMWARE_DOWNLOAD = *mut STORAGE_FIRMWARE_DOWNLOAD;
STRUCT!{struct STORAGE_FIRMWARE_DOWNLOAD_V2 {
Version: ULONG,
Size: ULONG,
Offset: ULONGLONG,
BufferSize: ULONGLONG,
Slot: UCHAR,
Reserved: [UCHAR; 7],
ImageBuffer: [UCHAR; 0],
}}
pub type PSTORAGE_FIRMWARE_DOWNLOAD_V2 = *mut STORAGE_FIRMWARE_DOWNLOAD_V2;
pub const STORAGE_FIRMWARE_ACTIVATE_STRUCTURE_VERSION: ULONG = 0x1;
STRUCT!{struct STORAGE_FIRMWARE_ACTIVATE {
Version: ULONG,
Size: ULONG,
SlotToActivate: UCHAR,
Reserved0: [UCHAR; 3],
}}
pub type PSTORAGE_FIRMWARE_ACTIVATE = *mut STORAGE_FIRMWARE_ACTIVATE;
STRUCT!{struct IO_SCSI_CAPABILITIES {
Length: ULONG,
MaximumTransferLength: ULONG,
MaximumPhysicalPages: ULONG,
SupportedAsynchronousEvents: ULONG,
AlignmentMask: ULONG,
TaggedQueuing: BOOLEAN,
AdapterScansDown: BOOLEAN,
AdapterUsesPio: BOOLEAN,
}}
pub type PIO_SCSI_CAPABILITIES = *mut IO_SCSI_CAPABILITIES;
STRUCT!{struct SCSI_ADDRESS {
Length: ULONG,
PortNumber: UCHAR,
PathId: UCHAR,
TargetId: UCHAR,
Lun: UCHAR,
}}
pub type PSCSI_ADDRESS = *mut SCSI_ADDRESS;
pub const DUMP_POINTERS_VERSION_1: ULONG = 1;
pub const DUMP_POINTERS_VERSION_2: ULONG = 2;
pub const DUMP_POINTERS_VERSION_3: ULONG = 3;
pub const DUMP_POINTERS_VERSION_4: ULONG = 4;
pub const DUMP_DRIVER_NAME_LENGTH: usize = 15;
FN!{cdecl DUMP_DEVICE_POWERON_ROUTINE(
Context: PVOID,
) -> LONG}
pub type PDUMP_DEVICE_POWERON_ROUTINE = *mut DUMP_DEVICE_POWERON_ROUTINE;
STRUCT!{struct DUMP_POINTERS_VERSION {
Version: ULONG,
Size: ULONG,
}}
pub type PDUMP_POINTERS_VERSION = *mut DUMP_POINTERS_VERSION;
STRUCT!{struct DUMP_POINTERS {
AdapterObject: PVOID, // struct _ADAPTER_OBJECT *
MappedRegisterBase: PVOID,
DumpData: PVOID,
CommonBufferVa: PVOID,
CommonBufferPa: LARGE_INTEGER,
CommonBufferSize: ULONG,
AllocateCommonBuffers: BOOLEAN,
UseDiskDump: BOOLEAN,
Spare1: [UCHAR; 2],
DeviceObject: PVOID,
}}
pub type PDUMP_POINTERS = *mut DUMP_POINTERS;
STRUCT!{struct DUMP_POINTERS_EX {
Header: DUMP_POINTERS_VERSION,
DumpData: PVOID,
CommonBufferVa: PVOID,
CommonBufferSize: ULONG,
AllocateCommonBuffers: BOOLEAN,
DeviceObject: PVOID,
DriverList: PVOID,
dwPortFlags: ULONG,
MaxDeviceDumpSectionSize: ULONG,
MaxDeviceDumpLevel: ULONG,
MaxTransferSize: ULONG,
AdapterObject: PVOID,
MappedRegisterBase: PVOID,
DeviceReady: PBOOLEAN,
DumpDevicePowerOn: PDUMP_DEVICE_POWERON_ROUTINE,
DumpDevicePowerOnContext: PVOID,
}}
pub type PDUMP_POINTERS_EX = *mut DUMP_POINTERS_EX;
// TODO: Revisit these definitions when const size_of and offset_of! arrive.
#[cfg(target_pointer_width = "32")]
IFDEF!{
pub const DUMP_POINTERS_EX_V2_SIZE: ULONG = 32;
pub const DUMP_POINTERS_EX_V3_SIZE: ULONG = 60;
pub const DUMP_POINTERS_EX_V4_SIZE: ULONG = 68;
}
#[cfg(target_pointer_width = "64")]
IFDEF!{
pub const DUMP_POINTERS_EX_V2_SIZE: ULONG = 48;
pub const DUMP_POINTERS_EX_V3_SIZE: ULONG = 88;
pub const DUMP_POINTERS_EX_V4_SIZE: ULONG = 104;
}
pub const DUMP_EX_FLAG_SUPPORT_64BITMEMORY: ULONG = 0x00000001;
pub const DUMP_EX_FLAG_SUPPORT_DD_TELEMETRY: ULONG = 0x00000002;
pub const DUMP_EX_FLAG_RESUME_SUPPORT: ULONG = 0x00000004;
STRUCT!{struct DUMP_DRIVER {
DumpDriverList: PVOID,
DriverName: [WCHAR; DUMP_DRIVER_NAME_LENGTH],
BaseName: [WCHAR; DUMP_DRIVER_NAME_LENGTH],
}}
pub type PDUMP_DRIVER = *mut DUMP_DRIVER;
pub const SCSI_IOCTL_DATA_OUT: UCHAR = 0;
pub const SCSI_IOCTL_DATA_IN: UCHAR = 1;
pub const SCSI_IOCTL_DATA_UNSPECIFIED: UCHAR = 2;
pub const SCSI_IOCTL_DATA_BIDIRECTIONAL: UCHAR = 3;
pub const MPIO_IOCTL_FLAG_USE_PATHID: UCHAR = 1;
pub const MPIO_IOCTL_FLAG_USE_SCSIADDRESS: UCHAR = 2;
pub const MPIO_IOCTL_FLAG_INVOLVE_DSM: UCHAR = 4;<|fim▁end|>
|
pub const FIRMWARE_REQUEST_BLOCK_STRUCTURE_VERSION: ULONG = 0x1;
STRUCT!{struct FIRMWARE_REQUEST_BLOCK {
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#
# Copyright 2016 Dohop hf.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script for building supervisor-logstash-notifier
"""
from setuptools import setup, find_packages
# 2 step 'with open' to be python2.6 compatible
with open('requirements.txt') as requirements:
with open('test_requirements.txt') as test_requirements:
setup(
name='supervisor-logstash-notifier',
version='0.2.5',
packages=find_packages(exclude=['tests']),
url='https://github.com/dohop/supervisor-logstash-notifier',
license='Apache 2.0',
author='aodj',
author_email='[email protected]',
description='Stream supervisor events to a logstash instance',
long_description=open('README.rst').read(),
entry_points={
'console_scripts': [
'logstash_notifier = logstash_notifier:main'
]
},
install_requires=requirements.read().splitlines(),
test_suite='tests',
tests_require=test_requirements.read().splitlines(),
)<|fim▁end|>
|
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
|
<|file_name|>paypal.py<|end_file_name|><|fim▁begin|># -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
import logging
import urlparse
import werkzeug.urls
import urllib2
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
from openerp import SUPERUSER_ID, tools
_logger = logging.getLogger(__name__)
class AcquirerPaypal(osv.Model):
_inherit = 'payment.acquirer'
def _get_paypal_urls(self, cr, uid, environment, context=None):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerPaypal, self)._get_providers(cr, uid, context=context)
providers.append(['paypal', 'Paypal'])
return providers
_columns = {
'paypal_email_account': fields.char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user'),
'paypal_seller_account': fields.char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.'),
'paypal_use_ipn': fields.boolean('Use IPN', help='Paypal Instant Payment Notification', groups='base.group_user'),
# Server 2 server
'paypal_api_enabled': fields.boolean('Use Rest API'),
'paypal_api_username': fields.char('Rest API Username', groups='base.group_user'),
'paypal_api_password': fields.char('Rest API Password', groups='base.group_user'),
'paypal_api_access_token': fields.char('Access Token', groups='base.group_user'),
'paypal_api_access_token_validity': fields.datetime('Access Token Validity', groups='base.group_user'),
}
_defaults = {
'paypal_use_ipn': True,
'fees_active': False,
'fees_dom_fixed': 0.35,
'fees_dom_var': 3.4,
'fees_int_fixed': 0.35,
'fees_int_var': 3.9,
'paypal_api_enabled': False,
}
def _migrate_paypal_account(self, cr, uid, context=None):
""" COMPLETE ME """
cr.execute('SELECT id, paypal_account FROM res_company')
res = cr.fetchall()
for (company_id, company_paypal_account) in res:
if company_paypal_account:
company_paypal_ids = self.search(cr, uid, [('company_id', '=', company_id), ('provider', '=', 'paypal')], limit=1, context=context)
if company_paypal_ids:
self.write(cr, uid, company_paypal_ids, {'paypal_email_account': company_paypal_account}, context=context)
else:
paypal_view = self.pool['ir.model.data'].get_object(cr, uid, 'payment_paypal', 'paypal_acquirer_button')
self.create(cr, uid, {
'name': 'Paypal',
'provider': 'paypal',
'paypal_email_account': company_paypal_account,
'view_template_id': paypal_view.id,
}, context=context)
return True
def paypal_compute_fees(self, cr, uid, id, amount, currency_id, country_id, context=None):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
acquirer = self.browse(cr, uid, id, context=context)
if not acquirer.fees_active:
return 0.0
country = self.pool['res.country'].browse(cr, uid, country_id, context=context)
if country and acquirer.company_id.country_id.id == country.id:<|fim▁hole|> fixed = acquirer.fees_int_fixed
fees = (percentage / 100.0 * amount + fixed ) / (1 - percentage / 100.0)
return fees
def paypal_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
paypal_tx_values = dict(tx_values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': acquirer.paypal_email_account,
'item_name': '%s: %s' % (acquirer.company_id.name, tx_values['reference']),
'item_number': tx_values['reference'],
'amount': tx_values['amount'],
'currency_code': tx_values['currency'] and tx_values['currency'].name or '',
'address1': partner_values['address'],
'city': partner_values['city'],
'country': partner_values['country'] and partner_values['country'].code or '',
'state': partner_values['state'] and (partner_values['state'].code or partner_values['state'].name) or '',
'email': partner_values['email'],
'zip': partner_values['zip'],
'first_name': partner_values['first_name'],
'last_name': partner_values['last_name'],
'return': '%s' % urlparse.urljoin(base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(base_url, PaypalController._cancel_url),
})
if acquirer.fees_active:
paypal_tx_values['handling'] = '%.2f' % paypal_tx_values.pop('fees', 0.0)
if paypal_tx_values.get('return_url'):
paypal_tx_values['custom'] = json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')})
return partner_values, paypal_tx_values
def paypal_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_paypal_urls(cr, uid, acquirer.environment, context=context)['paypal_form_url']
def _paypal_s2s_get_access_token(self, cr, uid, ids, context=None):
"""
Note: see http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
for an explanation of why we build the Authorization header ourselves instead of
using urllib2's password manager.
"""
res = dict.fromkeys(ids, False)
parameters = werkzeug.url_encode({'grant_type': 'client_credentials'})
for acquirer in self.browse(cr, uid, ids, context=context):
tx_url = self._get_paypal_urls(cr, uid, acquirer.environment)['paypal_rest_url']
request = urllib2.Request(tx_url, parameters)
# add other headers (https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/)
request.add_header('Accept', 'application/json')
request.add_header('Accept-Language', tools.config.defaultLang)
# add authorization header
base64string = base64.encodestring('%s:%s' % (
acquirer.paypal_api_username,
acquirer.paypal_api_password)
).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
request = urllib2.urlopen(request)
result = request.read()
res[acquirer.id] = json.loads(result).get('access_token')
request.close()
return res
class TxPaypal(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'paypal_txn_id': fields.char('Transaction ID'),
'paypal_txn_type': fields.char('Transaction type'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _paypal_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = 'Paypal: received data with missing reference (%s) or txn_id (%s)' % (reference, txn_id)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
return self.browse(cr, uid, tx_ids[0], context=context)
def _paypal_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
_logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
if data.get('test_ipn'):
_logger.warning(
'Received a notification from Paypal using sandbox'
),
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('txn_id') != tx.acquirer_reference:
invalid_parameters.append(('txn_id', data.get('txn_id'), tx.acquirer_reference))
# check what was bought
if float_compare(float(data.get('mc_gross', '0.0')), (tx.amount + tx.fees), 2) != 0:
invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % tx.amount)) # mc_gross is amount + fees
if data.get('mc_currency') != tx.currency_id.name:
invalid_parameters.append(('mc_currency', data.get('mc_currency'), tx.currency_id.name))
if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), tx.fees, 2) != 0:
invalid_parameters.append(('handling_amount', data.get('handling_amount'), tx.fees))
# check buyer
if tx.partner_reference and data.get('payer_id') != tx.partner_reference:
invalid_parameters.append(('payer_id', data.get('payer_id'), tx.partner_reference))
# check seller
if data.get('receiver_id') and tx.acquirer_id.paypal_seller_account and data['receiver_id'] != tx.acquirer_id.paypal_seller_account:
invalid_parameters.append(('receiver_id', data.get('receiver_id'), tx.acquirer_id.paypal_seller_account))
if not data.get('receiver_id') or not tx.acquirer_id.paypal_seller_account:
# Check receiver_email only if receiver_id was not checked.
# In Paypal it is possible to configure a receiver_email that differs from the business email (the login email).
# Odoo only has one field for the Paypal email, the business email, so a distinct receiver_email cannot be set there.
# If you use such a configuration on the Paypal side, you must fill in the Merchant ID on the Odoo payment
# acquirer, so that the check is performed on that value instead of the receiver_email.
# At least one of the two checks must be done, to avoid fraudsters.
if data.get('receiver_email') != tx.acquirer_id.paypal_email_account:
invalid_parameters.append(('receiver_email', data.get('receiver_email'), tx.acquirer_id.paypal_email_account))
return invalid_parameters
def _paypal_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('payment_status')
# collect the values to write on the transaction in a separate dict so the
# raw Paypal notification in `data` stays available for the lookups below
res = {
'acquirer_reference': data.get('txn_id'),
'paypal_txn_type': data.get('payment_type'),
'partner_reference': data.get('payer_id')
}
if status in ['Completed', 'Processed']:
_logger.info('Validated Paypal payment for tx %s: set as done' % (tx.reference))
res.update(state='done', date_validate=data.get('payment_date', fields.datetime.now()))
return tx.write(res)
elif status in ['Pending', 'Expired']:
_logger.info('Received notification for Paypal payment %s: set as pending' % (tx.reference))
res.update(state='pending', state_message=data.get('pending_reason', ''))
return tx.write(res)
else:
error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
res.update(state='error', state_message=error)
return tx.write(res)
# --------------------------------------------------
# SERVER2SERVER RELATED METHODS
# --------------------------------------------------
def _paypal_try_url(self, request, tries=3, context=None):
""" Try to contact Paypal. Due to some issues, internal service errors
seem to be quite frequent. Several tries are done before considering
the communication as failed.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
done, res = False, None
while (not done and tries):
try:
res = urllib2.urlopen(request)
done = True
except urllib2.HTTPError as e:
res = e.read()
e.close()
if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':
_logger.warning('Failed contacting Paypal, retrying (%s remaining)' % tries)
tries = tries - 1
if not res:
pass
# raise openerp.exceptions.
result = res.read()
res.close()
return result
def _paypal_s2s_send(self, cr, uid, values, cc_values, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx_id = self.create(cr, uid, values, context=context)
tx = self.browse(cr, uid, tx_id, context=context)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
data = {
'intent': 'sale',
'transactions': [{
'amount': {
'total': '%.2f' % tx.amount,
'currency': tx.currency_id.name,
},
'description': tx.reference,
}]
}
if cc_values:
data['payer'] = {
'payment_method': 'credit_card',
'funding_instruments': [{
'credit_card': {
'number': cc_values['number'],
'type': cc_values['brand'],
'expire_month': cc_values['expiry_mm'],
'expire_year': cc_values['expiry_yy'],
'cvv2': cc_values['cvc'],
'first_name': tx.partner_name,
'last_name': tx.partner_name,
'billing_address': {
'line1': tx.partner_address,
'city': tx.partner_city,
'country_code': tx.partner_country_id.code,
'postal_code': tx.partner_zip,
}
}
}]
}
else:
# TODO: complete redirect URLs
data['redirect_urls'] = {
# 'return_url': 'http://example.com/your_redirect_url/',
# 'cancel_url': 'http://example.com/your_cancel_url/',
},
data['payer'] = {
'payment_method': 'paypal',
}
data = json.dumps(data)
request = urllib2.Request('https://api.sandbox.paypal.com/v1/payments/payment', data, headers)
result = self._paypal_try_url(request, tries=3, context=context)
return (tx_id, result)
def _paypal_s2s_get_invalid_parameters(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
invalid_parameters = []
return invalid_parameters
def _paypal_s2s_validate(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
values = json.loads(data)
status = values.get('state')
if status in ['approved']:
_logger.info('Validated Paypal s2s payment for tx %s: set as done' % (tx.reference))
tx.write({
'state': 'done',
'date_validate': values.get('update_time', fields.datetime.now()),
'paypal_txn_id': values['id'],
})
return True
elif status in ['pending', 'expired']:
_logger.info('Received notification for Paypal s2s payment %s: set as pending' % (tx.reference))
tx.write({
'state': 'pending',
# 'state_message': data.get('pending_reason', ''),
'paypal_txn_id': values['id'],
})
return True
else:
error = 'Received unrecognized status for Paypal s2s payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
tx.write({
'state': 'error',
# 'state_message': error,
'paypal_txn_id': values['id'],
})
return False
def _paypal_s2s_get_tx_status(self, cr, uid, tx, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
# TDETODO: check tx.paypal_txn_id is set
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
url = 'https://api.sandbox.paypal.com/v1/payments/payment/%s' % (tx.paypal_txn_id)
request = urllib2.Request(url, headers=headers)
data = self._paypal_try_url(request, tries=3, context=context)
return self.s2s_feedback(cr, uid, tx.id, data, context=context)<|fim▁end|>
|
percentage = acquirer.fees_dom_var
fixed = acquirer.fees_dom_fixed
else:
percentage = acquirer.fees_int_var
|
<|file_name|>test_tasks_base.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from pumphouse.tasks import base
class Tenant(base.Resource):
@base.task
def create(self):
self.data = self.env.cloud.keystone.tenants.create(self.data)
@base.task
def delete(self):
self.env.cloud.keystone.tenants.delete(self.data["id"])
class Server(base.Resource):
@classmethod
def get_id_for(cls, data):
try:
tenant_id = data["tenant_id"]
except KeyError:
tenant_id = Tenant.get_id_for(data["tenant"])
return (tenant_id, super(Server, cls).get_id_for(data))
@Tenant()
def tenant(self):
if "tenant_id" in self.data:
return {"id": self.data["tenant_id"]}
elif "tenant" in self.data:
return self.data["tenant"]
else:
assert False
@base.task(requires=[tenant.create])
def create(self):
server = self.data.copy()
server.pop("tenant")
server["tenant_id"] = self.tenant["id"]
self.data = self.env.cloud.nova.servers.create(server)
@base.task(before=[tenant.delete])
def delete(self):
self.env.cloud.nova.servers.delete(self.data)
class TenantWorkload(base.Resource):
@Tenant()
def tenant(self):
return self.data
@base.Collection(Server)
def servers(self):
return self.env.cloud.nova.servers.list(search_opts={
"all_tenants": 1,
"tenant_id": self.tenant["id"],
})
delete = base.task(name="delete",
requires=[tenant.delete, servers.each().delete])
create = base.task(requires=[tenant.create, servers.each().create])
class TasksBaseTestCase(unittest.TestCase):
def test_create_tasks(self):
tenant = {"name": "tenant1"}
created_tenant = dict(tenant, id="tenid1")
servers = [
{"name": "server1", "tenant": tenant},
{"name": "server2", "tenant": tenant},
]
env = mock.Mock(plugins={})
env.cloud.keystone.tenants.create.return_value = created_tenant
runner = base.TaskflowRunner(env)
workload = runner.get_resource(TenantWorkload, tenant)
workload.servers = servers
runner.add(workload.create)
runner.run()
self.assertEqual(
env.cloud.keystone.tenants.create.call_args_list,
[mock.call(tenant)],
)
self.assertItemsEqual(
env.cloud.nova.servers.create.call_args_list,
map(mock.call, [
{"tenant_id": created_tenant["id"], "name": server["name"]}
for server in servers
]),
)
self.assertEqual(len(env.method_calls), 1 + len(servers))
<|fim▁hole|> servers = [
{"id": "servid1", "name": "server1", "tenant_id": tenant["id"]},
{"id": "servid2", "name": "server2", "tenant_id": tenant["id"]},
]
env = mock.Mock(plugins={})
env.cloud.nova.servers.list.return_value = servers
env.cloud.keystone.tenants.get.return_value = tenant
runner = base.TaskflowRunner(env)
workload = runner.get_resource(TenantWorkload, tenant)
runner.add(workload.delete)
runner.run()
self.assertEqual(
env.cloud.nova.servers.list.call_args_list,
[mock.call(search_opts={
"all_tenants": 1,
"tenant_id": tenant["id"],
})],
)
self.assertEqual(
env.cloud.keystone.tenants.delete.call_args_list,
[mock.call(tenant["id"])],
)
self.assertItemsEqual(
env.cloud.nova.servers.delete.call_args_list,
map(mock.call, servers),
)
self.assertEqual(len(env.method_calls), 2 + len(servers))<|fim▁end|>
|
def test_delete_tasks(self):
tenant = {"id": "tenid1", "name": "tenant1"}
|
<|file_name|>config.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#<|fim▁hole|>#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Configuration for Invenio-Formatter."""
from __future__ import absolute_import, print_function
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {}
"""Mapping of titles."""<|fim▁end|>
|
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
|
<|file_name|>dataDao.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DataDao(object):
def __init__(self, spark_context, config):
self.spark_context = spark_context
self.zookeepers_uri = config.ZOOKEEPERS
self.device_measurement_table_name = config.DEVICE_MEASUREMENT_TABLE_NAME
def get_data_from_hbase(self, account_id, component_id, start_ts, stop_ts):
print("get_data_for_device", account_id, component_id, start_ts, stop_ts)
start = account_id + '\0' + component_id + '\0' + start_ts
stop = account_id + '\0' + component_id + '\0' + stop_ts
print("START: ", start.replace('\0', '\\0'))
print("STOP: ", stop.replace('\0', '\\0'))
# see https://hbase.apache.org/0.94/xref/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
conf = {
"hbase.zookeeper.quorum": self.zookeepers_uri,
"hbase.mapreduce.inputtable": self.device_measurement_table_name,
"hbase.mapreduce.scan.row.start": str(start),
"hbase.mapreduce.scan.row.stop": str(stop),
"hbase.mapreduce.scan.columns": "data:measure_val"
}<|fim▁hole|> key_conv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
value_conv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
rdd = self.spark_context.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
"org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"org.apache.hadoop.hbase.client.Result",
conf=conf, keyConverter=key_conv, valueConverter=value_conv)
return rdd<|fim▁end|>
| |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#<|fim▁hole|># The licence is in the file __manifest__.py
#
##############################################################################
from . import b2s_image<|fim▁end|>
|
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
|
<|file_name|>salary_slip.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import datetime, math
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils.background_jobs import enqueue
from erpnext.hr.doctype.additional_salary.additional_salary import get_additional_salary_component
from erpnext.hr.doctype.payroll_period.payroll_period import get_period_factor, get_payroll_period
from erpnext.hr.doctype.employee_benefit_application.employee_benefit_application import get_benefit_component_amount
from erpnext.hr.doctype.employee_benefit_claim.employee_benefit_claim import get_benefit_claim_amount, get_last_payroll_period_benefits
class SalarySlip(TransactionBase):
def __init__(self, *args, **kwargs):
super(SalarySlip, self).__init__(*args, **kwargs)
self.series = 'Sal Slip/{0}/.#####'.format(self.employee)
self.whitelisted_globals = {
"int": int,
"float": float,
"long": int,
"round": round,
"date": datetime.date,
"getdate": getdate
}
def autoname(self):
self.name = make_autoname(self.series)
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
self.update_salary_slip_in_additional_salary()
if (frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")) and not frappe.flags.via_payroll_entry:
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
self.update_salary_slip_in_additional_salary()
def on_trash(self):
from frappe.model.naming import revert_series_if_last
revert_series_if_last(self.series, self.name)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(self.employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def get_date_details(self):
if not self.end_date:
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def check_sal_struct(self, joining_date, relieving_date):
cond = """and sa.employee=%(employee)s and (sa.from_date <= %(start_date)s or
sa.from_date <= %(end_date)s or sa.from_date <= %(joining_date)s)"""
if self.payroll_frequency:
cond += """and ss.payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""
select sa.salary_structure
from `tabSalary Structure Assignment` sa join `tabSalary Structure` ss
where sa.salary_structure=ss.name
and sa.docstatus = 1 and ss.docstatus = 1 and ss.is_active ='Yes' %s
order by sa.from_date desc
limit 1
""" %cond, {'employee': self.employee, 'start_date': self.start_date,
'end_date': self.end_date, 'joining_date': joining_date})
if st_name:
self.salary_structure = st_name[0][0]
return self.salary_structure
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None, for_preview=0):
if not joining_date:
joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
working_days = date_diff(self.end_date, self.start_date) + 1
if for_preview:
self.total_working_days = working_days
self.payment_days = working_days
return
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
actual_lwp = self.calculate_lwp(holidays, working_days)
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours,
"default_amount": 0.0,
"additional_amount": 0.0
}
doc.append('earnings', wages_row)
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
self.gross_pay = self.get_component_totals("earnings")
self.total_deduction = self.get_component_totals("deductions")
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay)
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
self.add_structure_components()
self.add_employee_benefits(payroll_period)
self.add_additional_salary_components()
self.add_tax_components(payroll_period)
self.set_component_amounts_based_on_payment_days()
def add_structure_components(self):
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
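# Illustrative result (hypothetical values, descriptive comment only): a frappe._dict
# such as {"employee": "EMP-0001", "base": 30000, "B": 30000, ...} containing the
# Salary Structure Assignment and Employee fields plus every Salary Component
# abbreviation, each defaulting to 0 until this slip's earnings/deductions overwrite it.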
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Assignment",
{"employee": self.employee, "salary_structure": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
def eval_condition_and_formula(self, d, data):
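# Illustrative only (not from the original source): a Salary Detail row may carry a
# condition string such as "B > 10000" and a formula such as "B * 0.1", where "B" is a
# salary component abbreviation resolved through the data dict from get_data_for_eval();
# both strings are evaluated below with frappe.safe_eval against that data.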
try:
condition = d.condition.strip() if d.condition else None
if condition:
if not frappe.safe_eval(condition, self.whitelisted_globals, data):
return None
amount = d.amount
if d.amount_based_on_formula:
formula = d.formula.strip() if d.formula else None
if formula:
amount = flt(frappe.safe_eval(formula, self.whitelisted_globals, data), d.precision("amount"))
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def add_employee_benefits(self, payroll_period):
for struct_row in self._salary_structure_doc.get("earnings"):
if struct_row.is_flexible_benefit == 1:
if frappe.db.get_value("Salary Component", struct_row.salary_component, "pay_against_benefit_claim") != 1:
benefit_component_amount = get_benefit_component_amount(self.employee, self.start_date, self.end_date,
struct_row.salary_component, self._salary_structure_doc, self.payroll_frequency, payroll_period)
if benefit_component_amount:
self.update_component_row(struct_row, benefit_component_amount, "earnings")
else:
benefit_claim_amount = get_benefit_claim_amount(self.employee, self.start_date, self.end_date, struct_row.salary_component)
if benefit_claim_amount:
self.update_component_row(struct_row, benefit_claim_amount, "earnings")
self.adjust_benefits_in_last_payroll_period(payroll_period)
def adjust_benefits_in_last_payroll_period(self, payroll_period):
if payroll_period:
if (getdate(payroll_period.end_date) <= getdate(self.end_date)):
last_benefits = get_last_payroll_period_benefits(self.employee, self.start_date, self.end_date,
payroll_period, self._salary_structure_doc)
if last_benefits:
for last_benefit in last_benefits:
last_benefit = frappe._dict(last_benefit)
amount = last_benefit.amount
self.update_component_row(frappe._dict(last_benefit.struct_row), amount, "earnings")
def add_additional_salary_components(self):
additional_components = get_additional_salary_component(self.employee, self.start_date, self.end_date)
if additional_components:
for additional_component in additional_components:
amount = additional_component.amount
overwrite = additional_component.overwrite
key = "earnings" if additional_component.type == "Earning" else "deductions"
self.update_component_row(frappe._dict(additional_component.struct_row), amount, key, overwrite=overwrite)
def add_tax_components(self, payroll_period):
# Calculate variable_based_on_taxable_salary after all components updated in salary slip
tax_components, other_deduction_components = [], []
for d in self._salary_structure_doc.get("deductions"):
if d.variable_based_on_taxable_salary == 1 and not d.formula and not flt(d.amount):
tax_components.append(d.salary_component)
else:
other_deduction_components.append(d.salary_component)
if not tax_components:
tax_components = [d.name for d in frappe.get_all("Salary Component", filters={"variable_based_on_taxable_salary": 1})
if d.name not in other_deduction_components]
for d in tax_components:
tax_amount = self.calculate_variable_based_on_taxable_salary(d, payroll_period)
tax_row = self.get_salary_slip_row(d)
self.update_component_row(tax_row, tax_amount, "deductions")
def update_component_row(self, struct_row, amount, key, overwrite=1):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
if amount:
self.append(key, {
'amount': amount,
'default_amount': amount if not struct_row.get("is_additional_component") else 0,
'depends_on_payment_days' : struct_row.depends_on_payment_days,
'salary_component' : struct_row.salary_component,
'abbr' : struct_row.abbr,
'do_not_include_in_total' : struct_row.do_not_include_in_total,
'is_tax_applicable': struct_row.is_tax_applicable,
'is_flexible_benefit': struct_row.is_flexible_benefit,
'variable_based_on_taxable_salary': struct_row.variable_based_on_taxable_salary,
'deduct_full_tax_on_selected_payroll_date': struct_row.deduct_full_tax_on_selected_payroll_date,
'additional_amount': amount if struct_row.get("is_additional_component") else 0
})
else:
if struct_row.get("is_additional_component"):
if overwrite:
component_row.additional_amount = amount - component_row.get("default_amount", 0)
else:
component_row.additional_amount = amount
if not overwrite and component_row.default_amount:
amount += component_row.default_amount
else:
component_row.default_amount = amount
component_row.amount = amount
component_row.deduct_full_tax_on_selected_payroll_date = struct_row.deduct_full_tax_on_selected_payroll_date
def calculate_variable_based_on_taxable_salary(self, tax_component, payroll_period):
if not payroll_period:
frappe.msgprint(_("Start and end dates not in a valid Payroll Period, cannot calculate {0}.")
.format(tax_component))
return
# Deduct taxes forcefully for unsubmitted tax exemption proof and unclaimed benefits in the last period
if payroll_period.end_date <= getdate(self.end_date):
self.deduct_tax_for_unsubmitted_tax_exemption_proof = 1
self.deduct_tax_for_unclaimed_employee_benefits = 1
return self.calculate_variable_tax(payroll_period, tax_component)
def calculate_variable_tax(self, payroll_period, tax_component):
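# Overview (descriptive comment, not in the original source): the annual taxable income
# is projected from past slips plus the current and remaining sub-periods, the slab tax
# is computed on that projection, taxes already paid are subtracted, and the remainder
# is spread evenly over the remaining sub-periods; additional earnings flagged for full
# tax are instead taxed entirely in the current slip.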
# get remaining numbers of sub-period (period for which one salary is processed)
remaining_sub_periods = get_period_factor(self.employee,
self.start_date, self.end_date, self.payroll_frequency, payroll_period)[1]
# get taxable_earnings, paid_taxes for previous period
previous_taxable_earnings = self.get_taxable_earnings_for_prev_period(payroll_period.start_date, self.start_date)
previous_total_paid_taxes = self.get_tax_paid_in_period(payroll_period.start_date, self.start_date, tax_component)
# get taxable_earnings for current period (all days)
current_taxable_earnings = self.get_taxable_earnings()
future_structured_taxable_earnings = current_taxable_earnings.taxable_earnings * (math.ceil(remaining_sub_periods) - 1)
# get taxable_earnings, addition_earnings for current actual payment days
current_taxable_earnings_for_payment_days = self.get_taxable_earnings(based_on_payment_days=1)
current_structured_taxable_earnings = current_taxable_earnings_for_payment_days.taxable_earnings
current_additional_earnings = current_taxable_earnings_for_payment_days.additional_income
current_additional_earnings_with_full_tax = current_taxable_earnings_for_payment_days.additional_income_with_full_tax
# Get taxable unclaimed benefits
unclaimed_taxable_benefits = 0
if self.deduct_tax_for_unclaimed_employee_benefits:
unclaimed_taxable_benefits = self.calculate_unclaimed_taxable_benefits(payroll_period)
unclaimed_taxable_benefits += current_taxable_earnings_for_payment_days.flexi_benefits
# Total exemption amount based on tax exemption declaration
total_exemption_amount, other_incomes = self.get_total_exemption_amount_and_other_incomes(payroll_period)
# Total taxable earnings including additional and other incomes
total_taxable_earnings = previous_taxable_earnings + current_structured_taxable_earnings + future_structured_taxable_earnings \
+ current_additional_earnings + other_incomes + unclaimed_taxable_benefits - total_exemption_amount
# Total taxable earnings without additional earnings with full tax
total_taxable_earnings_without_full_tax_addl_components = total_taxable_earnings - current_additional_earnings_with_full_tax
# Structured tax amount
total_structured_tax_amount = self.calculate_tax_by_tax_slab(payroll_period, total_taxable_earnings_without_full_tax_addl_components)
current_structured_tax_amount = (total_structured_tax_amount - previous_total_paid_taxes) / remaining_sub_periods
# Total taxable earnings with additional earnings with full tax
full_tax_on_additional_earnings = 0.0
if current_additional_earnings_with_full_tax:
total_tax_amount = self.calculate_tax_by_tax_slab(payroll_period, total_taxable_earnings)
full_tax_on_additional_earnings = total_tax_amount - total_structured_tax_amount
current_tax_amount = current_structured_tax_amount + full_tax_on_additional_earnings
if flt(current_tax_amount) < 0:
current_tax_amount = 0
return current_tax_amount
def get_taxable_earnings_for_prev_period(self, start_date, end_date):
taxable_earnings = frappe.db.sql("""
select sum(sd.amount)
from
`tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
where
sd.parentfield='earnings'
and sd.is_tax_applicable=1
and is_flexible_benefit=0
and ss.docstatus=1
and ss.employee=%(employee)s
and ss.start_date between %(from_date)s and %(to_date)s
and ss.end_date between %(from_date)s and %(to_date)s
""", {
"employee": self.employee,
"from_date": start_date,
"to_date": end_date
})
return flt(taxable_earnings[0][0]) if taxable_earnings else 0
def get_tax_paid_in_period(self, start_date, end_date, tax_component):
# find total_tax_paid, tax paid for benefit, additional_salary
total_tax_paid = flt(frappe.db.sql("""
select
sum(sd.amount)
from
`tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
where
sd.parentfield='deductions'
and sd.salary_component=%(salary_component)s
and sd.variable_based_on_taxable_salary=1
and ss.docstatus=1
and ss.employee=%(employee)s
and ss.start_date between %(from_date)s and %(to_date)s
and ss.end_date between %(from_date)s and %(to_date)s
""", {
"salary_component": tax_component,
"employee": self.employee,
"from_date": start_date,
"to_date": end_date
})[0][0])
return total_tax_paid
def get_taxable_earnings(self, based_on_payment_days=0):
joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
taxable_earnings = 0
additional_income = 0
additional_income_with_full_tax = 0
flexi_benefits = 0
for earning in self.earnings:
if based_on_payment_days:
amount, additional_amount = self.get_amount_based_on_payment_days(earning, joining_date, relieving_date)
else:
amount, additional_amount = earning.amount, earning.additional_amount
if earning.is_tax_applicable:
if additional_amount:
taxable_earnings += (amount - additional_amount)
additional_income += additional_amount
if earning.deduct_full_tax_on_selected_payroll_date:
additional_income_with_full_tax += additional_amount
continue
if earning.is_flexible_benefit:
flexi_benefits += amount
else:
taxable_earnings += amount
return frappe._dict({
"taxable_earnings": taxable_earnings,
"additional_income": additional_income,
"additional_income_with_full_tax": additional_income_with_full_tax,
"flexi_benefits": flexi_benefits
})
def get_amount_based_on_payment_days(self, row, joining_date, relieving_date):
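# Worked example (illustrative numbers): with default_amount 30000, payment_days 20 and
# total_working_days 30, a component that depends on payment days is prorated to
# 30000 * 20 / 30 = 20000; any additional_amount is prorated the same way and added.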
amount, additional_amount = row.amount, row.additional_amount
if (self.salary_structure and
cint(row.depends_on_payment_days) and cint(self.total_working_days) and
(not self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
additional_amount = flt((flt(row.additional_amount) * flt(self.payment_days)
/ cint(self.total_working_days)), row.precision("additional_amount"))
amount = flt((flt(row.default_amount) * flt(self.payment_days)
/ cint(self.total_working_days)), row.precision("amount")) + additional_amount
elif not self.payment_days and not self.salary_slip_based_on_timesheet and cint(row.depends_on_payment_days):
amount, additional_amount = 0, 0
elif not row.amount:
amount = flt(row.default_amount) + flt(row.additional_amount)
# apply rounding
if frappe.get_cached_value("Salary Component", row.salary_component, "round_to_the_nearest_integer"):
amount, additional_amount = rounded(amount), rounded(additional_amount)
return amount, additional_amount
def calculate_unclaimed_taxable_benefits(self, payroll_period):
# get total sum of benefits paid
total_benefits_paid = flt(frappe.db.sql("""
select sum(sd.amount)
from `tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
where
sd.parentfield='earnings'
and sd.is_tax_applicable=1
and is_flexible_benefit=1
and ss.docstatus=1
and ss.employee=%(employee)s
and ss.start_date between %(start_date)s and %(end_date)s
and ss.end_date between %(start_date)s and %(end_date)s
""", {
"employee": self.employee,
"start_date": payroll_period.start_date,
"end_date": self.start_date
})[0][0])
# get total benefits claimed
total_benefits_claimed = flt(frappe.db.sql("""
select sum(claimed_amount)
from `tabEmployee Benefit Claim`
where
docstatus=1
and employee=%s
and claim_date between %s and %s
""", (self.employee, payroll_period.start_date, self.end_date))[0][0])
return total_benefits_paid - total_benefits_claimed
def get_total_exemption_amount_and_other_incomes(self, payroll_period):
total_exemption_amount, other_incomes = 0, 0
if self.deduct_tax_for_unsubmitted_tax_exemption_proof:
exemption_proof = frappe.db.get_value("Employee Tax Exemption Proof Submission",
{"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
["exemption_amount", "income_from_other_sources"])
if exemption_proof:
total_exemption_amount, other_incomes = exemption_proof
else:
declaration = frappe.db.get_value("Employee Tax Exemption Declaration",
{"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
["total_exemption_amount", "income_from_other_sources"])
if declaration:
total_exemption_amount, other_incomes = declaration
return total_exemption_amount, other_incomes
def calculate_tax_by_tax_slab(self, payroll_period, annual_taxable_earning):
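# Worked example (illustrative slabs, not from the source): with slabs 0-250000 at 0%,
# 250000-500000 at 5% and 500000 upwards (no upper bound) at 20%, an annual taxable
# earning of 600000 (after the standard exemption deducted below) yields
# 0 + (500000-250000)*0.05 + (600000-500000)*0.20 = 12500 + 20000 = 32500.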
payroll_period_obj = frappe.get_doc("Payroll Period", payroll_period)
annual_taxable_earning -= flt(payroll_period_obj.standard_tax_exemption_amount)
data = self.get_data_for_eval()
data.update({"annual_taxable_earning": annual_taxable_earning})
taxable_amount = 0
for slab in payroll_period_obj.taxable_salary_slabs:
if slab.condition and not self.eval_tax_slab_condition(slab.condition, data):
continue
if not slab.to_amount and annual_taxable_earning > slab.from_amount:
taxable_amount += (annual_taxable_earning - slab.from_amount) * slab.percent_deduction *.01
continue
if annual_taxable_earning > slab.from_amount and annual_taxable_earning < slab.to_amount:
taxable_amount += (annual_taxable_earning - slab.from_amount) * slab.percent_deduction *.01
elif annual_taxable_earning > slab.from_amount and annual_taxable_earning > slab.to_amount:
taxable_amount += (slab.to_amount - slab.from_amount) * slab.percent_deduction * .01
return taxable_amount
def eval_tax_slab_condition(self, condition, data):
try:
condition = condition.strip()
if condition:
return frappe.safe_eval(condition, self.whitelisted_globals, data)
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_salary_slip_row(self, salary_component):
component = frappe.get_doc("Salary Component", salary_component)
# Data for update_component_row
struct_row = frappe._dict()
struct_row['depends_on_payment_days'] = component.depends_on_payment_days
struct_row['salary_component'] = component.name
struct_row['abbr'] = component.salary_component_abbr
struct_row['do_not_include_in_total'] = component.do_not_include_in_total
struct_row['is_tax_applicable'] = component.is_tax_applicable
struct_row['is_flexible_benefit'] = component.is_flexible_benefit
struct_row['variable_based_on_taxable_salary'] = component.variable_based_on_taxable_salary
return struct_row
def get_component_totals(self, component_type):
total = 0.0
for d in self.get(component_type):
if not d.do_not_include_in_total:
d.amount = flt(d.amount, d.precision("amount"))
total += d.amount
return total
def set_component_amounts_based_on_payment_days(self):
joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for component_type in ("earnings", "deductions"):
for d in self.get(component_type):
d.amount = self.get_amount_based_on_payment_days(d, joining_date, relieving_date)[0]
def set_loan_repayment(self):
self.set('loans', [])<|fim▁hole|> self.total_interest_amount = 0
self.total_principal_amount = 0
for loan in self.get_loan_details():
self.append('loans', {
'loan': loan.name,
'total_payment': loan.total_payment,
'interest_amount': loan.interest_amount,
'principal_amount': loan.principal_amount,
'loan_account': loan.loan_account,
'interest_income_account': loan.interest_income_account
})
self.total_loan_repayment += loan.total_payment
self.total_interest_amount += loan.interest_amount
self.total_principal_amount += loan.principal_amount
def get_loan_details(self):
return frappe.db.sql("""select rps.principal_amount, rps.interest_amount, l.name,
rps.total_payment, l.loan_account, l.interest_income_account
from
`tabRepayment Schedule` as rps, `tabLoan` as l
where
l.name = rps.parent and rps.payment_date between %s and %s and
l.repay_from_salary = 1 and l.docstatus = 1 and l.applicant = %s""",
(self.start_date, self.end_date, self.employee), as_dict=True) or []
def update_salary_slip_in_additional_salary(self):
salary_slip = self.name if self.docstatus==1 else None
frappe.db.sql("""
update `tabAdditional Salary` set salary_slip=%s
where employee=%s and payroll_date between %s and %s and docstatus=1
""", (salary_slip, self.employee, self.start_date, self.end_date))
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
email_args = {
"recipients": [receiver],
"message": _("Please see attachment"),
"subject": 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date),
"attachments": [frappe.attach_print(self.doctype, self.name, file_name=self.name)],
"reference_doctype": self.doctype,
"reference_name": self.name
}
if not frappe.flags.in_test:
enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
else:
frappe.sendmail(**email_args)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def process_salary_structure(self, for_preview=0):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details(for_preview=for_preview)
self.calculate_net_pay()
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def process_salary_based_on_leave(self, lwp=0):
self.get_leave_details(lwp=lwp)
self.calculate_net_pay()
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")<|fim▁end|>
|
self.total_loan_repayment = 0
|
<|file_name|>SpringSecurityAuditorAware.java<|end_file_name|><|fim▁begin|>package io.variability.jhipster.security;
import io.variability.jhipster.config.Constants;
import org.springframework.data.domain.AuditorAware;
import org.springframework.stereotype.Component;
/**
* Implementation of AuditorAware based on Spring Security.<|fim▁hole|>public class SpringSecurityAuditorAware implements AuditorAware<String> {
@Override
public String getCurrentAuditor() {
String userName = SecurityUtils.getCurrentUserLogin();
return (userName != null ? userName : Constants.SYSTEM_ACCOUNT);
}
}<|fim▁end|>
|
*/
@Component
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[crate_type = "lib"];
#[crate_id = "github.com/bjz/glfw-rs#glfw:0.1"];
#[comment = "Bindings and wrapper functions for glfw3."];
#[feature(globs)];
#[feature(macro_rules)];
// TODO: Document differences between GLFW and glfw-rs
use std::cast;
use std::libc::*;
use std::ptr;
use std::str;
use std::vec;
pub mod ffi;
mod callbacks;
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum Action {
Release = ffi::RELEASE,
Press = ffi::PRESS,
Repeat = ffi::REPEAT,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum Key {
KeySpace = ffi::KEY_SPACE,
KeyApostrophe = ffi::KEY_APOSTROPHE,
KeyComma = ffi::KEY_COMMA,
KeyMinus = ffi::KEY_MINUS,
KeyPeriod = ffi::KEY_PERIOD,
KeySlash = ffi::KEY_SLASH,
Key0 = ffi::KEY_0,
Key1 = ffi::KEY_1,
Key2 = ffi::KEY_2,
Key3 = ffi::KEY_3,
Key4 = ffi::KEY_4,
Key5 = ffi::KEY_5,
Key6 = ffi::KEY_6,
Key7 = ffi::KEY_7,
Key8 = ffi::KEY_8,
Key9 = ffi::KEY_9,
KeySemicolon = ffi::KEY_SEMICOLON,
KeyEqual = ffi::KEY_EQUAL,
KeyA = ffi::KEY_A,
KeyB = ffi::KEY_B,
KeyC = ffi::KEY_C,
KeyD = ffi::KEY_D,
KeyE = ffi::KEY_E,
KeyF = ffi::KEY_F,
KeyG = ffi::KEY_G,
KeyH = ffi::KEY_H,
KeyI = ffi::KEY_I,
KeyJ = ffi::KEY_J,
KeyK = ffi::KEY_K,
KeyL = ffi::KEY_L,
KeyM = ffi::KEY_M,
KeyN = ffi::KEY_N,
KeyO = ffi::KEY_O,
KeyP = ffi::KEY_P,
KeyQ = ffi::KEY_Q,
KeyR = ffi::KEY_R,
KeyS = ffi::KEY_S,
KeyT = ffi::KEY_T,
KeyU = ffi::KEY_U,
KeyV = ffi::KEY_V,
KeyW = ffi::KEY_W,
KeyX = ffi::KEY_X,
KeyY = ffi::KEY_Y,
KeyZ = ffi::KEY_Z,
KeyLeftBracket = ffi::KEY_LEFT_BRACKET,
KeyBackslash = ffi::KEY_BACKSLASH,
KeyRightBracket = ffi::KEY_RIGHT_BRACKET,
KeyGraveAccent = ffi::KEY_GRAVE_ACCENT,
KeyWorld1 = ffi::KEY_WORLD_1,
KeyWorld2 = ffi::KEY_WORLD_2,
KeyEscape = ffi::KEY_ESCAPE,
KeyEnter = ffi::KEY_ENTER,
KeyTab = ffi::KEY_TAB,
KeyBackspace = ffi::KEY_BACKSPACE,
KeyInsert = ffi::KEY_INSERT,
KeyDelete = ffi::KEY_DELETE,
KeyRight = ffi::KEY_RIGHT,
KeyLeft = ffi::KEY_LEFT,
KeyDown = ffi::KEY_DOWN,
KeyUp = ffi::KEY_UP,
KeyPageUp = ffi::KEY_PAGE_UP,
KeyPageDown = ffi::KEY_PAGE_DOWN,
KeyHome = ffi::KEY_HOME,
KeyEnd = ffi::KEY_END,
KeyCapsLock = ffi::KEY_CAPS_LOCK,
KeyScrollLock = ffi::KEY_SCROLL_LOCK,
KeyNumLock = ffi::KEY_NUM_LOCK,
KeyPrintScreen = ffi::KEY_PRINT_SCREEN,
KeyPause = ffi::KEY_PAUSE,
KeyF1 = ffi::KEY_F1,
KeyF2 = ffi::KEY_F2,
KeyF3 = ffi::KEY_F3,
KeyF4 = ffi::KEY_F4,
KeyF5 = ffi::KEY_F5,
KeyF6 = ffi::KEY_F6,
KeyF7 = ffi::KEY_F7,
KeyF8 = ffi::KEY_F8,
KeyF9 = ffi::KEY_F9,
KeyF10 = ffi::KEY_F10,
KeyF11 = ffi::KEY_F11,
KeyF12 = ffi::KEY_F12,
KeyF13 = ffi::KEY_F13,
KeyF14 = ffi::KEY_F14,
KeyF15 = ffi::KEY_F15,
KeyF16 = ffi::KEY_F16,
KeyF17 = ffi::KEY_F17,
KeyF18 = ffi::KEY_F18,
KeyF19 = ffi::KEY_F19,
KeyF20 = ffi::KEY_F20,
KeyF21 = ffi::KEY_F21,
KeyF22 = ffi::KEY_F22,
KeyF23 = ffi::KEY_F23,
KeyF24 = ffi::KEY_F24,
KeyF25 = ffi::KEY_F25,
KeyKp0 = ffi::KEY_KP_0,
KeyKp1 = ffi::KEY_KP_1,
KeyKp2 = ffi::KEY_KP_2,
KeyKp3 = ffi::KEY_KP_3,
KeyKp4 = ffi::KEY_KP_4,
KeyKp5 = ffi::KEY_KP_5,
KeyKp6 = ffi::KEY_KP_6,
KeyKp7 = ffi::KEY_KP_7,
KeyKp8 = ffi::KEY_KP_8,
KeyKp9 = ffi::KEY_KP_9,
KeyKpDecimal = ffi::KEY_KP_DECIMAL,
KeyKpDivide = ffi::KEY_KP_DIVIDE,
KeyKpMultiply = ffi::KEY_KP_MULTIPLY,
KeyKpSubtract = ffi::KEY_KP_SUBTRACT,
KeyKpAdd = ffi::KEY_KP_ADD,
KeyKpEnter = ffi::KEY_KP_ENTER,
KeyKpEqual = ffi::KEY_KP_EQUAL,
KeyLeftShift = ffi::KEY_LEFT_SHIFT,
KeyLeftControl = ffi::KEY_LEFT_CONTROL,
KeyLeftAlt = ffi::KEY_LEFT_ALT,
KeyLeftSuper = ffi::KEY_LEFT_SUPER,
KeyRightShift = ffi::KEY_RIGHT_SHIFT,
KeyRightControl = ffi::KEY_RIGHT_CONTROL,
KeyRightAlt = ffi::KEY_RIGHT_ALT,
KeyRightSuper = ffi::KEY_RIGHT_SUPER,
KeyMenu = ffi::KEY_MENU,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum MouseButton {
MouseButtonLeft = ffi::MOUSE_BUTTON_LEFT,
MouseButtonRight = ffi::MOUSE_BUTTON_RIGHT,
MouseButtonMiddle = ffi::MOUSE_BUTTON_MIDDLE,
// MouseButton1 = ffi::MOUSE_BUTTON_1,
// MouseButton2 = ffi::MOUSE_BUTTON_2,
// MouseButton3 = ffi::MOUSE_BUTTON_3,
MouseButton4 = ffi::MOUSE_BUTTON_4,
MouseButton5 = ffi::MOUSE_BUTTON_5,
MouseButton6 = ffi::MOUSE_BUTTON_6,
MouseButton7 = ffi::MOUSE_BUTTON_7,
MouseButton8 = ffi::MOUSE_BUTTON_8,
}
// pub static MouseButtonLeft : MouseButton = MouseButton1;
// pub static MouseButtonRight : MouseButton = MouseButton2;
// pub static MouseButtonMiddle : MouseButton = MouseButton3;
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum Error {
NotInitialized = ffi::NOT_INITIALIZED,
NoCurrentContext = ffi::NO_CURRENT_CONTEXT,
InvalidEnum = ffi::INVALID_ENUM,
InvalidValue = ffi::INVALID_VALUE,
OutOfMemory = ffi::OUT_OF_MEMORY,
ApiUnavailable = ffi::API_UNAVAILABLE,
VersionUnavailable = ffi::VERSION_UNAVAILABLE,
PlatformError = ffi::PLATFORM_ERROR,
FormatUnavailable = ffi::FORMAT_UNAVAILABLE,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum ClientApi {
OpenGlApi = ffi::OPENGL_API,
OpenGlEsApi = ffi::OPENGL_ES_API,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum ContextRobustness {
NoRobustness = ffi::NO_ROBUSTNESS,
NoResetNotification = ffi::NO_RESET_NOTIFICATION,
LoseContextOnReset = ffi::LOSE_CONTEXT_ON_RESET,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum OpenGlProfile {
OpenGlAnyProfile = ffi::OPENGL_ANY_PROFILE,
OpenGlCoreProfile = ffi::OPENGL_CORE_PROFILE,
OpenGlCompatProfile = ffi::OPENGL_COMPAT_PROFILE,
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum CursorMode {
CursorNormal = ffi::CURSOR_NORMAL,
CursorHidden = ffi::CURSOR_HIDDEN,
CursorDisabled = ffi::CURSOR_DISABLED,
}
/// Describes a single video mode.
pub struct VidMode {
width: u32,
height: u32,
red_bits: u32,
green_bits: u32,
blue_bits: u32,
refresh_rate: u32,
}
/// Describes the gamma ramp of a monitor.
pub struct GammaRamp {
red: ~[c_ushort],
green: ~[c_ushort],
blue: ~[c_ushort],
}
pub type GLProc = ffi::GLFWglproc;
/// Initialise glfw. This must be called on the main platform thread.
///
/// Returns `Ok(())` if the initialisation was successful, otherwise `Err(())`.
///
/// Wrapper for `glfwInit`.
pub fn init() -> Result<(),()> {
match unsafe { ffi::glfwInit() } {
ffi::TRUE => Ok(()),
_ => Err(()),
}
}
/// Terminate glfw. This must be called on the main platform thread.
///
/// Wrapper for `glfwTerminate`.
pub fn terminate() {
unsafe { ffi::glfwTerminate() }
}
/// Initialises GLFW, automatically calling `glfw::terminate` on exit or
/// failure. Fails if the initialisation was unsuccessful.
///
/// # Parameters
///
/// - `f`: to be called after the GLFW is initialised.
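///
/// # Example
///
/// A minimal sketch (window creation and the event loop are omitted):
///
/// ~~~rust
/// glfw::start(proc() {
///     // GLFW is initialised here and terminated when this proc returns.
/// });
/// ~~~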
pub fn start(f: proc()) {
// use std::unstable::finally::Finally;
if init().is_ok() {
// f.finally(terminate);
f();
terminate();
} else {
fail!(~"Failed to initialize GLFW");
}
}
/// Holds the version information of the underlying GLFW library
pub struct Version {
major: u32,
minor: u32,
rev: u32,
}
impl ToStr for Version {
/// Returns a string representation of the version struct.
///
/// # Returns
///
/// A string in the form:
///
/// ~~~
/// ~"[major].[minor].[rev]"
/// ~~~
fn to_str(&self) -> ~str {
format!("{}.{}.{}", self.major, self.minor, self.rev)
}
}
/// Wrapper for `glfwGetVersion`.
pub fn get_version() -> Version {
unsafe {
let mut major = 0;
let mut minor = 0;
let mut rev = 0;
ffi::glfwGetVersion(&mut major, &mut minor, &mut rev);
Version {
major: major as u32,
minor: minor as u32,
rev: rev as u32,
}
}
}
/// Wrapper for `glfwGetVersionString`.
pub fn get_version_string() -> ~str {
unsafe { str::raw::from_c_str(ffi::glfwGetVersionString()) }
}
pub trait ErrorCallback { fn call(&self, error: Error, description: ~str); }
/// Wrapper for `glfwSetErrorCallback`.
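///
/// # Example
///
/// An illustrative call using the `LogErrorHandler` defined below:
///
/// ~~~rust
/// glfw::set_error_callback(~glfw::LogErrorHandler);
/// ~~~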
pub fn set_error_callback<Cb: ErrorCallback + Send>(callback: ~Cb) {
callbacks::set_error_callback(callback, (|ext_cb| {
unsafe { ffi::glfwSetErrorCallback(Some(ext_cb)); }
}));
}
/// An ErrorCallback implementation that uses the `error!` macro.
pub struct LogErrorHandler;
impl ErrorCallback for LogErrorHandler {
fn call(&self, error: Error, desc: ~str) {
error!("GLFW Error: {} ({})", error.to_str(), desc);
}
}
pub trait MonitorCallback { fn call(&self, monitor: &Monitor, event: MonitorEvent); }
/// A struct that wraps a `*GLFWmonitor` handle.
#[deriving(Eq)]
pub struct Monitor {
ptr: *ffi::GLFWmonitor
}
impl Monitor {
/// Wrapper for `glfwGetPrimaryMonitor`.
pub fn get_primary() -> Result<Monitor,()> {
unsafe {
ffi::glfwGetPrimaryMonitor()
.to_option()
.map_default(Err(()),
|ptr| Ok(Monitor { ptr: ptr }))
}
}
/// Wrapper for `glfwGetMonitors`.
pub fn get_connected() -> ~[Monitor] {
unsafe {
let mut count = 0;
let ptr = ffi::glfwGetMonitors(&mut count);
vec::from_buf(ptr, count as uint).map(|&m| Monitor { ptr: m })
}
}
/// Wrapper for `glfwGetMonitorPos`.
pub fn get_pos(&self) -> (i32, i32) {
unsafe {
let mut xpos = 0;
let mut ypos = 0;
ffi::glfwGetMonitorPos(self.ptr, &mut xpos, &mut ypos);
(xpos as i32, ypos as i32)
}
}
/// Wrapper for `glfwGetMonitorPhysicalSize`.
pub fn get_physical_size(&self) -> (i32, i32) {
unsafe {
let mut width = 0;
let mut height = 0;
ffi::glfwGetMonitorPhysicalSize(self.ptr, &mut width, &mut height);
(width as i32, height as i32)
}
}
/// Wrapper for `glfwGetMonitorName`.
pub fn get_name(&self) -> ~str {
unsafe { str::raw::from_c_str(ffi::glfwGetMonitorName(self.ptr)) }
}
/// Wrapper for `glfwSetMonitorCallback`.
pub fn set_callback<Cb: MonitorCallback + Send>(callback: ~Cb) {
callbacks::set_monitor_callback(callback, (|ext_cb| {
unsafe { ffi::glfwSetMonitorCallback(Some(ext_cb)); }
}));
}
/// Wrapper for `glfwGetVideoModes`.
pub fn get_video_modes(&self) -> ~[VidMode] {
unsafe {
let mut count = 0;
let ptr = ffi::glfwGetVideoModes(self.ptr, &mut count);
vec::from_buf(ptr, count as uint).map(VidMode::from_glfw_vid_mode)
}
}
/// Wrapper for `glfwGetVideoMode`.
pub fn get_video_mode(&self) -> Option<VidMode> {
unsafe {
ffi::glfwGetVideoMode(self.ptr).to_option().map(|v| VidMode::from_glfw_vid_mode(v))
}
}
/// Wrapper for `glfwSetGamma`.
pub fn set_gamma(&self, gamma: f32) {
unsafe { ffi::glfwSetGamma(self.ptr, gamma as c_float); }
}
/// Wrapper for `glfwGetGammaRamp`.
pub fn get_gamma_ramp(&self) -> GammaRamp {
unsafe {
let llramp = *ffi::glfwGetGammaRamp(self.ptr);
GammaRamp {
red: vec::from_buf(llramp.red, llramp.size as uint),
green: vec::from_buf(llramp.green, llramp.size as uint),
blue: vec::from_buf(llramp.blue, llramp.size as uint),
}
}
}
/// Wrapper for `glfwSetGammaRamp`.
pub fn set_gamma_ramp(&self, ramp: &GammaRamp) {
unsafe {
ffi::glfwSetGammaRamp(
self.ptr,
&ffi::GLFWgammaramp {
red: ramp.red.as_ptr(),
green: ramp.green.as_ptr(),
blue: ramp.blue.as_ptr(),
size: ramp.red.len() as c_uint,
}
);
}
}
}
#[repr(C)]
pub enum MonitorEvent {
Connected = ffi::CONNECTED,
Disconnected = ffi::DISCONNECTED,
}
impl VidMode {
fn from_glfw_vid_mode(mode: &ffi::GLFWvidmode) -> VidMode {
VidMode {
width: mode.width as u32,
height: mode.height as u32,
red_bits: mode.redBits as u32,
green_bits: mode.greenBits as u32,
blue_bits: mode.blueBits as u32,
refresh_rate: mode.refreshRate as u32,
}
}
}
impl ToStr for VidMode {
/// Returns a string representation of the video mode.
///
/// # Returns
///
/// A string in the form:
///
/// ~~~
/// ~"[width] x [height], [total_bits] ([red_bits] [green_bits] [blue_bits]) [refresh_rate] Hz"
/// ~~~
fn to_str(&self) -> ~str {
format!("{} x {}, {} ({} {} {}) {} Hz",
self.width, self.height,
self.red_bits + self.green_bits + self.blue_bits,
self.red_bits, self.green_bits, self.blue_bits,
self.refresh_rate)
}
}
pub mod window_hint {
use std::libc::c_int;
use super::*;
/// Wrapper for `glfwDefaultWindowHints`.
pub fn default() {
unsafe { ffi::glfwDefaultWindowHints(); }
}
/// Wrapper for `glfwWindowHint` called with `RED_BITS`.
pub fn red_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::RED_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `GREEN_BITS`.
pub fn green_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::GREEN_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `BLUE_BITS`.
pub fn blue_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::BLUE_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `ALPHA_BITS`.
pub fn alpha_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::ALPHA_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `DEPTH_BITS`.
pub fn depth_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::DEPTH_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `STENCIL_BITS`.
pub fn stencil_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::STENCIL_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `ACCUM_RED_BITS`.
pub fn accum_red_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::ACCUM_RED_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `ACCUM_GREEN_BITS`.
pub fn accum_green_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::ACCUM_GREEN_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `ACCUM_BLUE_BITS`.
pub fn accum_blue_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::ACCUM_BLUE_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `ACCUM_ALPHA_BITS`.
pub fn accum_alpha_bits(bits: u32) {
unsafe { ffi::glfwWindowHint(ffi::ACCUM_ALPHA_BITS, bits as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `AUX_BUFFERS`.
pub fn aux_buffers(buffers: u32) {
unsafe { ffi::glfwWindowHint(ffi::AUX_BUFFERS, buffers as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `STEREO`.
pub fn stereo(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::STEREO, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `SAMPLES`.
pub fn samples(samples: u32) {
unsafe { ffi::glfwWindowHint(ffi::SAMPLES, samples as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `SRGB_CAPABLE`.
pub fn srgb_capable(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::SRGB_CAPABLE, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `REFRESH_RATE`.
pub fn refresh_rate(rate: u32) {
unsafe { ffi::glfwWindowHint(ffi::REFRESH_RATE, rate as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `CLIENT_API`.
pub fn client_api(api: ClientApi) {
unsafe { ffi::glfwWindowHint(ffi::CLIENT_API, api as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `CONTEXT_VERSION_MAJOR`.
pub fn context_version_major(major: u32) {
unsafe { ffi::glfwWindowHint(ffi::CONTEXT_VERSION_MAJOR, major as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `CONTEXT_VERSION_MINOR`.
pub fn context_version_minor(minor: u32) {
unsafe { ffi::glfwWindowHint(ffi::CONTEXT_VERSION_MINOR, minor as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `CONTEXT_VERSION_MAJOR` and
/// `CONTEXT_VERSION_MINOR`.
pub fn context_version(major: u32, minor: u32) {
unsafe {
ffi::glfwWindowHint(ffi::CONTEXT_VERSION_MAJOR, major as c_int);
ffi::glfwWindowHint(ffi::CONTEXT_VERSION_MINOR, minor as c_int);
}
}
/// Wrapper for `glfwWindowHint` called with `CONTEXT_ROBUSTNESS`.
pub fn context_robustness(value: ContextRobustness) {
unsafe { ffi::glfwWindowHint(ffi::CONTEXT_ROBUSTNESS, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `OPENGL_FORWARD_COMPAT`.
pub fn opengl_forward_compat(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::OPENGL_FORWARD_COMPAT, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `OPENGL_DEBUG_CONTEXT`.
pub fn opengl_debug_context(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::OPENGL_DEBUG_CONTEXT, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `OPENGL_PROFILE`.
pub fn opengl_profile(profile: OpenGlProfile) {
unsafe { ffi::glfwWindowHint(ffi::OPENGL_PROFILE, profile as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `RESIZABLE`.
pub fn resizable(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::RESIZABLE, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `VISIBLE`.
pub fn visible(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::VISIBLE, value as c_int); }
}
/// Wrapper for `glfwWindowHint` called with `DECORATED`.
pub fn decorated(value: bool) {
unsafe { ffi::glfwWindowHint(ffi::DECORATED, value as c_int); }
}
}
/// Describes the mode of a window
pub enum WindowMode {
/// Full screen mode. Contains the monitor on which the window is displayed.
FullScreen(Monitor),
/// Windowed mode.
Windowed,
}
/// Private conversion methods for `glfw::WindowMode`
impl WindowMode {
/// Extract the window mode from a low-level monitor pointer. If the pointer
/// is null it assumes the window is in windowed mode and returns `Windowed`,
/// otherwise it returns the pointer wrapped in `glfw::FullScreen`.
fn from_ptr(ptr: *ffi::GLFWmonitor) -> WindowMode {
if ptr.is_null() {
Windowed
} else {
FullScreen(Monitor { ptr: ptr })
}
}
/// Returns a pointer to the monitor if the window is in fullscreen mode,
/// otherwise a null pointer (windowed mode).
fn to_ptr(&self) -> *ffi::GLFWmonitor {
match *self {
FullScreen(monitor) => monitor.ptr,
Windowed => ptr::null()
}
}
}
/// A struct that wraps a `*GLFWwindow` handle.
pub struct Window {
ptr: *ffi::GLFWwindow,
is_shared: bool,
}
/// A group of key modifiers
pub struct Modifiers {
values: c_int,
}
/// Key modifier tokens
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum Modifier {
Shift = ffi::MOD_SHIFT,
Control = ffi::MOD_CONTROL,
Alt = ffi::MOD_ALT,
Super = ffi::MOD_SUPER,
}
impl Modifiers {
/// Check to see if a specific key modifier is present
///
/// # Example
///
/// ~~~rust
/// struct ShiftWatcher;
/// impl glfw::KeyCallback for ShiftWatcher {
///     fn call(&self, _: &glfw::Window, _: glfw::Key, _: std::libc::c_int,
///             _: glfw::Action, mods: glfw::Modifiers) {
///         if mods.contains(glfw::Shift) { println("Shift detected!") }
///     }
/// }
/// window.set_key_callback(~ShiftWatcher);
/// ~~~
pub fn contains(&self, modifier: Modifier) -> bool {
self.values & (modifier as c_int) != ffi::FALSE
}
}
impl ToStr for Modifiers {
fn to_str(&self) -> ~str {
let mut ss = ~[];
if self.contains(Shift) { ss.push(Shift.to_str()) }
if self.contains(Control) { ss.push(Control.to_str()) }
if self.contains(Alt) { ss.push(Alt.to_str()) }
if self.contains(Super) { ss.push(Super.to_str()) }
ss.connect(", ")
}
}
pub trait WindowPosCallback { fn call(&self, window: &Window, xpos: i32, ypos: i32); }
pub trait WindowSizeCallback { fn call(&self, window: &Window, width: i32, height: i32); }
pub trait WindowCloseCallback { fn call(&self, window: &Window); }
pub trait WindowRefreshCallback { fn call(&self, window: &Window); }
pub trait WindowFocusCallback { fn call(&self, window: &Window, focused: bool); }
pub trait WindowIconifyCallback { fn call(&self, window: &Window, iconified: bool); }
pub trait FramebufferSizeCallback { fn call(&self, window: &Window, width: i32, height: i32); }
pub trait MouseButtonCallback { fn call(&self, window: &Window, button: MouseButton, action: Action, modifiers: Modifiers); }
pub trait CursorPosCallback { fn call(&self, window: &Window, xpos: f64, ypos: f64); }
pub trait CursorEnterCallback { fn call(&self, window: &Window, entered: bool); }
pub trait ScrollCallback { fn call(&self, window: &Window, xpos: f64, ypos: f64); }
pub trait KeyCallback { fn call(&self, window: &Window, key: Key, scancode: c_int, action: Action, modifiers: Modifiers); }
pub trait CharCallback { fn call(&self, window: &Window, character: char); }
/// Holds the callbacks associated with a window
struct WindowCallbacks {
pos_callback: Option<~WindowPosCallback>,
size_callback: Option<~WindowSizeCallback>,
close_callback: Option<~WindowCloseCallback>,
refresh_callback: Option<~WindowRefreshCallback>,
focus_callback: Option<~WindowFocusCallback>,
iconify_callback: Option<~WindowIconifyCallback>,
framebuffer_size_callback: Option<~FramebufferSizeCallback>,
mouse_button_callback: Option<~MouseButtonCallback>,
cursor_pos_callback: Option<~CursorPosCallback>,
cursor_enter_callback: Option<~CursorEnterCallback>,
scroll_callback: Option<~ScrollCallback>,
key_callback: Option<~KeyCallback>,
char_callback: Option<~CharCallback>,
}
impl WindowCallbacks {
/// Initialize the struct with all callbacks set to `None`.
fn new() -> WindowCallbacks {
WindowCallbacks {
pos_callback: None,
size_callback: None,
close_callback: None,
refresh_callback: None,
focus_callback: None,
iconify_callback: None,
framebuffer_size_callback: None,
mouse_button_callback: None,
cursor_pos_callback: None,
cursor_enter_callback: None,
scroll_callback: None,
key_callback: None,
char_callback: None,
}
}
}
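// Helper macro for the `Window::set_*_callback` methods below: it stores the
// boxed callback trait object in the `WindowCallbacks` struct hanging off the
// window's user pointer, then registers the matching extern trampoline from
// the `callbacks` module with GLFW via the given `glfwSet*Callback` function.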
macro_rules! set_window_callback(
(
setter: $ll_fn:ident,
cb_trait: $cb_trait:ident,
callback: $ext_callback:ident,
field: $data_field:ident
) => ({
unsafe {
self.get_callbacks().$data_field = Some(callback as ~$cb_trait);
ffi::$ll_fn(self.ptr, Some(callbacks::$ext_callback));
}
})
)
impl Window {
/// Wrapper for `glfwCreateWindow`.
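///
/// # Example
///
/// A sketch of a typical create-and-run loop. It assumes GLFW has already
/// been initialized successfully; the size and title are illustrative:
///
/// ~~~rust
/// let window = glfw::Window::create(640, 480, "Hello", glfw::Windowed)
///     .expect("Failed to create the window.");
/// window.make_context_current();
/// while !window.should_close() {
///     glfw::poll_events();
///     window.swap_buffers();
/// }
/// ~~~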
pub fn create(width: u32, height: u32, title: &str, mode: WindowMode) -> Option<Window> {
Window::create_intern(width, height, title, mode, None)
}
/// Wrapper for `glfwCreateWindow`.
pub fn create_shared(&self, width: u32, height: u32, title: &str, mode: WindowMode) -> Option<Window> {
Window::create_intern(width, height, title, mode, Some(self))
}
/// Internal wrapper for `glfwCreateWindow`.
fn create_intern(width: u32, height: u32, title: &str, mode: WindowMode, share: Option<&Window>) -> Option<Window> {
let ptr = unsafe {
title.with_c_str(|title| {
ffi::glfwCreateWindow(
width as c_int,
height as c_int,
title,
mode.to_ptr(),
match share { Some(w) => w.ptr, None => ptr::null() }
)
})
};
if ptr.is_null() {
None
} else {
unsafe {
ffi::glfwSetWindowUserPointer(ptr, cast::transmute(~WindowCallbacks::new()));
}
let window = Window {
ptr: ptr,
is_shared: share.is_none(),
};
Some(window)
}
}
unsafe fn get_callbacks(&self) -> &mut WindowCallbacks {
cast::transmute(ffi::glfwGetWindowUserPointer(self.ptr))
}
unsafe fn free_callbacks(&self) {
if !self.ptr.is_null() {
let _: ~WindowCallbacks =
cast::transmute(ffi::glfwGetWindowUserPointer(self.ptr));
}
}
/// Closes the window by consuming it, which forces the destructor to be
/// called immediately.
pub fn close(self) {
// Taking `self` by value drops the window at the end of this method.
}
/// Wrapper for `glfwWindowShouldClose`.
pub fn should_close(&self) -> bool {
unsafe { ffi::glfwWindowShouldClose(self.ptr) == ffi::TRUE }
}
/// Wrapper for `glfwSetWindowShouldClose`.
pub fn set_should_close(&self, value: bool) {
unsafe { ffi::glfwSetWindowShouldClose(self.ptr, value as c_int) }
}
/// Wrapper for `glfwSetWindowTitle`.
pub fn set_title(&self, title: &str) {
unsafe {
title.with_c_str(|title| {
ffi::glfwSetWindowTitle(self.ptr, title);
});
}
}
/// Wrapper for `glfwGetWindowPos`.
pub fn get_pos(&self) -> (i32, i32) {
unsafe {
let mut xpos = 0;
let mut ypos = 0;
ffi::glfwGetWindowPos(self.ptr, &mut xpos, &mut ypos);
(xpos as i32, ypos as i32)
}
}
/// Wrapper for `glfwSetWindowPos`.
pub fn set_pos(&self, xpos: i32, ypos: i32) {
unsafe { ffi::glfwSetWindowPos(self.ptr, xpos as c_int, ypos as c_int); }
}
/// Wrapper for `glfwGetWindowSize`.
pub fn get_size(&self) -> (i32, i32) {
unsafe {
let mut width = 0;
let mut height = 0;
ffi::glfwGetWindowSize(self.ptr, &mut width, &mut height);
(width as i32, height as i32)
}<|fim▁hole|> pub fn set_size(&self, width: i32, height: i32) {
unsafe { ffi::glfwSetWindowSize(self.ptr, width as c_int, height as c_int); }
}
/// Wrapper for `glfwGetFramebufferSize`.
pub fn get_framebuffer_size(&self) -> (i32, i32) {
unsafe {
let mut width = 0;
let mut height = 0;
ffi::glfwGetFramebufferSize(self.ptr, &mut width, &mut height);
(width as i32, height as i32)
}
}
/// Wrapper for `glfwIconifyWindow`.
pub fn iconify(&self) {
unsafe { ffi::glfwIconifyWindow(self.ptr); }
}
/// Wrapper for `glfwRestoreWindow`.
pub fn restore(&self) {
unsafe { ffi::glfwRestoreWindow(self.ptr); }
}
/// Wrapper for `glfwShowWindow`.
pub fn show(&self) {
unsafe { ffi::glfwShowWindow(self.ptr); }
}
/// Wrapper for `glfwHideWindow`.
pub fn hide(&self) {
unsafe { ffi::glfwHideWindow(self.ptr); }
}
/// Wrapper for `glfwGetWindowMonitor`.
///
/// # Returns
///
/// The window mode: either `glfw::FullScreen` or `glfw::Windowed`.
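///
/// # Example
///
/// A sketch of inspecting the result (assumes `window` is a valid `Window`):
///
/// ~~~rust
/// match window.get_window_mode() {
///     glfw::FullScreen(_) => println("The window is fullscreen."),
///     glfw::Windowed => println("The window is windowed."),
/// }
/// ~~~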
pub fn get_window_mode(&self) -> WindowMode {
WindowMode::from_ptr(
unsafe { ffi::glfwGetWindowMonitor(self.ptr) }
)
}
/// Wrapper for `glfwGetWindowAttrib` called with `FOCUSED`.
pub fn is_focused(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::FOCUSED) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `ICONIFIED`.
pub fn is_iconified(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::ICONIFIED) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `CLIENT_API`.
pub fn get_client_api(&self) -> c_int {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::CLIENT_API) }
}
/// Wrapper for `glfwGetWindowAttrib` called with
/// `CONTEXT_VERSION_MAJOR`, `CONTEXT_VERSION_MINOR` and `CONTEXT_REVISION`.
///
/// # Returns
///
/// The client API version of the window's context in a version struct.
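///
/// # Example
///
/// A sketch of checking the version reported for the window's context
/// (assumes `window` is a valid `Window`):
///
/// ~~~rust
/// let version = window.get_context_version();
/// if version.major < 3 {
///     println("Only a legacy OpenGL context is available.");
/// }
/// ~~~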
pub fn get_context_version(&self) -> Version {
unsafe {
Version {
major: ffi::glfwGetWindowAttrib(self.ptr, ffi::CONTEXT_VERSION_MAJOR) as u32,
minor: ffi::glfwGetWindowAttrib(self.ptr, ffi::CONTEXT_VERSION_MINOR) as u32,
rev: ffi::glfwGetWindowAttrib(self.ptr, ffi::CONTEXT_REVISION) as u32,
}
}
}
/// Wrapper for `glfwGetWindowAttrib` called with `CONTEXT_ROBUSTNESS`.
pub fn get_context_robustness(&self) -> c_int {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::CONTEXT_ROBUSTNESS) }
}
/// Wrapper for `glfwGetWindowAttrib` called with `OPENGL_FORWARD_COMPAT`.
pub fn is_opengl_forward_compat(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::OPENGL_FORWARD_COMPAT) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `OPENGL_DEBUG_CONTEXT`.
pub fn is_opengl_debug_context(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::OPENGL_DEBUG_CONTEXT) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `OPENGL_PROFILE`.
pub fn get_opengl_profile(&self) -> c_int {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::OPENGL_PROFILE) }
}
/// Wrapper for `glfwGetWindowAttrib` called with `RESIZABLE`.
pub fn is_resizable(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::RESIZABLE) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `VISIBLE`.
pub fn is_visible(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::VISIBLE) == ffi::TRUE }
}
/// Wrapper for `glfwGetWindowAttrib` called with `DECORATED`.
pub fn is_decorated(&self) -> bool {
unsafe { ffi::glfwGetWindowAttrib(self.ptr, ffi::DECORATED) == ffi::TRUE }
}
/// Wrapper for `glfwSetWindowPosCallback`.
pub fn set_pos_callback<Cb: WindowPosCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowPosCallback,
cb_trait: WindowPosCallback,
callback: window_pos_callback,
field: pos_callback);
}
/// Wrapper for `glfwSetWindowSizeCallback`.
pub fn set_size_callback<Cb: WindowSizeCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowSizeCallback,
cb_trait: WindowSizeCallback,
callback: window_size_callback,
field: size_callback);
}
/// Wrapper for `glfwSetWindowCloseCallback`.
pub fn set_close_callback<Cb: WindowCloseCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowCloseCallback,
cb_trait: WindowCloseCallback,
callback: window_close_callback,
field: close_callback);
}
/// Wrapper for `glfwSetWindowRefreshCallback`.
pub fn set_refresh_callback<Cb: WindowRefreshCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowRefreshCallback,
cb_trait: WindowRefreshCallback,
callback: window_refresh_callback,
field: refresh_callback);
}
/// Wrapper for `glfwSetWindowFocusCallback`.
pub fn set_focus_callback<Cb: WindowFocusCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowFocusCallback,
cb_trait: WindowFocusCallback,
callback: window_focus_callback,
field: focus_callback);
}
/// Wrapper for `glfwSetWindowIconifyCallback`.
pub fn set_iconify_callback<Cb: WindowIconifyCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetWindowIconifyCallback,
cb_trait: WindowIconifyCallback,
callback: window_iconify_callback,
field: iconify_callback);
}
/// Wrapper for `glfwSetFramebufferSizeCallback`.
pub fn set_framebuffer_size_callback<Cb: FramebufferSizeCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetFramebufferSizeCallback,
cb_trait: FramebufferSizeCallback,
callback: framebuffer_size_callback,
field: framebuffer_size_callback);
}
/// Wrapper for `glfwGetInputMode` called with `CURSOR`.
pub fn get_cursor_mode(&self) -> CursorMode {
unsafe { cast::transmute(ffi::glfwGetInputMode(self.ptr, ffi::CURSOR)) }
}
/// Wrapper for `glfwSetInputMode` called with `CURSOR`.
pub fn set_cursor_mode(&self, mode: CursorMode) {
unsafe { ffi::glfwSetInputMode(self.ptr, ffi::CURSOR, mode as c_int); }
}
/// Wrapper for `glfwGetInputMode` called with `STICKY_KEYS`.
pub fn has_sticky_keys(&self) -> bool {
unsafe { ffi::glfwGetInputMode(self.ptr, ffi::STICKY_KEYS) == ffi::TRUE }
}
/// Wrapper for `glfwSetInputMode` called with `STICKY_KEYS`.
pub fn set_sticky_keys(&self, value: bool) {
unsafe { ffi::glfwSetInputMode(self.ptr, ffi::STICKY_KEYS, value as c_int); }
}
/// Wrapper for `glfwGetInputMode` called with `STICKY_MOUSE_BUTTONS`.
pub fn has_sticky_mouse_buttons(&self) -> bool {
unsafe { ffi::glfwGetInputMode(self.ptr, ffi::STICKY_MOUSE_BUTTONS) == ffi::TRUE }
}
/// Wrapper for `glfwSetInputMode` called with `STICKY_MOUSE_BUTTONS`.
pub fn set_sticky_mouse_buttons(&self, value: bool) {
unsafe { ffi::glfwSetInputMode(self.ptr, ffi::STICKY_MOUSE_BUTTONS, value as c_int); }
}
/// Wrapper for `glfwGetKey`.
pub fn get_key(&self, key: Key) -> Action {
unsafe { cast::transmute(ffi::glfwGetKey(self.ptr, key as c_int)) }
}
/// Wrapper for `glfwGetMouseButton`.
pub fn get_mouse_button(&self, button: MouseButton) -> Action {
unsafe { cast::transmute(ffi::glfwGetMouseButton(self.ptr, button as c_int)) }
}
/// Wrapper for `glfwGetCursorPos`.
pub fn get_cursor_pos(&self) -> (f64, f64) {
unsafe {
let mut xpos = 0.0;
let mut ypos = 0.0;
ffi::glfwGetCursorPos(self.ptr, &mut xpos, &mut ypos);
(xpos as f64, ypos as f64)
}
}
/// Wrapper for `glfwSetCursorPos`.
pub fn set_cursor_pos(&self, xpos: f64, ypos: f64) {
unsafe { ffi::glfwSetCursorPos(self.ptr, xpos as c_double, ypos as c_double); }
}
/// Wrapper for `glfwSetKeyCallback`.
pub fn set_key_callback<Cb: KeyCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetKeyCallback,
cb_trait: KeyCallback,
callback: key_callback,
field: key_callback);
}
/// Wrapper for `glfwSetCharCallback`.
pub fn set_char_callback<Cb: CharCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetCharCallback,
cb_trait: CharCallback,
callback: char_callback,
field: char_callback);
}
/// Wrapper for `glfwSetMouseButtonCallback`.
pub fn set_mouse_button_callback<Cb: MouseButtonCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetMouseButtonCallback,
cb_trait: MouseButtonCallback,
callback: mouse_button_callback,
field: mouse_button_callback);
}
/// Wrapper for `glfwSetCursorPosCallback`.
pub fn set_cursor_pos_callback<Cb: CursorPosCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetCursorPosCallback,
cb_trait: CursorPosCallback,
callback: cursor_pos_callback,
field: cursor_pos_callback);
}
/// Wrapper for `glfwSetCursorEnterCallback`.
pub fn set_cursor_enter_callback<Cb: CursorEnterCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetCursorEnterCallback,
cb_trait: CursorEnterCallback,
callback: cursor_enter_callback,
field: cursor_enter_callback);
}
/// Wrapper for `glfwSetScrollCallback`.
pub fn set_scroll_callback<Cb: ScrollCallback + Send>(&self, callback: ~Cb) {
set_window_callback!(setter: glfwSetScrollCallback,
cb_trait: ScrollCallback,
callback: scroll_callback,
field: scroll_callback);
}
/// Wrapper for `glfwSetClipboardString`.
pub fn set_clipboard_string(&self, string: &str) {
unsafe {
string.with_c_str(|string| {
ffi::glfwSetClipboardString(self.ptr, string);
});
}
}
/// Wrapper for `glfwGetClipboardString`.
pub fn get_clipboard_string(&self) -> ~str {
unsafe { str::raw::from_c_str(ffi::glfwGetClipboardString(self.ptr)) }
}
/// Wrapper for `glfwMakeContextCurrent`.
pub fn make_context_current(&self) {
make_context_current(Some(self));
}
/// Wrapper for `glfwGetCurrentContext`
pub fn is_current_context(&self) -> bool {
self.ptr == unsafe { ffi::glfwGetCurrentContext() }
}
/// Wrapper for `glfwSwapBuffers`.
pub fn swap_buffers(&self) {
unsafe { ffi::glfwSwapBuffers(self.ptr); }
}
/// Wrapper for `glfwGetWin32Window`
#[cfg(target_os="win32")]
pub fn get_win32_window(&self) -> *c_void {
unsafe { ffi::glfwGetWin32Window(self.ptr) }
}
/// Wrapper for `glfwGetWGLContext`
#[cfg(target_os="win32")]
pub fn get_wgl_context(&self) -> *c_void {
unsafe { ffi::glfwGetWGLContext(self.ptr) }
}
/// Wrapper for `glfwGetCocoaWindow`
#[cfg(target_os="macos")]
pub fn get_cocoa_window(&self) -> *c_void {
unsafe { ffi::glfwGetCocoaWindow(self.ptr) }
}
/// Wrapper for `glfwGetNSGLContext`
#[cfg(target_os="macos")]
pub fn get_nsgl_context(&self) -> *c_void {
unsafe { ffi::glfwGetNSGLContext(self.ptr) }
}
/// Wrapper for `glfwGetX11Window`
#[cfg(target_os="linux")]
pub fn get_x11_window(&self) -> *c_void {
unsafe { ffi::glfwGetX11Window(self.ptr) }
}
/// Wrapper for `glfwGetGLXContext`
#[cfg(target_os="linux")]
pub fn get_glx_context(&self) -> *c_void {
unsafe { ffi::glfwGetGLXContext(self.ptr) }
}
}
/// Wrapper for `glfwMakeContextCurrent`.
pub fn make_context_current(context: Option<&Window>) {
match context {
Some(window) => unsafe { ffi::glfwMakeContextCurrent(window.ptr) },
None => unsafe { ffi::glfwMakeContextCurrent(ptr::null()) },
}
}
/// Wrapper for `glfwGetX11Display`
#[cfg(target_os="linux")]
pub fn get_x11_display() -> *c_void {
unsafe { ffi::glfwGetX11Display() }
}
#[unsafe_destructor]
impl Drop for Window {
/// Closes the window and removes all associated callbacks.
///
/// Wrapper for `glfwDestroyWindow`.
fn drop(&mut self) {
if !self.is_shared {
unsafe { ffi::glfwDestroyWindow(self.ptr); }
}
unsafe { self.free_callbacks() }
}
}
/// Wrapper for `glfwPollEvents`.
pub fn poll_events() {
unsafe { ffi::glfwPollEvents(); }
}
/// Wrapper for `glfwWaitEvents`.
pub fn wait_events() {
unsafe { ffi::glfwWaitEvents(); }
}
#[repr(C)]
#[deriving(Clone, Eq, IterBytes, ToStr)]
pub enum Joystick {
Joystick1 = ffi::JOYSTICK_1,
Joystick2 = ffi::JOYSTICK_2,
Joystick3 = ffi::JOYSTICK_3,
Joystick4 = ffi::JOYSTICK_4,
Joystick5 = ffi::JOYSTICK_5,
Joystick6 = ffi::JOYSTICK_6,
Joystick7 = ffi::JOYSTICK_7,
Joystick8 = ffi::JOYSTICK_8,
Joystick9 = ffi::JOYSTICK_9,
Joystick10 = ffi::JOYSTICK_10,
Joystick11 = ffi::JOYSTICK_11,
Joystick12 = ffi::JOYSTICK_12,
Joystick13 = ffi::JOYSTICK_13,
Joystick14 = ffi::JOYSTICK_14,
Joystick15 = ffi::JOYSTICK_15,
Joystick16 = ffi::JOYSTICK_16,
}
impl Joystick {
/// Wrapper for `glfwJoystickPresent`.
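///
/// # Example
///
/// A sketch of probing the first joystick (assumes GLFW is initialized):
///
/// ~~~rust
/// if glfw::Joystick1.is_present() {
///     println(glfw::Joystick1.get_name());
/// }
/// ~~~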
pub fn is_present(&self) -> bool {
unsafe { ffi::glfwJoystickPresent(*self as c_int) == ffi::TRUE }
}
/// Wrapper for `glfwGetJoystickAxes`.
pub fn get_axes(&self) -> ~[f32] {
unsafe {
let mut count = 0;
let ptr = ffi::glfwGetJoystickAxes(*self as c_int, &mut count);
vec::from_buf(ptr, count as uint).map(|&a| a as f32)
}
}
/// Wrapper for `glfwGetJoystickButtons`.
pub fn get_buttons(&self) -> ~[c_int] {
unsafe {
let mut count = 0;
let ptr = ffi::glfwGetJoystickButtons(*self as c_int, &mut count);
vec::from_buf(ptr, count as uint).map(|&b| b as c_int)
}
}
/// Wrapper for `glfwGetJoystickName`.
pub fn get_name(&self) -> ~str {
unsafe { str::raw::from_c_str(ffi::glfwGetJoystickName(*self as c_int)) }
}
}
/// Wrapper for `glfwGetTime`.
pub fn get_time() -> f64 {
unsafe { ffi::glfwGetTime() as f64 }
}
/// Wrapper for `glfwSetTime`.
pub fn set_time(time: f64) {
unsafe { ffi::glfwSetTime(time as c_double); }
}
/// Wrapper for `glfwSwapInterval`.
pub fn set_swap_interval(interval: u32) {
unsafe { ffi::glfwSwapInterval(interval as c_int); }
}
/// Wrapper for `glfwExtensionSupported`.
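///
/// # Example
///
/// A sketch using an illustrative extension name:
///
/// ~~~rust
/// if glfw::extension_supported("GL_ARB_debug_output") {
///     // the extension can be used with the current context
/// }
/// ~~~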
pub fn extension_supported(extension: &str) -> bool {
unsafe {
extension.with_c_str(|extension| {
ffi::glfwExtensionSupported(extension) == ffi::TRUE
})
}
}
/// Wrapper for `glfwGetProcAddress`.
pub fn get_proc_address(procname: &str) -> Option<GLProc> {
unsafe {
procname.with_c_str(|procname| {
ffi::glfwGetProcAddress(procname)
})
}
}<|fim▁end|>
|
}
/// Wrapper for `glfwSetWindowSize`.
|