code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import zc.selenium.pytest
class SeleniumTests(zc.selenium.pytest.Test):
    """Browser tests for the zc.dojoform calculator and form demo pages."""

    # XPath template for one data cell of the addresses grid:
    # view-div index (row group) and td index (column), both 1-based.
    _GRID_CELL = (
        "//div[@id='dojox_grid__View_1']"
        "/div/div/div/div[%d]/table/tbody/tr/td[%d]/div")

    def _cell(self, row, col):
        # XPath of a single cell in the record-list grid.
        return self._GRID_CELL % (row, col)

    def testCalculator(self):
        s = self.selenium
        s.open('/calculator.html?login')
        s.waitForText('value', '0')
        # Add 2 twice, then subtract 3.
        s.type('input', '2')
        s.click('dijit_form_Button_0')
        s.waitForText('value', '2')
        s.click('dijit_form_Button_0')
        s.waitForText('value', '4')
        s.type('input', '3')
        s.click('dijit_form_Button_1')
        s.waitForText('value', '1')

    def testForm(self):
        s = self.selenium
        s.open('/form.html?login')
        s.waitForValue('first_name', "Happy")
        s.verifyValue('last_name', 'Camper')
        s.verifyValue('age', '23')
        # XXX Iframe selection not implemented yet apparently
        #s.selectFrame('other_iframe')
        #s.verifyTextPresent("I've got a magic toenail")
        #s.selectFrame('description_iframe')
        #s.verifyTextPresent("10ft tall Razor sharp scales.")
        # test the pet combobox:
        s.verifyValue('pet', '')
        # 1) the combobox has a pulldown menu
        s.click(
            "//div[@id='widget_pet']/div"
            "/div[contains(@class, 'dijitDownArrowButton')][1]")
        # 2) the combobox has text input
        s.type('pet', 'Cockatiel')
        s.verifyValue('pet', 'Cockatiel')
        s.verifyValue('favorite_color', "Blue")
        s.assertChecked('happy')
        s.verifyValue('temperment', 'Right Neighborly')
        s.verifyValue('siblings', '1')
        # The grid starts out seeded with two address rows.
        s.verifyText(self._cell(1, 1), '123 fake street')
        s.verifyText(self._cell(1, 2), 'fakeville')
        s.verifyText(self._cell(1, 3), '9')
        s.verifyText(self._cell(2, 1), '345 false street')
        s.verifyText(self._cell(2, 2), 'falsetown')
        s.verifyText(self._cell(2, 3), '9001')
        # Submitting now must fail validation.
        s.click('ExampleForm.actions.register')
        s.verifyTextPresent('Submitting Form failed')
        s.verifyTextPresent('Value is too big')
        s.verifyTextPresent('Weight: Missing Input')
        s.click("//div[@id='dijit_Dialog_0']/div[1]/span[2]")
        s.type('weight', '23.5')
        # Fix the too-big value on the second row via the edit dialog.
        s.click(self._cell(2, 1))
        s.click('dijit_form_Button_14')
        s.type('addresses.awesomeness', '2')
        s.click('dijit_form_Button_29')
        # check delete & immediate add
        s.click(self._cell(2, 1))
        s.click('dijit_form_Button_15')
        # add a new record
        s.click('dijit_form_Button_13')
        s.type('addresses.street', 'The thirteenth Floor')
        s.type('addresses.city', 'Somewhere')
        s.type('addresses.awesomeness', '1')
        s.click('dijit_form_Button_29')
        s.click('ExampleForm.actions.register')
        s.verifyText(self._cell(2, 1), 'The thirteenth Floor')
        s.verifyText(self._cell(2, 2), 'Somewhere')
        s.verifyText(self._cell(2, 3), '1')
        # check the deleted item doesn't exist
        s.verifyTextNotPresent('345 false street')
        s.verifyTextNotPresent('falsetown')
        # now try a delete & save
        s.click(self._cell(1, 1))
        s.click('dijit_form_Button_15')
        s.click('ExampleForm.actions.register')
        s.verifyTextNotPresent('123 fake street')
        s.verifyTextNotPresent('fakeville')
/* zc.dojoform CKEditor widget.
 *
 * Renders the field as a plain <textarea> and replaces it with a
 * CKEDITOR instance when the enclosing form calls postStartup().
 */

/* Applications may point this at a CKEditor customConfig URL before any
   widget is built. */
var ckeditorCustomConfig = '';

var CKEditorWidget = function (config, parent, order) {
    var textarea = dojo.create('textarea', {'name': config.name}, parent);
    textarea.value = config.value;

    parent.postStartup = function (formNode) {
        var ckeditorConfig = (config.display_options != null) ?
            config.display_options : {};
        if (order != null) {
            ckeditorConfig.tabIndex = order;
        }
        if (ckeditorCustomConfig != '') {
            ckeditorConfig['customConfig'] = ckeditorCustomConfig;
        }
        var editor = CKEDITOR.replace(textarea, ckeditorConfig);

        /* Keep the underlying textarea in sync so ordinary form
           serialization picks up the editor contents. */
        var syncTextarea = function () {
            textarea.value = editor.getData();
        };
        window.addEventListener('beforeSubmit', syncTextarea, true);
        dojo.subscribe(zc.dojo.recordFormSubmittedTopic, syncTextarea);

        CKEDITOR.on('instanceReady', function (event) {
            if (formNode.fit != null) {
                formNode.fit();
            }
        });
    };

    /* subscribers to reset/set/save photo widget data -- registered
       exactly once, keyed on a flag stored on the constructor. */
    if (!zc.dojo.widgets['zc.dojoform.ckeditor.CKEditor'].subscribers) {
        dojo.subscribe(zc.dojo.beforeRecordFormSubmittedTopic, function (frm_id) {
            dojo.forEach(dojo.query('textarea', frm_id), function (textarea) {
                var editor = CKEDITOR.instances[textarea.name];
                if (editor) {
                    textarea.value = editor.getData();
                }
            });
        });
        dojo.subscribe(zc.dojo.dialogFormResetTopic, function (frm_id) {
            dojo.forEach(dojo.query('textarea', frm_id), function (textarea) {
                var editor = CKEDITOR.instances[textarea.name];
                if (editor) {
                    editor.setData('');
                }
            });
        });
        dojo.subscribe(zc.dojo.dialogFormUpdateTopic, function (frm_id, row) {
            dojo.forEach(dojo.query('textarea', frm_id), function (textarea) {
                textarea.value = row[textarea.name];
                var editor = CKEDITOR.instances[textarea.name];
                if (editor) {
                    editor.setData(row[textarea.name]);
                }
            });
        });
        zc.dojo.widgets['zc.dojoform.ckeditor.CKEditor'].subscribers = true;
    }
    return parent;
};

zc.dojo.widgets['zc.dojoform.ckeditor.CKEditor'] = CKEditorWidget;
dojo.provide('zc.dojo');
dojo.require('dijit.form.ValidationTextBox');
dojo.require('dijit.form.TextBox');
dojo.require('dijit.form.NumberSpinner');
dojo.require('dijit.form.FilteringSelect');
dojo.require('dijit.form.CheckBox');
dojo.require('dijit.form.ComboBox');
dojo.require('dijit.form.Button');
dojo.require('dijit.form.Form');
dojo.require('dijit.form.SimpleTextarea');
dojo.require('dijit.Editor');
dojo.require('dijit.layout.BorderContainer');
dojo.require('dijit.layout.ContentPane');
dojo.require('dijit.form.NumberTextBox');
dojo.require('dijit.Dialog');
dojo.require('dojo.data.ItemFileReadStore');
dojo.require('dojo.data.ItemFileWriteStore');
dojo.require("dojox.grid.cells.dijit");
dojo.require("dojox.grid.DataGrid");
dojo.require("dojox.grid.EnhancedGrid");
dojo.require("dojox.grid.enhanced.plugins.DnD");
dojo.require("dojox.grid.enhanced.plugins.Menu");
dojo.require("dojox.grid.enhanced.plugins.NestedSorting");
dojo.require("dojox.grid.enhanced.plugins.IndirectSelection");
// Registry of widget constructor functions, keyed by the zope.schema /
// zc.ajaxform dotted field-type name.
zc.dojo.widgets = {};
// Pub/sub topic names used to coordinate forms and their widgets.
zc.dojo.beforeContentFormSubmittedTopic = "ZC_DOJO_BEFORE_CONTENT_FORM_SUBMITTED";
zc.dojo.beforeRecordFormSubmittedTopic = "ZC_DOJO_BEFORE_RECORD_FORM_SUBMITTED";
zc.dojo.dialogFormResetTopic = "ZC_DOJO_DIALOG_FORM_RESET";
zc.dojo.dialogFormUpdateTopic = "ZC_DOJO_DIALOG_FORM_UPDATE";
// Collect grid rows from every record-list widget inside the form into a
// flat mapping of "<field>.<rowIndex>" -> value, ready to be posted.
// Returns undefined when args.form_id is not supplied.
zc.dojo.get_recordlist_data = function (args) {
    if (!args.form_id) {
        return;
    }
    var data = {};
    dojo.forEach(dojo.query('div.dojoxGrid', args.form_id), function (gridNode) {
        var grid = dijit.byId(gridNode.id);
        for (var row = 0; row < grid.rowCount; row += 1) {
            var item = grid.getItem(row);
            for (var field in item) {
                data[field + '.' + row] = grid.store.getValue(item, field);
            }
        }
    });
    return data;
};
// POST a form (or raw content) to the server and surface failures in
// dialogs.  args: url, task (label used in error dialog titles), optional
// form_id, content, success(data) and failure(error) callbacks.
zc.dojo.call_server = function (args) {
var content, k;
// Transport/server error path: the response body is expected to be JSON
// carrying either an 'error' message or a 'session_expired' flag.
// NOTE(review): dojo.fromJson will itself throw if responseText is not
// JSON (e.g. a proxy error page) -- confirm that is acceptable here.
var callback_error = function (error) {
var result;
result = dojo.fromJson(error.responseText);
if (!('error' in result) && !('session_expired' in result)) {
zc.dojo.system_error(args.task);
}
else if (result.session_expired) {
return zc.dojo.session_expired(error);
}
else if (result.error) {
var this_dialog = new dijit.Dialog({
title: args.task + ' failed',
content: result.error
});
this_dialog.show();
}
if (args.failure) {
args.failure(error);
}
};
// A 200 response may still carry per-field validation errors in
// data.errors; show them all in one dialog.
var callback_success = function (data) {
var result, error, errors;
if (dojo.isString(data)) {
data = dojo.fromJson(data);
}
if (data.errors) {
result = '';
errors = data.errors;
for (error in errors) {
result += errors[error] + '<br>';
}
var this_dialog = new dijit.Dialog({
title: args.task + ' failed',
content: dojo.create('div', {id: 'error_message', innerHTML: result})
});
this_dialog.show();
}
else if (args.success) {
args.success(data);
}
};
/* Subscribers might be listening to this event. Do not remove. */
dojo.publish(zc.dojo.beforeContentFormSubmittedTopic, [args.form_id]);
// Fold record-list grid rows into the posted content.
content = zc.dojo.get_recordlist_data(args);
if (args.content == null) {
args.content = {};
}
for (k in content) {
args.content[k] = content[k];
}
// Without a form node we expect (and auto-parse) a JSON response; with
// one, dojo serializes the form and the response is handled as text.
if (args.form_id == undefined) {
dojo.xhrPost({
url: args.url,
handleAs: "json",
content: args.content,
load: callback_success,
error: callback_error
});
}
else {
dojo.xhrPost({
url: args.url,
content: args.content,
form: args.form_id,
load: callback_success,
error: callback_error
});
}
};
// Backwards-compatible alias.
zc.dojo.submit_form = zc.dojo.call_server;
// Single-line text field: a ValidationTextBox whose regExp enforces the
// schema's min_size/max_size length constraints.
zc.dojo.widgets['zope.schema.TextLine'] = function (config, node, order) {
    var wconfig = zc.dojo.parse_config(config, order);
    if (config.max_size != undefined) {
        wconfig.maxLength = config.max_size;
        if (config.min_size) {
            wconfig.regExp = ".{" + config.min_size + "," + config.max_size + "}";
        } else {
            wconfig.regExp = ".{0," + config.max_size + "}";
        }
    } else if (config.min_size) {
        // BUG FIX: was config.mmin_size (undefined), which produced the
        // regExp ".{undefined,}" and broke validation for min-only fields.
        wconfig.regExp = ".{" + config.min_size + ",}";
    }
    return new dijit.form.ValidationTextBox(wconfig, node).domNode;
};
// Shallow-merge: copy every own property of b onto a, in place.
function update(a, b) {
    for (var key in b) {
        if (b.hasOwnProperty(key)) {
            a[key] = b[key];
        }
    }
}
// Password field: same length validation as TextLine, but rendered as an
// <input type="password">.
zc.dojo.widgets['zope.schema.Password'] = function (config, node, order) {
    var wconfig = zc.dojo.parse_config(config, order);
    wconfig.type = "password";
    if (config.max_size != undefined) {
        wconfig.maxLength = config.max_size;
        if (config.min_size) {
            wconfig.regExp = ".{" + config.min_size + "," + config.max_size + "}";
        } else {
            wconfig.regExp = ".{0," + config.max_size + "}";
        }
    } else if (config.min_size) {
        // BUG FIX: was config.mmin_size (undefined), which produced the
        // regExp ".{undefined,}" and broke validation for min-only fields.
        wconfig.regExp = ".{" + config.min_size + ",}";
    }
    return new dijit.form.ValidationTextBox(wconfig, node).domNode;
};
// Multi-line text: a SimpleTextarea.  Any display_options from the field
// definition override the generated widget properties.
zc.dojo.widgets['zope.schema.Text'] = function (config, node, order, readOnly) {
    var cfg = zc.dojo.parse_config(config, order);
    cfg.style = 'width:auto';
    if (config.display_options != null) {
        update(cfg, config.display_options);
    }
    return new dijit.form.SimpleTextarea(cfg, node).domNode;
};
// Rich-text editor: pairs a hidden TextBox (which carries the value for
// form submission) with a dijit.Editor, syncing editor -> hidden field on
// blur.
zc.dojo.widgets['zc.ajaxform.widgets.RichText'] =
function (config, node, order, readOnly) {
var wconfig = zc.dojo.parse_config(config, order);
var total_editor = dojo.create('div', {}, node);
var editor_for_form = new dijit.form.TextBox({
type: 'hidden',
name: wconfig.name,
value: wconfig.value
});
// iframes = :[
if (wconfig.style == null)
wconfig.style = '';
if (wconfig.width == null)
wconfig.width = '400px';
if (wconfig.height == null)
wconfig.height = '200px';
if (config.display_options != null) {
update(wconfig, config.display_options);
}
// Height is always forced to 100% (display_options cannot override it).
wconfig.height = '100%';
if (readOnly) {
wconfig.readOnly = true;
}
var editor = new dijit.Editor(wconfig);
total_editor.appendChild(editor_for_form.domNode);
total_editor.appendChild(editor.domNode);
editor.value = editor_for_form.getValue();
// Sync the visible editor back into the hidden form field on blur.
dojo.connect(editor, 'onBlur', function () {
editor_for_form.setValue(editor.getValue());
});
return total_editor;
};
// Hidden input: the value travels with the form but renders no visible
// control.
zc.dojo.widgets['zc.ajaxform.widgets.Hidden'] = function (config, node, order) {
    var cfg = zc.dojo.parse_config(config, order);
    cfg.type = 'hidden';
    return new dijit.form.TextBox(cfg, node).domNode;
};
// Extend the base widget config with dijit number constraints taken from
// the field's optional field_min/field_max bounds.
zc.dojo.parse_number_config = function (config, order) {
    var cfg = zc.dojo.parse_config(config, order);
    cfg.constraints = {};
    if (config.field_min != undefined) {
        cfg.constraints.min = config.field_min;
    }
    if (config.field_max != undefined) {
        cfg.constraints.max = config.field_max;
    }
    return cfg;
};
// Integer field: a NumberTextBox constrained to zero decimal places.
zc.dojo.widgets['zope.schema.Int'] = function (config, node, order) {
    var cfg = zc.dojo.parse_number_config(config, order);
    cfg.constraints.places = 0;
    return new dijit.form.NumberTextBox(cfg, node).domNode;
};
// Number field rendered with spinner arrows.
zc.dojo.widgets['zc.ajaxform.widgets.NumberSpinner'] = function (config, node, order) {
    return new dijit.form.NumberSpinner(
        zc.dojo.parse_number_config(config, order), node).domNode;
};
// Decimal field: plain NumberTextBox (no places restriction).
zc.dojo.widgets['zope.schema.Decimal'] = function (config, node, order) {
    return new dijit.form.NumberTextBox(
        zc.dojo.parse_number_config(config, order), node).domNode;
};
// Boolean field: a checkbox whose initial state comes from the value.
zc.dojo.widgets['zope.schema.Bool'] = function (config, node, order) {
    var cfg = zc.dojo.parse_config(config, order);
    cfg.checked = config.value;
    return new dijit.form.CheckBox(cfg, node).domNode;
};
// Read-only display of a plain value as a non-editable TextBox.
zc.dojo.widgets['zc.ajaxform.widgets.BasicDisplay'] = function (config, node, order) {
    var cfg = zc.dojo.parse_config(config, order);
    cfg.readOnly = true;
    return new dijit.form.TextBox(cfg, node).domNode;
};
// Read-only rich text: render the stored HTML inside an iframe.
zc.dojo.widgets['zc.ajaxform.widgets.RichTextDisplay'] = function (config, node, order) {
var iframe = dojo.create('iframe', {'frameborder': 1}, node);
// Writing is deferred to postStartup so contentDocument exists (the
// iframe must be attached to the document first).
iframe.postStartup = function (node) {
var doc = this.contentDocument;
doc.open();
doc.write(config.value);
doc.close();
};
return iframe;
};
// Shared config for vocabulary widgets: wraps config.values
// ([value, label] pairs) in an in-memory ItemFileReadStore keyed on
// 'value' and searched on 'label'.
var _choiceConfig = function (config, node, order) {
    var wconfig = zc.dojo.parse_config(config, order);
    var items = [];
    for (var idx in config.values) {
        items.push({
            value: config.values[idx][0],
            label: config.values[idx][1]
        });
    }
    wconfig.store = new dojo.data.ItemFileReadStore({
        data: {
            identifier: 'value',
            label: 'label',
            items: items
        }
    });
    wconfig.searchAttr = "label";
    return wconfig;
};
// Single-select validated against the vocabulary.
var makeChoice = function (config, node, order) {
    return new dijit.form.FilteringSelect(
        _choiceConfig(config, node, order), node).domNode;
};
zc.dojo.widgets['zope.schema.Choice'] = makeChoice;
// Combo box: vocabulary suggestions, but free text is also accepted.
var makeComboBox = function (config, node, order) {
    return new dijit.form.ComboBox(
        _choiceConfig(config, node, order), node).domNode;
};
zc.dojo.widgets['zc.ajaxform.widgets.ComboBox'] = makeComboBox;
// Build the dojox.grid column layout for a record-list field: one column
// per record widget plus a trailing drag-handle column.  File-type
// columns get a formatter that renders a thumbnail or filename from the
// JSON blob stored in the cell.
// (Removed an unused `colwidth` computation that was dead code.)
function build_layout(record) {
    var rc_wid, new_name;
    var record_layout = [];
    for (rc_wid in record.widgets) {
        rc_wid = dojo.clone(record.widgets[rc_wid]);
        // Column fields are namespaced as "<list name>.<field name>".
        new_name = record.name + '.' + rc_wid.name;
        rc_wid.name = new_name;
        rc_wid.id = new_name;
        var column = {
            name: rc_wid.fieldLabel,
            field: rc_wid.name,
            width: 'auto',
            widget_constructor: rc_wid.widget_constructor,
            rc_wid: rc_wid,
            draggable: false,
            cellStyles: 'vertical-align: top;'
        };
        if (rc_wid.type == "file") {
            column.formatter = function (v) {
                if (!v) {
                    return '';
                }
                var data = dojo.fromJson(v);
                if (data.thumbnail_tag != null) {
                    return unescape(data.thumbnail_tag);
                }
                if (data.thumbnail_url != null) {
                    return '<img src="' + unescape(data.thumbnail_url) + '" />';
                }
                if (data.filename != null) {
                    return data.filename;
                }
                return '';
            };
        }
        record_layout.push(column);
    }
    // Narrow trailing column that acts as the row drag handle.
    record_layout.push({
        name: '',
        field: '',
        width: '20px',
        noresize: true,
        cellStyles: 'text-align: right;',
        value: '<div>||</div>'
    });
    return record_layout;
}
// Build a plain record object {"name": suffix, "<list>.<field>": value,
// ...} for seeding the grid's ItemFileWriteStore.
// NOTE(review): values are routed through a hand-built JSON string with
// escape()/unescape(); when record_value is not supplied, rc_wid.value
// may be undefined and ends up as the string "undefined" -- confirm
// whether widget definitions always carry a value here.
function build_record(record, pnode, suffix, record_value) {
var rc_wid, indexed_name, k;
var record_json = '"name": "' + suffix + '", ';
for (rc_wid in record.widgets) {
rc_wid = dojo.clone(record.widgets[rc_wid]);
indexed_name = rc_wid.name;
rc_wid.name = record.name + '.' + indexed_name;
rc_wid.id = record.name + '.' + indexed_name;
if (record_value) {
// escape() keeps quotes/newlines from breaking the JSON string below.
rc_wid.value = escape(record_value[indexed_name] || '');
}
record_json += '"' + rc_wid.name + '": "' + rc_wid.value + '",';
}
var rec = dojo.fromJson('{' + record_json + '}');
// Undo the escape() applied above before handing the record out.
for (k in rec) {
rec[k] = unescape(rec[k]);
}
return rec;
}
// Lazily-built modal dialog used to add or edit one grid row.  The dialog
// holds a form with one widget per grid column; Save writes the values
// back into the grid's store (a new item when record_id is empty).
function build_record_form(widget_name, grid, index_map) {
var layout = grid.structure[0].cells;
var edit_dlg = new dijit.Dialog({
title: 'Add/Modify Record',
style: 'width: auto;',
doLayout: true
});
var rec_form = new dijit.form.Form({
method: 'POST',
style: 'max-height: 400px; overflow: auto;',
encType: 'multipart/form-data'
}, dojo.create('div', null, edit_dlg.domNode));
// Hidden field: identity of the row being edited ('' means "new row").
var record_input = new dijit.form.TextBox({
name: 'record_id',
type: 'hidden'
}, dojo.create('div', null, rec_form.domNode));
edit_dlg.form_widgets = [];
// One labelled widget per real grid column (drag-handle has no rc_wid).
dojo.forEach(layout, function (fld) {
if (fld.rc_wid) {
var rc_wid = dojo.clone(fld.rc_wid);
var order = index_map[rc_wid.name];
rc_wid.tabIndex = order;
var widget_div = dojo.create(
'div', {'class': 'widget', style: 'margin: 5px;'}, rec_form.domNode);
var label = dojo.create('label', {
innerHTML: rc_wid.fieldLabel + ': '
}, widget_div);
if (rc_wid.required == true) {
var span = dojo.create(
'span', {innerHTML: ' (required)'}, label);
dojo.addClass(span, 'status-marker');
}
dojo.create('br', null, widget_div);
var wid = zc.dojo.widgets[rc_wid.widget_constructor](
rc_wid,
dojo.create('div', {style: 'height: auto;'}, widget_div),
order);
edit_dlg.form_widgets.push(wid);
}
});
var buttons_cp = new dijit.layout.ContentPane(
{}, dojo.create('div', null, rec_form.domNode));
var buttons_div = dojo.create('div', null, buttons_cp.domNode);
var save_btn = new dijit.form.Button({
label: 'Save',
tabIndex: index_map[widget_name + '.dojo.save'],
onClick: function (e) {
dojo.publish(zc.dojo.beforeRecordFormSubmittedTopic, [rec_form.id]);
var record_data = dojo.formToObject(rec_form.domNode);
if (! record_data.record_id) {
// NOTE(review): '.' + grid.rowCount + 1 concatenates: rowCount 3
// yields ".31", not ".4".  Likely meant '.' + (grid.rowCount + 1);
// confirm the intended store-identity scheme before changing.
var row = {name: '.' + grid.rowCount + 1};
dojo.forEach(grid.structure[0].cells, function (fld) {
if (fld.rc_wid) {
row[fld.field] = record_data[fld.field];
}
});
grid.store.newItem(row);
grid.store.save();
}
else {
// Existing row: look it up by identity and update each cell.
grid.store.fetchItemByIdentity({
identity: record_data.record_id,
onItem: function (item) {
dojo.forEach(grid.structure[0].cells, function (fld) {
if (fld.rc_wid) {
grid.store.setValue(item, fld.field, record_data[fld.field]);
grid.store.save();
}
});
}
});
}
edit_dlg.hide();
}
}, dojo.create('div', null, buttons_div));
var cancel_btn = new dijit.form.Button({
label: 'Cancel',
tabIndex: index_map[widget_name + '.dojo.cancel'],
onClick: function (evt) {
edit_dlg.hide();
}
}, dojo.create('div', null, buttons_div));
edit_dlg.attr('content', rec_form);
edit_dlg.startup();
edit_dlg.editForm = rec_form;
// Widgets (e.g. CKEditor) may need a hook after the dialog is started.
dojo.forEach(edit_dlg.form_widgets, function (w) {
if (w.postStartup != null) {
w.postStartup(edit_dlg);
}
});
return edit_dlg;
}
// Populate the (lazily created) record dialog with row_value's data and
// show it.
function edit_record(widget_name, grid, row_value, index_map) {
grid.select.clearDrugDivs();
if (grid.edit_dlg == null) {
grid.edit_dlg = build_record_form(widget_name, grid, index_map);
}
// Gather record_id plus every real column value from the store.
var form_values = {record_id: grid.store.getValue(row_value, 'name')};
dojo.forEach(grid.structure[0].cells, function (fld) {
if (fld.rc_wid) {
form_values[fld.field] = grid.store.getValue(row_value, fld.field);
}
});
/* order of next two lines is important */
dojo.forEach(grid.edit_dlg.editForm.domNode.elements, function (ele) {
if (ele.name == 'record_id' || ele.name in form_values) {
var wid = dijit.byId(ele.id);
if (wid) {
wid.attr('value', form_values[ele.name]);
}
}
});
dojo.publish(
zc.dojo.dialogFormUpdateTopic, [grid.edit_dlg.editForm.id, form_values]);
grid.edit_dlg.show();
}
// Record-list field: renders config.value as an editable EnhancedGrid
// with drag-reorder plus New/Edit/Delete buttons (omitted when the record
// schema is readonly).
zc.dojo.widgets['zope.schema.List'] = function (config, pnode, order, widgets, index_map) {
var record, records;
var node = new dijit.layout.BorderContainer({
design: "headline",
gutters: "false"
}, pnode);
var rc = config.record_schema;
rc.name = config.name;
var num = 0;
var item_list = [];
records = config.value;
// Seed one store item per existing record; 'name' ('.0', '.1', ...) is
// the store identity.
for (record in records) {
record = records[record];
item_list.push(build_record(rc, node, '.' + String(num), record));
num += 1;
}
var records_data = {
"items": item_list,
"identifier": "name",
"label": "name"
};
var records_jsonStore = new dojo.data.ItemFileWriteStore({data: records_data});
var record_fields = build_layout(rc);
var layout = [{
cells: record_fields
}];
var grid_container = new dijit.layout.ContentPane({
autoWidth: true,
autoHeight: true,
doLayout: true
}, dojo.create('div', null, node.domNode));
var grid = new dojox.grid.EnhancedGrid({
query: { name: '*' },
store: records_jsonStore,
structure: layout,
escapeHTMLInData: false,
elastic: true,
rowSelector: '20px',
autoHeight: true,
plugins: {
nestedSorting: true,
dnd: true
}
});
grid_container.attr('content', grid);
// To limit DnD activity to the DnD Handle.
grid.select.exceptColumnsTo = record_fields.length - 2;
grid.select.getExceptionalColOffsetWidth = dojo.hitch(grid.select, function () {
// We override the method in dojox.grid.enhanced.dnd._DndMovingManager
// because we don't use the IndirectSelection plugin, but still want DnD.
// NOTE(review): the parenthesized assignment below leaks
// normalizedOffsetWidth as an implicit global.
var offsetWidth = (normalizedOffsetWidth = 0);
dojo.forEach(this.getHeaderNodes(), function (node, index) {
if (index <= this.exceptColumnsTo) {
var coord = dojo.coords(node);
offsetWidth += coord.w;
}
}, this);
normalizedOffsetWidth = offsetWidth;
return normalizedOffsetWidth > 0 ? normalizedOffsetWidth : 0;
});
if (!rc.readonly) {
// Hovering the drag-handle column arms row drag & drop.
dojo.connect(grid, 'onCellMouseOver', function (e) {
if (e.cell.draggable) {
grid.select.cleanAll();
grid.selection.select(e.rowIndex);
grid.select.clearDrugDivs();
grid.select.addRowMover(e.rowIndex, e.rowIndex);
}
else {
grid.select.clearDrugDivs();
}
});
dojo.connect(grid, 'onCellClick', function (e) {
grid.selection.select(e.rowIndex);
});
// Double-click opens the edit dialog for the clicked row.
dojo.connect(grid, 'onCellDblClick', function (e) {
grid.selection.select(e.rowIndex);
edit_record(config.name, grid, grid.selection.getSelected()[0], index_map);
});
}
if (!rc.readonly) {
var new_btn = new dijit.form.Button({
label: 'New',
tabIndex: index_map[config.name + '.dojo.new'],
onClick: function (evt) {
if (grid.edit_dlg == null) {
grid.edit_dlg = build_record_form(config.name, grid, index_map);
}
grid.edit_dlg.reset();
dojo.publish(zc.dojo.dialogFormResetTopic, [grid.edit_dlg.editForm.id]);
grid.select.cancelDND();
grid.edit_dlg.show();
}
}, dojo.create('div', null, node.domNode));
var edit_btn = new dijit.form.Button({
label: 'Edit',
tabIndex: index_map[config.name + '.dojo.edit'],
onClick: function (evt) {
var row_values = grid.selection.getSelected();
if (row_values.length != 1) {
var error_dialog = new dijit.Dialog({
title: 'Error!',
content: 'Please select a single row to Edit.'
});
error_dialog.show();
}
// NOTE(review): when the selection is not a single row, the error
// dialog is shown but execution still falls through to edit_record
// with an undefined row -- an early return above looks intended.
edit_record(config.name, grid, row_values[0], index_map);
}
}, dojo.create('div', null, node.domNode));
var delete_btn = new dijit.form.Button({
label: 'Delete',
tabIndex: index_map[config.name + '.dojo.delete'],
onClick: function (evt) {
var selected = grid.selection.getSelected();
dojo.forEach(selected, grid.store.deleteItem, grid.store);
grid.store.save();
}
}, dojo.create('div', null, node.domNode));
}
// Grid startup must wait until the widget is attached to the document.
pnode.postStartup = function (node) {
grid.startup();
};
return pnode;
};
// Build a complete dijit form from a zc.ajaxform definition: a
// BorderContainer with an optional left widget column, a center (right)
// column, and a bottom pane of action buttons.  Returns the container,
// augmented with a fit() helper that resizes it to its content.
zc.dojo.build_form = function (config, pnode, tabIndexOffset)
{
var action, actions, action_index;
if (!tabIndexOffset) {
tabIndexOffset = 0;
}
var form = dojo.create('form', {
id: config.definition.prefix,
style: 'position:absolute;'
}, pnode);
dojo.addClass(form, 'zcForm');
var node = new dijit.layout.BorderContainer({
design: "headline",
gutters: "false",
liveSplitters: true,
style: "height:100%; width:100%;"
}, form);
var left_pane = false;
var right_pane = new dijit.layout.ContentPane({
region: 'center',
splitter: true
});
node.addChild(right_pane);
var bottom_pane = new dijit.layout.ContentPane({
region: 'bottom'
});
node.addChild(bottom_pane);
var widgets = [];
var index_map = zc.dojo.tab_index_map(config.definition);
for (var i in config.definition.widgets)
{
var cp = new dijit.layout.ContentPane({}, dojo.create('div'));
dojo.addClass(cp.domNode, 'widget');
var widget = config.definition.widgets[i];
// NOTE(review): right_pane is always constructed above, so this branch
// can never be taken.
if (!(left_pane) && (!right_pane)) {
node.addChild(cp);
}
else if (config.definition.left_fields[widget.name]) {
// First left-column widget creates the left pane lazily.
if (!left_pane) {
left_pane = new dijit.layout.ContentPane({
region: 'left',
style: 'width: 60%',
splitter: true
});
right_pane.style.width = '40%';
node.addChild(left_pane);
}
left_pane.domNode.appendChild(cp.domNode);
}
else {
right_pane.domNode.appendChild(cp.domNode);
}
// Hidden widgets get no label or required marker.
if (widget.widget_constructor !== 'zc.ajaxform.widgets.Hidden') {
var label = dojo.create(
'label', {innerHTML: widget.fieldLabel}, cp.domNode);
if (widget.required == true) {
var span = dojo.create(
'span', {innerHTML: ' (required)'}, label);
dojo.addClass(span, 'status-marker');
}
dojo.create('br', null, cp.domNode);
}
var wid = zc.dojo.widgets[widget.widget_constructor](
widget,
dojo.create('div'),
index_map[widget.name] + tabIndexOffset,
widgets,
index_map
);
cp.domNode.appendChild(wid);
widgets.push(wid);
}
// Resize the container to its tallest child plus the action bar.
node.fit = function () {
var margin = 17;
var getHeight = function (node) {
return node.scrollHeight;
};
var heights = dojo.map(
node.getChildren(),
function (child) { return getHeight(child.domNode); }
);
var max = function (xs) {
var m = null;
var x;
for (var i in xs) {
x = xs[i];
if (x > m) {
m = x;
}
}
return m;
};
var h = max(heights) + getHeight(bottom_pane.domNode) + margin;
node.domNode.style.height = h + 'px';
};
// Synthetic 'beforeSubmit' DOM event lets widgets (e.g. CKEditor) sync
// their values before the form is serialized.
var fireSubmitEvent = function () {
var event = document.createEvent('Event');
event.initEvent('beforeSubmit', true, true);
document.dispatchEvent(event);
};
if (bottom_pane) {
if (config.definition.actions != undefined) {
actions = config.definition.actions;
for (action_index in actions) {
action = actions[action_index];
var button = new dijit.form.Button({
label: action.label,
id: action.name,
onClick: fireSubmitEvent,
type: 'button',
tabIndex: index_map[action.name] + tabIndexOffset
});
bottom_pane.domNode.appendChild(button.domNode);
}
}
}
// Give every widget its post-layout hook.
dojo.forEach(widgets, function (widget, idx) {
if (widget.postStartup != null) {
widget.postStartup(node);
}
});
return node;
};
/* Return a mapping from name to tab-index for all widgets in the form.
   Ordering: left-column widgets (each followed by its record-list grid
   buttons) first, then right-column widgets, then form actions, then the
   widgets of any record-list edit dialogs (plus their Save/Cancel). */
zc.dojo.tab_index_map = function (definition) {
    var indices = {};
    var left = definition.left_fields;
    var right = [];
    var index = 0;
    var widget, k, i, inner_k;  // BUG FIX: inner_k was an implicit global
    var list_widgets = [];
    for (k in definition.widgets) {
        widget = definition.widgets[k];
        if (left[widget.name]) {
            indices[widget.name] = index;
            index += 1;
            if (widget.widget_constructor == 'zope.schema.List') {
                /* for the New, Edit, and Delete buttons */
                dojo.forEach(['new', 'edit', 'delete'], function (item) {
                    indices[widget.name + '.dojo.' + item] = index;
                    index += 1;
                });
                for (inner_k in widget.record_schema.widgets) {
                    if (widget.record_schema.widgets.hasOwnProperty(inner_k)) {
                        var list_widget = widget.record_schema.widgets[inner_k];
                        list_widgets.push(widget.name + '.' + list_widget.name);
                    }
                }
                dojo.forEach(['save', 'cancel'], function (item) {
                    list_widgets.push(widget.name + '.dojo.' + item);
                });
            }
        } else {
            right.push(widget);
        }
    }
    for (i in right) {
        widget = right[i];
        indices[widget.name] = index;
        index += 1;
    }
    for (k in definition.actions) {
        widget = definition.actions[k];
        indices[widget.name] = index;
        index += 1;
    }
    /* Handle widgets for list type if any */
    dojo.forEach(list_widgets, function (item, idx, arr) {
        indices[item] = index;
        index += 1;
    });
    return indices;
};
// Tell the user their session ended.  Callers (call_server) pass the
// error object, but it is intentionally unused here.
zc.dojo.session_expired = function () {
    // BUG FIX: dijit.Dialog was invoked without `new`; the widget must be
    // constructed (as zc.dojo.system_error does) for .show() to work.
    var this_dialog = new dijit.Dialog({
        title: "Session Expired",
        content: "You will need to log-in again." });
    this_dialog.show();
};
// Generic failure dialog for errors carrying no server-provided detail.
zc.dojo.system_error = function (task) {
    new dijit.Dialog({
        title: "Failed",
        content: task + " failed for an unknown reason"
    }).show();
};
// Translate a zc.ajaxform field definition into the common subset of
// dijit widget properties shared by every widget constructor.
// NOTE(review): the key is lowercase "readonly"; dijit form widgets use
// camelCase "readOnly" -- confirm which consumers rely on this spelling.
zc.dojo.parse_config = function (config, order) {
    return {
        required: config.required,
        id: config.name,
        name: config.name,
        promptMessage: config.fieldHint,
        tabIndex: order,
        value: config.value,
        readonly: config.readonly || false,
        left: config.left
    };
};
import zope.interface
class IExtrinsicReferences(zope.interface.Interface):
    """An object that stores extrinsic references to another object

    All objects must be adaptable to
    zope.app.keyreference.interfaces.IKeyReference.
    """

    def add(obj, value):
        """Add an object and an associated value to the registry.

        Both object and value must be adaptable to IKeyReference.
        Multiple values may be stored for a single key.  Each value is
        only stored once; comparisons are performed using the value's
        IKeyReference hash.
        """

    def update(obj, values):
        """For given object, add all values in iterable values.

        Object and each value must be adaptable to IKeyReference.
        Identical values (as determined by IKeyReference) are collapsed
        to a single instance (so, for instance, a set of [A, B, B, C, B]
        will be collapsed to a logical set of A, B, C).
        """

    def get(obj):
        """Retrieve an iterable of the values associated with the object.

        If there are no references for `obj`, an iterable with no
        entries is returned.
        """

    def remove(obj, value):
        """Remove the specified value associated with the object.

        Comparisons are made with the IKeyReference hashes.
        If `value` is not set for `obj`, raises KeyError.
        """

    def contains(obj, value):
        """Return a boolean: whether the obj : value pair exists."""

    def discard(obj, value):
        """Remove the specified value associated with the object.

        Comparisons are made with the IKeyReference hashes.
        If `value` is not set for `obj`, silently ignores.
        """

    def set(obj, values):
        """Set the values for obj to the values in the given iterable.

        Replaces any previous values for obj.  Object and each value
        must be adaptable to IKeyReference.  Identical values (as
        determined by IKeyReference) are collapsed to a single instance
        (so, for instance, values of [A, B, B, C, B] will be collapsed
        to a logical set of A, B, C).

        Setting an empty values is the canonical way of clearing values
        for an object.
        """
====================
Extrinsic References
====================
Extrinsic reference registries record a key and one or more values to which
they refer. The key and all values must be adaptable to
zope.app.keyreference.interfaces.IKeyReference.
>>> import zc.extrinsicreference
>>> references = zc.extrinsicreference.ExtrinsicReferences()
>>> references.add(1, 2)
Traceback (most recent call last):
...
TypeError: ('Could not adapt', 1...
>>> from zope import interface, component
>>> from zope.app.keyreference.interfaces import IKeyReference
>>> class IMyObject(interface.Interface):
... "An interface for which we register an IKeyReference adapter"
... id = interface.Attribute("An id unique to IMyObject instances")
...
>>> class MyObject(object):
... interface.implements(IMyObject)
... _id_counter = 0
... @classmethod
... def _getId(cls):
... val = cls._id_counter
... cls._id_counter += 1
... return val
... def __init__(self):
... self.id = self._getId()
...
>>> class DummyKeyReference(object):
... interface.implements(IKeyReference)
... component.adapts(IMyObject)
... key_type_id = 'zc.extrinsicreference.doctest'
... def __init__(self, obj):
... self.object = obj
... def __call__(self):
... """Get the object this reference is linking to.
... """
... return self.object
... def __hash__(self):
... """Get a unique identifier of the referenced object.
... """
... return hash(self.object.id)
... def __cmp__(self, other):
... """Compare the reference to another reference.
... """
... if self.key_type_id == other.key_type_id:
... return cmp(self.object.id, other.object.id)
... return cmp(self.key_type_id, other.key_type_id)
...
>>> component.provideAdapter(DummyKeyReference)
>>> object1 = MyObject()
>>> references.add(object1, 2)
Traceback (most recent call last):
...
TypeError: ('Could not adapt', 2...
>>> value1 = MyObject()
>>> value2 = MyObject()
>>> references.add(object1, value1)
>>> references.add(object1, value2)
Values can be retrieved by their key:
>>> set(references.get(object1)) == set((value1, value2))
True
References can be removed:
>>> references.remove(object1, value1)
>>> list(references.get(object1)) == [value2]
True
But if the reference is not registered, removing it raises a KeyError.
>>> references.remove(object1, value1)
Traceback (most recent call last):
...
KeyError:...
>>> object2 = MyObject()
>>> references.remove(object2, value2)
Traceback (most recent call last):
...
KeyError:...
If you prefer to silently ignore these errors, use `discard`.
>>> references.discard(object1, value1)
>>> references.discard(object2, value2)
Otherwise, you can use `contains` to determine if the reference exists:
>>> references.contains(object1, value1)
False
>>> references.contains(object2, value2)
False
>>> references.contains(object1, value2)
True
If a key has no associated values, an empty iterable is returned:
>>> references.discard(object1, value2)
>>> list(references.get(object1))
[]
Adding a value more than once does not cause the value to be included
in the result sequence more than once:
>>> references.add(object1, value1)
>>> references.add(object1, value1)
>>> list(references.get(object1)) == [value1]
True
The `set` method destructively sets the given values for the object. Repeated
objects are collapsed to a single instance.
>>> references.set(object1, (value2, object2, value2, value2, object2))
>>> references.contains(object1, value1)
False
>>> len(list(references.get(object1)))
2
>>> set(references.get(object1)) == set((value2, object2))
True
>>> references.set(object1, ())
>>> len(list(references.get(object1)))
0
The `update` method adds values to the previous values, non-destructively.
>>> references.add(object1, value1)
>>> references.update(object1, (value2, object2, value2))
>>> len(list(references.get(object1)))
3
>>> set(references.get(object1)) == set((value1, value2, object2))
True
| zc.extrinsicreference | /zc.extrinsicreference-0.3.0.tar.gz/zc.extrinsicreference-0.3.0/src/zc/extrinsicreference/README.txt | README.txt |
"""Extrinsic references implementation"""
from zc.extrinsicreference.interfaces import IExtrinsicReferences
import BTrees
import persistent
import zope.app.keyreference.interfaces
import zope.component
import zope.interface
class ExtrinsicReferences(persistent.Persistent):
    """Persistent registry of extrinsic references from objects to values.

    Both objects and values are stored as key references, so anything
    adaptable to zope.app.keyreference.interfaces.IKeyReference may
    participate on either side.
    """

    zope.interface.implements(IExtrinsicReferences)

    # To be usable as an ILocalUtility we have to have these.
    __parent__ = __name__ = None

    def __init__(self):
        # IKeyReference(obj) -> OOTreeSet of IKeyReference(value)
        self.references = BTrees.OOBTree.OOBTree()

    def add(self, obj, value):
        """Register a reference from obj to value; duplicates collapse."""
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key)
        if refs is None:
            refs = self.references[key] = BTrees.OOBTree.OOTreeSet()
        refs.insert(zope.app.keyreference.interfaces.IKeyReference(value))

    def update(self, obj, values):
        """Add references from obj to each of values, non-destructively."""
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key)
        if refs is None:
            refs = self.references[key] = BTrees.OOBTree.OOTreeSet()
        refs.update(zope.app.keyreference.interfaces.IKeyReference(v)
                    for v in values)

    def remove(self, obj, value):
        """Remove the reference from obj to value.

        Raises KeyError if the reference is not registered.
        """
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key)
        if refs is not None:
            # raises KeyError when the value isn't found
            refs.remove(zope.app.keyreference.interfaces.IKeyReference(value))
        else:
            raise KeyError("Object and value pair does not exist")

    def discard(self, obj, value):
        """Remove the reference from obj to value, ignoring missing ones."""
        try:
            self.remove(obj, value)
        except KeyError:
            pass

    def contains(self, obj, value):
        """Return whether a reference from obj to value is registered."""
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key)
        if refs is not None:
            return zope.app.keyreference.interfaces.IKeyReference(value) in refs
        return False

    def set(self, obj, values):
        """Destructively replace obj's references with the given values."""
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key)
        # Build a real list (not map()) so the emptiness test below also
        # works on Python 3, where map() returns an always-truthy iterator.
        vals = [zope.app.keyreference.interfaces.IKeyReference(v)
                for v in values]
        if not vals:
            if refs is not None:
                # No values at all: drop the bucket entirely.
                del self.references[key]
        else:
            if refs is None:
                refs = self.references[key] = BTrees.OOBTree.OOTreeSet()
            else:
                refs.clear()
            refs.update(vals)

    def get(self, obj):
        """Iterate over the objects referenced from obj (possibly empty)."""
        key = zope.app.keyreference.interfaces.IKeyReference(obj)
        refs = self.references.get(key, ())
        for kr in refs:
            yield kr()
def registerShortcut(shortcut, event):
    """Subscriber: record an extrinsic reference for a new shortcut."""
    utility = zope.component.queryUtility(IExtrinsicReferences, 'shortcuts')
    if utility is None:
        return
    # We use raw_target because we don't want a proxy.
    utility.add(shortcut.raw_target, shortcut)
def unregisterShortcut(shortcut, event):
    """Subscriber: drop the extrinsic reference for a removed shortcut."""
    utility = zope.component.queryUtility(IExtrinsicReferences, 'shortcuts')
    if utility is None:
        return
    # We use raw_target because we don't want a proxy.
    utility.discard(shortcut.raw_target, shortcut)
=======
Changes
=======
2.0 (2023-02-06)
----------------
- Add support for Python 3.8, 3.9, 3.10, 3.11.
- Drop support for Python 2.7, 3.5, 3.6.
1.1 (2019-02-11)
----------------
- Fix ZCML configuration issue if the ``[mruwidget]`` extra was not installed.
1.0 (2019-01-11)
----------------
Features
++++++++
- Claim support for Python 3.5, 3.6, 3.7, PyPy and PyPy3.
Bugfixes
++++++++
- Fix a ``NameError`` in ``BaseVocabularyDisplay.render()``.
- Actually pass a ``missing_value`` set on the ``Combination`` field to the
containing fields.
Caveats
+++++++
- Installation of ``MruSourceInputWidget`` and ``TimeZoneWidget`` requires the
``[mruwidget]`` extra to break dependency on ``zc.resourcelibrary`` for
projects which do not need it.
0.5 (2016-08-02)
----------------
- Bind fields that are contained in a ``zc.form.field.Combination`` to fix the
``context`` of those fields.
0.4 (2016-01-12)
----------------
- Get rid of the `zope.app.pagetemplate` dependency.
0.3 (2014-04-23)
----------------
- Remove requirement, that ``zc.form.field.Combination`` needs at least
two subfields.
0.2 (2011-09-24)
----------------
- Got rid of ``zope.app.form`` dependency by requiring at least
``zope.formlib`` 4.0.
- Got rid of ``zope.app.component`` dependency by requiring at least
``zope.component`` 3.8.
- Depending on ``zope.catalog`` instead of ``zope.app.catalog``.
- Depending on ``zope.security`` instead of ``zope.app.security``.
- Depending on ``zope.app.wsgi`` >=3.7 instead of ``zope.app.testing`` for
test setup.
- Depending on ``zope.browserpage`` and ``zope.container`` instead of
``zope.app.publisher``.
- Got rid of the following dependencies:
- ``zope.app.basicskin``
- ``zope.app.securitypolicy``
- ``zope.app.zapi``
- ``zope.app.zcmlfiles``
- Fixed tests to run with ``zope.schema`` >= 3.6.
- Made package fit to run on ZTK 1.1.
- Moved test dependencies to `test` extra.
- Using Python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
0.1
---
- Exception views are now unicode aware. They used to break on translated
content.
- Added use_default_for_not_selected to Union field to use default
value even if sub field is not selected.
| zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/CHANGES.rst | CHANGES.rst |
import zope.catalog.interfaces
import zope.index.text.parsetree
import zope.index.text.queryparser
from zope import component
from zope import i18n
from zope import interface
from zope import schema
from zope.interface.exceptions import DoesNotImplement
from zope.schema.interfaces import IField
from zope.schema.interfaces import ValidationError
from zope.schema.interfaces import WrongType
from zc.form import interfaces
from zc.form.i18n import _
# i18n message constants for the validation errors raised in this module.
_no_unioned_field_validates = _(
    "No unioned field validates ${value}.")
_range_less_error = _("${minimum} must be less than ${maximum}.")
_range_less_equal_error = _(
    "${minimum} must be less than or equal to ${maximum}.")
_combination_wrong_size_error = _("The value has the wrong number of members")
_combination_not_a_sequence_error = _("The value is not a sequence")
_bad_query = _("Invalid query.")
# Union field that accepts other fields...
class MessageValidationError(ValidationError):
    """ValidationError carrying an (optionally i18n-mapped) message."""

    def __init__(self, message, mapping=None):
        if mapping is None:
            self.message = message
        else:
            # Interpolate the mapping into an i18n Message.
            self.message = i18n.Message(message, mapping=mapping)
        self.args = (message, mapping)

    def doc(self):
        return self.message
@interface.implementer(interfaces.IExtendedField)
class BaseField(schema.Field):
    """Field with a callable as default and a tuple of constraints.

    >>> def secure_password(field, value):
    ...     if len(value) < 8:
    ...         raise schema.ValidationError('Password too short.')
    ...
    >>> class IDummy(interface.Interface):
    ...     suggested_password = BaseField(
    ...         title=u'Suggested Password',
    ...         default_getter=lambda context: u'asdf',
    ...         constraints=(secure_password, ))
    ...
    >>> f = IDummy['suggested_password'].bind(None) # use None as context
    >>> interfaces.IExtendedField.providedBy(f)
    True
    >>> f.__name__
    'suggested_password'
    >>> print(f.title)
    Suggested Password
    >>> print(f.default)
    asdf
    >>> f.validate(u'123456789')
    >>> f.validate(u'asdf')
    Traceback (most recent call last):
    ...
    ValidationError: Password too short.

    >>> class IDummy2(interface.Interface):
    ...     invalid_default = BaseField(
    ...         title=u'Field with invalid default',
    ...         default=u'standard',
    ...         default_getter=lambda context: u'get default')
    Traceback (most recent call last):
    ...
    TypeError: may not specify both a default and a default_getter
    """

    # Each constraint is called as constraint(field, value) and raises on
    # invalid input; see _validate below.
    constraints = ()
    # _default backs the `default` property; default_getter, when set,
    # computes the default dynamically from the bound context.
    _default = default_getter = None

    def __init__(self, constraints=(), default_getter=None, **kw):
        self.constraints = constraints
        if default_getter is not None and 'default' in kw:
            raise TypeError(
                'may not specify both a default and a default_getter')
        super().__init__(**kw)
        # Assigned after super().__init__() so the `default` property
        # setter's assert (default_getter is None) holds while the base
        # class initializes the field.
        self.default_getter = default_getter

    def _validate(self, value):
        super()._validate(value)
        if value != self.missing_value:
            # Extra constraints only apply to real (non-missing) values.
            for constraint in self.constraints:
                constraint(self, value)

    @property
    def default(self):
        # Prefer the dynamic getter, called with the bound context.
        if self.default_getter is not None:
            return self.default_getter(self.context)
        else:
            return self._default

    @default.setter
    def default(self, value):
        assert self.default_getter is None
        self._default = value
@interface.implementer(interfaces.IOptionField)
class Option(BaseField):
    """A field whose only acceptable value is predefined."""

    def __init__(self, value=None, value_getter=None,
                 identity_comparison=False, **kw):
        # Exactly one of value / value_getter may be supplied, and an
        # option field can never be required (its value is fixed anyway).
        assert (value is None) ^ (value_getter is None)
        assert not kw.get('required')
        self.value = value
        self.value_getter = value_getter
        self.identity_comparison = identity_comparison
        kw['required'] = False
        super().__init__(**kw)

    def getValue(self):
        """Return the option's value, computing it from context if needed."""
        if self.value_getter is None:
            return self.value
        return self.value_getter(self.context)

    def _validate(self, value):
        if value == self.missing_value:
            return
        expected = self.getValue()
        if self.identity_comparison:
            if expected is not value:
                raise WrongType
        elif expected != value:
            raise WrongType
@interface.implementer(interfaces.IUnionField)
class Union(BaseField):
    """Union field allows a schema field to hold one of many other field types.

    For instance, you might want to make a field that can hold
    a duration *or* a date, if you are working with a PIM app. Or perhaps
    you want to have a field that can hold a string from a vocabulary *or* a
    custom string. Both of these examples can be accomplished in a variety of
    ways--the union field is one option.

    The second example is more easily illustrated. Here is a union field that
    is a simple way of allowing "write-ins" in addition to selections from a
    choice. We'll be very explicit about imports, in part so this can be
    trivially moved to a doc file test.

    Notice as you look through the example that field order does matter: the
    first field as entered in the field list that validates is chosen as the
    validField; thus, even though the options in the Choice field would also
    validate in a TextLine, the Choice field is identified as the "validField"
    because it is first.

    >>> class IDummy(interface.Interface):
    ...     cartoon_character = Union((
    ...         schema.Choice(
    ...             title=u'Disney',
    ...             description=u'Some tried-and-true Disney characters',
    ...             values=(u'Goofy',u'Mickey',u'Donald',u'Minnie')),
    ...         schema.TextLine(
    ...             title=u'Write-in',
    ...             description=u'Name your own!')),
    ...         required=True,
    ...         title=u'Cartoon Character',
    ...         description=u'Your favorite cartoon character')
    ...
    >>> f = IDummy['cartoon_character']
    >>> interfaces.IUnionField.providedBy(f)
    True
    >>> f.__name__
    'cartoon_character'
    >>> isinstance(f.fields[0], schema.Choice)
    True
    >>> isinstance(f.fields[1], schema.TextLine)
    True
    >>> f.fields[0].__name__ != f.fields[1].__name__
    True
    >>> len(f.fields)
    2
    >>> print(f.title)
    Cartoon Character
    >>> f.validate(u'Goofy')
    >>> f.validField(u'Goofy') is f.fields[0]
    True
    >>> f.validate(u'Calvin')
    >>> f.validField(u'Calvin') is f.fields[1]
    True
    >>> f.validate(42)
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'No unioned field validates ${value}.', {'value': 42})

    That's a working example. Now lets close with a couple of examples that
    should fall over.

    You must union at least two fields:

    >>> f = Union((schema.TextLine(title=u'Foo Text Line!'),), title=u'Foo')
    Traceback (most recent call last):
    ...
    ValueError: union must combine two or more fields

    And, unsurprisingly, they must actually be fields:

    >>> from zope.interface.exceptions import DoesNotImplement
    >>> try:
    ...     f = Union(('I am not a number.', 'I am a free man!'), title=u'Bar')
    ... except DoesNotImplement:
    ...     print("Not a field")
    ...
    Not a field

    Binding a union field also takes care of binding the contained fields:

    >>> context = object()
    >>> bound_f = f.bind(context)
    >>> bound_f.context is context
    True
    >>> bound_f.fields[0].context is context
    True
    >>> bound_f.fields[1].context is context
    True
    """ # noqa

    fields = ()
    use_default_for_not_selected = False

    def __init__(self, fields, use_default_for_not_selected=False, **kw):
        if len(fields) < 2:
            raise ValueError(_("union must combine two or more fields"))
        for ix, field in enumerate(fields):
            if not IField.providedBy(field):
                raise DoesNotImplement(IField)
            # Give each sub-field a stable, order-based name.
            field.__name__ = "unioned_%02d" % ix
        self.fields = tuple(fields)
        self.use_default_for_not_selected = use_default_for_not_selected
        super().__init__(**kw)

    def bind(self, object):
        clone = super().bind(object)
        # We need to bind the fields too, e.g. for Choice fields
        clone.fields = tuple(field.bind(object) for field in clone.fields)
        return clone

    def validField(self, value):
        """Return first valid field, or None."""
        for field in self.fields:
            try:
                field.validate(value)
            except ValidationError:
                pass
            else:
                # First field in declaration order that accepts the value.
                return field

    def _validate(self, value):
        if self.validField(value) is None:
            raise MessageValidationError(_no_unioned_field_validates,
                                         {'value': value})
class OrderedCombinationConstraint:
    """Constrain a combination's members to be ordered.

    By default values must be non-strictly increasing; ``may_be_equal``
    and ``decreasing`` select the other three variants.
    """

    def __init__(self, may_be_equal=True, decreasing=False):
        self.may_be_equal = may_be_equal
        self.decreasing = decreasing

    def __call__(self, field, value):
        # can assume that len(value) == len(field.fields)
        previous = None
        for current, subfield in zip(value, field.fields):
            if current == subfield.missing_value:
                continue
            if previous is not None:
                # Normalize so that lo must precede hi in the required
                # order, regardless of direction.
                if self.decreasing:
                    lo, hi = current, previous
                else:
                    lo, hi = previous, current
                if self.may_be_equal:
                    if lo > hi:
                        raise MessageValidationError(
                            _range_less_equal_error,
                            {'minimum': lo, 'maximum': hi})
                elif lo >= hi:
                    raise MessageValidationError(
                        _range_less_error,
                        {'minimum': lo, 'maximum': hi})
            previous = current
@interface.implementer(interfaces.ICombinationField)
class Combination(BaseField):
    """a combination of two or more fields, all of which may be completed.

    It accepts two or more fields. It also accepts a 'constraints' argument.
    Unlike the usual 'constraint' argument (which is also available), the
    constraints should be a sequence of callables that take a field and a
    value, and they should raise an error if there is a problem.

    >>> from zc.form.field import Combination, OrderedCombinationConstraint
    >>> from zope import schema, interface
    >>> class IDemo(interface.Interface):
    ...     publication_range = Combination(
    ...         (schema.Date(title=u'Begin', required=False),
    ...          schema.Date(title=u'Expire', required=True)),
    ...         title=u'Publication Range',
    ...         required=True,
    ...         constraints=(OrderedCombinationConstraint(),))
    ...
    >>> f = IDemo['publication_range']
    >>> interfaces.ICombinationField.providedBy(f)
    True
    >>> f.__name__
    'publication_range'
    >>> isinstance(f.fields[0], schema.Date)
    True
    >>> isinstance(f.fields[1], schema.Date)
    True
    >>> print(f.title)
    Publication Range
    >>> print(f.fields[0].title)
    Begin
    >>> print(f.fields[1].title)
    Expire
    >>> import datetime
    >>> f.validate((datetime.date(2005, 6, 22), datetime.date(2005, 7, 10)))
    >>> f.validate((None, datetime.date(2005, 7, 10)))
    >>> f.validate((datetime.date(2005, 6, 22), None))
    Traceback (most recent call last):
    ...
    RequiredMissing: combination_01
    >>> f.validate(('foo', datetime.date(2005, 6, 22)))
    Traceback (most recent call last):
    ...
    WrongType: ('foo', <type 'datetime.date'>, 'combination_00')
    >>> f.validate('foo') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'The value has the wrong number of members', ...)
    >>> f.validate(17)
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'The value is not a sequence', None)
    >>> f.validate((datetime.date(2005, 6, 22), datetime.date(1995, 7, 10)))
    ... # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'${minimum} must be less than or equal to ...

    Binding a Combination field also takes care of binding contained fields:

    >>> context = object()
    >>> bound_f = f.bind(context)
    >>> bound_f.context is context
    True
    >>> bound_f.fields[0].context is context
    True
    >>> bound_f.fields[1].context is context
    True

    Each entry in the combination has to be a schema field

    >>> class IDemo2(interface.Interface):
    ...     invalid_field = Combination(
    ...         (schema.Date(title=u'Begin', required=False),
    ...          dict(title=u'Expire', required=True)),
    ...         title=u'Invalid field')
    Traceback (most recent call last):
    ...
    DoesNotImplement: An object does not implement interface...
    """

    fields = constraints = ()

    def __init__(self, fields, **kw):
        for ix, field in enumerate(fields):
            if not IField.providedBy(field):
                raise DoesNotImplement(IField)
            # Give each sub-field a stable, order-based name.
            field.__name__ = "combination_%02d" % ix
        self.fields = tuple(fields)
        super().__init__(**kw)

    def _validate(self, value):
        if value != self.missing_value:
            try:
                len_value = len(value)
            except (TypeError, AttributeError):
                raise MessageValidationError(
                    _combination_not_a_sequence_error)
            if len_value != len(self.fields):
                raise MessageValidationError(
                    _combination_wrong_size_error)
            for v, f in zip(value, self.fields):
                # Bind each sub-field so its context matches ours before
                # validating the corresponding member.
                f = f.bind(self.context)
                f.validate(v)
        super()._validate(value)

    def bind(self, object):
        clone = super().bind(object)
        # We need to bind the fields too, e.g. for Choice fields
        clone.fields = tuple(field.bind(object) for field in clone.fields)
        return clone
class QueryTextLineConstraint(BaseField, schema.TextLine):
    """Constraint checking that a value parses as a text-index query.

    Either pass ``index_getter`` (a callable taking the context and
    returning a text index) or both ``catalog_name`` and ``index_name``
    naming a catalog utility and one of its indexes.
    """

    def __init__(self, index_getter=None, catalog_name=None, index_name=None):
        # catalog_name and index_name must be given together, and exactly
        # one of index_getter / catalog_name must be chosen.
        assert not ((catalog_name is None) ^ (index_name is None))
        assert (index_getter is None) ^ (catalog_name is None)
        self.catalog_name = catalog_name
        self.index_name = index_name
        self.index_getter = index_getter

    def __call__(self, field, value):
        if self.index_getter is not None:
            # Use the bound field's context: constraint instances are
            # shared on the field and never bound themselves, so
            # self.context (used previously) was always the unbound
            # default.  This matches the catalog branch below.
            index = self.index_getter(field.context)
        else:
            catalog = component.getUtility(
                zope.catalog.interfaces.ICatalog,
                self.catalog_name,
                field.context)
            index = catalog[self.index_name]
        parser = zope.index.text.queryparser.QueryParser(index.lexicon)
        try:
            parser.parseQuery(value)
        except zope.index.text.parsetree.ParseError:
            raise MessageValidationError(_bad_query)
class TextLine(BaseField, schema.TextLine):
    """An extended TextLine.

    >>> from zope.index.text.textindex import TextIndex
    >>> index = TextIndex()
    >>> class IDemo(interface.Interface):
    ...     query = TextLine(
    ...         constraints=(
    ...             QueryTextLineConstraint(
    ...                 lambda context: index),),
    ...         title=u"Text Query")
    ...
    >>> field = IDemo['query'].bind(None) # using None as context
    >>> field.validate(u'cow')
    >>> field.validate(u'cow and dog')
    >>> field.validate(u'a the') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'Invalid query.', None)
    >>> field.validate(u'and') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'Invalid query.', None)
    >>> field.validate(u'cow not dog')
    >>> field.validate(u'cow not not dog') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    MessageValidationError: (u'Invalid query.', None)
    >>> field.validate(u'cow and not dog')
    >>> field.validate(u'cow -dog')
    >>> field.validate(b'cow') # non-unicode fails, as usual with TextLine
    Traceback (most recent call last):
    ...
    WrongType: ('cow', <type 'unicode'>, 'query')
    """
    # All behavior comes from BaseField (constraints, default_getter) and
    # schema.TextLine; this class only combines the two.
@interface.implementer(interfaces.IHTMLSnippet)
class HTMLSnippet(BaseField, schema.Text):
    """Simple implementation for HTML snippet.

    A Text field whose value is an HTML excerpt suitable for embedding
    in a document body; see interfaces.IHTMLSnippet.
    """
@interface.implementer(interfaces.IHTMLDocument)
class HTMLDocument(BaseField, schema.Text):
    """Simple implementation for HTML document.

    A Text field whose value is a complete HTML document; see
    interfaces.IHTMLDocument.
    """
import datetime
import pytz
import zope.browser.interfaces
import zope.publisher.interfaces.browser
import zope.schema.interfaces
from zope import component
from zope import interface
from zope import schema
from zc.form.i18n import _
class IExtendedField(zope.schema.interfaces.IField):
    """Field extended with constraint callables and a dynamic default."""

    constraints = schema.Tuple(
        title=_("Constraints"),
        description=_("""A tuple of callables taking the combination field
        and the value, that should raise an exception if the values do not
        match the constraint. Can assume that value is not the missing_value,
        and has a len equal to the number of sub-fields."""))

    def default_getter(context):
        """Return the default value."""

    default = interface.Attribute(
        """if default_getter has been set, returns the result of that call;
        otherwise returns whatever value has been set as the default.""")
class IOptionField(IExtendedField):
    """Field with exactly one predefined value.

    Caution: The value will not get displayed by the widget of this field.
    """

    value = interface.Attribute(
        """the value for this field; one and only one of value and
        value_getter must be non-None""")

    value_getter = interface.Attribute(
        """a callable, taking a context, return the option's value; or None.
        one and only one of value and value_getter must be non-None""")

    identity_comparison = schema.Bool(
        description=_("""Whether validation comparison should be identity
        (as opposed to equality) based"""))

    def getValue():
        """Return value for option field."""
class IUnionField(IExtendedField):
    """A field that may have one of many field types of values.

    Order is important, in that the first field from left to right that
    validates a value is considered to be the "active" field.
    """

    fields = schema.Tuple(
        title=_("Composite Fields"),
        description=_("""\
The possible schema fields that may describe the data"""))

    use_default_for_not_selected = schema.Bool(
        description=_(
            """When displaying the Union field in the browser the fields
            which are not selected will have no value (i. e. the field's
            missing_value.

            With this attribute set the default value of the field will
            be displayed instead.

            Default: False"""))

    def validField(value):
        """Return first valid field for the given value, or None"""
class ICombinationField(IExtendedField):
    """A field that describes a combination of two or more fields"""

    # Unlike IUnionField, all sub-fields may be completed at once.
    fields = schema.Tuple(
        title=_("Composite Fields"),
        description=_("""\
The schema fields that may describe the data"""))
# Marker interface combining IExtendedField with ITextLine.
class IExtendedTextLineField(IExtendedField, zope.schema.interfaces.ITextLine):
    """TextLine field extended with IExtendedField capabilities"""
@interface.implementer(zope.schema.interfaces.ISource)
class AvailableTimeZones:
    """Source of available time zones.

    Membership is structural: any datetime.tzinfo instance is considered
    part of the source.
    """

    def __contains__(self, value):
        return isinstance(value, datetime.tzinfo)
@interface.implementer(zope.schema.interfaces.ITitledTokenizedTerm)
class Term:
    """Minimal titled, tokenized vocabulary term."""

    def __init__(self, title, token):
        self.title = title
        self.token = token
@component.adapter(AvailableTimeZones,
                   zope.publisher.interfaces.browser.IBrowserRequest)
@interface.implementer(zope.browser.interfaces.ITerms)
class TimeZoneTerms:
    """Term and value support needed by query widgets."""

    def __init__(self, source, request):
        self.request = request

    def getTerm(self, value):
        # assumes value is a pytz timezone exposing .zone -- TODO confirm
        token = value.zone
        # Underscores in zone names read badly in a UI.
        title = token.replace('_', ' ')
        return Term(title, token)

    def getValue(self, token):
        return pytz.timezone(token)
class IHTMLSnippet(zope.schema.interfaces.IText):
    """HTML excerpt that can be placed within an HTML document's body element.

    Snippet should have no dangling open tags.

    XHTML preferred; field may have version attribute in future.
    """
class IHTMLDocument(zope.schema.interfaces.IText):
    """HTML Document.

    XHTML preferred; field may have version attribute in future.
    """
====
Todo
====
Tests::
<content title= schema= fields= omit_fields= schemaprefix=
>
<require permission= attributes= interfaces=>
<allow attributes= interfaces=>
<add ...>
<buttons ...>
<button showDisabled= description=...>
</buttons>
<require ...>
<allow ...>
<macrotemplate name= source=>
<menuItem .../>
<widget .../>
</add>
<edit degradeInput= degradeDisplay= displayFields=>
...
<schema prefix= source= fields= >
<widget .../>
</schema>
</edit>
<display degradeDisplay=>
</display>
<form degradeInput= degradeDisplay= displayFields= editFields=>
...
</form>
<menuItem ...>
</content>
* Being able to specify some fields as input and others as display would be nice
* remove extra_script
* condition to show the form (and menu item) (e.g. condition="not:context/finalized_date")
| zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/src/zc/form/TODO.rst | TODO.rst |
"""Combination widget"""
import zope.cachedescriptors.property
import zope.schema.interfaces
from zope import component
from zope.browserpage import ViewPageTemplateFile
from zope.formlib import namedtemplate
from zope.formlib.interfaces import IDisplayWidget
from zope.formlib.interfaces import IInputWidget
from zope.formlib.interfaces import WidgetInputError
from zc.form.browser.widgetapi import BaseWidget
class CombinationWidget(BaseWidget):
    """Widget rendering a Combination field as one sub-widget per subfield."""

    widget_interface = IInputWidget

    @zope.cachedescriptors.property.Lazy
    def widgets(self):
        """The sub-widgets, one per subfield, created lazily."""
        field = self.context
        res = []
        for f in field.fields:
            # NOTE(review): binds each subfield to the combination field
            # itself (self.context is the field) -- looks odd but preserved.
            f = f.bind(self.context)
            w = component.getMultiAdapter((f, self.request,),
                                          self.widget_interface)
            w.setPrefix(self.name + ".")
            res.append(w)
        return res

    def setPrefix(self, prefix):
        super().setPrefix(prefix)
        # Keep the sub-widgets' prefixes in sync with our (new) name.
        for w in self.widgets:
            w.setPrefix(self.name + ".")

    def loadValueFromRequest(self):
        # the lack of an API to get the input value regardless of validation
        # is a significant problem. The inability to clear errors is a
        # problem.
        field = self.context
        missing_value = field.missing_value
        widgets = self.widgets
        required_errors = []
        errors = []
        values = []
        has_input = False  # renamed from `any`, which shadowed the builtin
        for w in widgets:
            try:
                val = w.getInputValue()
            except WidgetInputError as e:
                if isinstance(e.errors,
                              zope.schema.interfaces.RequiredMissing):  # :-(
                    required_errors.append((w, e))
                else:
                    errors.append((w, e))
                val = w.context.missing_value
            values.append(val)
            has_input = has_input or val != w.context.missing_value
        if field.required or has_input or errors:
            errors.extend(required_errors)
        else:  # remove the required errors in the sub widgets
            for w, e in required_errors:
                w.error = lambda: None  # :-(
        if errors:
            if len(errors) == 1:
                errors = errors[0][1]
            else:
                errors = [e for widget, e in errors]
            self._error = WidgetInputError(
                self.context.__name__,
                self.label, errors)
            values = missing_value
        elif not has_input:
            values = missing_value
        else:
            values = tuple(values)
        return values

    template = namedtemplate.NamedTemplate('default')

    def render(self, value):
        field = self.context
        missing_value = field.missing_value
        if value is not missing_value:
            # Treat values that are not sequences of the right length as
            # missing.  (Previously such values were also passed to
            # _set_values_on_widgets, which cannot iterate missing_value
            # and would raise a TypeError.)
            try:
                len_value = len(value)
            except (TypeError, AttributeError):
                value = missing_value
            else:
                if len_value != len(field.fields):
                    value = missing_value
        if value is not missing_value:
            self._set_values_on_widgets(value)
        for w in self.widgets:  # XXX quick hack.
            if zope.schema.interfaces.IBool.providedBy(w.context):
                w.invert_label = True
            else:
                w.invert_label = False
        return self.template()

    def _set_values_on_widgets(self, values):
        # Only overwrite a sub-widget's rendered value when there is no
        # form input at all, or when we actually have a value for it.
        hasInput = self.hasInput()
        for w, v in zip(self.widgets, values):
            if not hasInput or v != w.context.missing_value:
                w.setRenderedValue(v)
# Registers combinationwidget.pt as the widget's default named template.
default_template = namedtemplate.NamedTemplateImplementation(
    ViewPageTemplateFile('combinationwidget.pt'), CombinationWidget)
class CombinationDisplayWidget(CombinationWidget):
    """Read-only variant: looks up IDisplayWidget sub-widgets instead."""
    widget_interface = IDisplayWidget
"""Union widget"""
from zope import component
from zope.browserpage import ViewPageTemplateFile
from zope.formlib import namedtemplate
from zope.formlib.interfaces import IInputWidget
from zope.formlib.interfaces import WidgetInputError
import zc.form.interfaces
from zc.form.i18n import _
from .widgetapi import BaseWidget
class CompositeOptionWidget(BaseWidget):
    """Placeholder widget for Option sub-fields of a Union."""

    def __call__(self):
        # Option fields carry a fixed value, so nothing is rendered.
        return None
class NotChosenWidget:
    """Pseudo-widget for the "no value" choice of a non-required Union.

    Mimics just enough of the widget API for the template: label, hint,
    error, name and required.
    """

    error = name = None
    required = False

    def __init__(self, label, hint):
        self.label = label
        self.hint = hint

    def __call__(self):
        # No input field to render for this choice.
        return None
class UnionWidget(BaseWidget):
    """Widget for a Union field: a selectable group of sub-field widgets."""

    # Index of the sub-field chosen in the request, or None.
    _field_index = None
    no_value_label = _('union_field_label-no_value', "Not specified")
    no_value_hint = _('union_field_hint-no_value', '')

    def loadValueFromRequest(self):
        field = self.context
        missing_value = field.missing_value
        value = self.request.form.get(self.name)
        try:
            value = int(value)
        except (TypeError, ValueError):
            value = missing_value
        else:
            # Reject out-of-range indexes, including negative ones;
            # previously a negative form value silently selected a field
            # from the end of the tuple.
            if not 0 <= value < len(field.fields):
                value = missing_value
            else:
                self._field_index = value
                # value is the int index of the active field
                active = field.fields[value].bind(self.context)
                if zc.form.interfaces.IOptionField.providedBy(active):
                    return active.getValue()
                widget = component.getMultiAdapter(
                    (active, self.request), IInputWidget)
                # NOTE(review): mutates the bound field's `required` --
                # preserved as-is.
                widget.required = widget.context.required = self.required
                widget.setPrefix(self.name)
                try:
                    return widget.getInputValue()
                except WidgetInputError as e:
                    # recast with our name and title
                    self._error = WidgetInputError(
                        self.context.__name__,
                        self.label,
                        e.errors)
        # Consistently report the field's missing value (the previous
        # implicit return of None only matched the default missing_value).
        return missing_value

    template = namedtemplate.NamedTemplate('default')

    def render(self, value):
        # choices = ({selected, identifier, widget},)
        # widget may be None, name may be None.
        field = self.context
        missing_value = field.missing_value
        choices = []
        field_index = self._field_index
        if field_index is not None:
            chosen_field = field.fields[field_index]
        elif value is not missing_value:
            chosen_field = field.validField(value)
        else:
            chosen_field = None
        for ix, inner_field in enumerate(field.fields):
            selected = inner_field is chosen_field
            inner = inner_field.bind(field.context)
            identifier = "%s-%02d" % (self.name, ix)
            if zc.form.interfaces.IOptionField.providedBy(inner):
                widget = CompositeOptionWidget(inner, self.request)
            else:
                widget = component.getMultiAdapter(
                    (inner, self.request), IInputWidget)
            if selected:
                widget.setRenderedValue(value)
            elif self._renderedValueSet():
                if field.use_default_for_not_selected:
                    widget.setRenderedValue(inner.default)
                else:
                    widget.setRenderedValue(inner.missing_value)
            widget.setPrefix(self.name)
            choices.append(
                {'selected': selected, 'identifier': identifier,
                 'widget': widget, 'value': str(ix)})
        if not field.required:
            # Offer an explicit "not specified" choice after the real fields.
            ix += 1
            selected = chosen_field is None
            identifier = "%s-%02d" % (self.name, ix)
            widget = NotChosenWidget(self.no_value_label, self.no_value_hint)
            choices.append(
                {'selected': selected, 'identifier': identifier,
                 'widget': widget, 'value': str(ix)})
        return self.template(choices=choices)
# Registers unionwidget.pt as the widget's default named template.
default_template = namedtemplate.NamedTemplateImplementation(
    ViewPageTemplateFile('unionwidget.pt'), UnionWidget)
"""source input widget with most recently used (MRU) value support"""
import html
import persistent.list
import zc.resourcelibrary
import zope.annotation.interfaces
import zope.browser.interfaces
import zope.component
import zope.formlib.interfaces
from BTrees import OOBTree
from zope.formlib.source import SourceInputWidget
class MruSourceInputWidget(SourceInputWidget):
ANNOTATION_KEY = 'zc.form.browser.mruwidget'
    def hasInput(self):
        """Return whether this widget's hidden marker field was submitted."""
        return self.name + '.displayed' in self.request
def getMostRecentlyUsedTokens(self):
"""Get a sequence of the most recently used tokens (most recent first).
"""
key = self.name # TODO should the key be more specific?
principal = self.request.principal
annotations = zope.annotation.interfaces.IAnnotations(principal)
annotation = annotations.get(self.ANNOTATION_KEY)
if annotation is None:
annotations[self.ANNOTATION_KEY] = annotation = OOBTree.OOBTree()
tokens = annotation.get(key)
if tokens is None:
tokens = annotation[key] = persistent.list.PersistentList()
return tokens
def getMostRecentlyUsedTerms(self):
"""Get a sequence of the most recently used terms (most recent first).
"""
tokens = self.getMostRecentlyUsedTokens()
terms = zope.component.getMultiAdapter(
(self.source, self.request), zope.browser.interfaces.ITerms)
mru = []
for token in tokens:
try:
value = terms.getValue(token)
term = terms.getTerm(value)
except LookupError:
continue
mru.append(term)
return mru
def addMostRecentlyUsedTerm(self, term):
"""Add a term to the list of MRU values.
"""
tokens = self.getMostRecentlyUsedTokens()
try:
tokens.remove(term.token)
except ValueError:
pass
tokens.insert(0, term.token)
del tokens[10:] # TODO should this constant be configurable?
    def queryViewApplied(self):
        """Determine if a query view was used to set the value of the field.
        """
        # Returns True on the first query view whose apply marker is in the
        # request; otherwise falls through and returns None (falsy).
        for name, queryview in self.queryviews:
            if name + '.apply' in self.request:
                return True
def __call__(self):
zc.resourcelibrary.need('zc.form.mruwidget')
result = ['<div class="value">']
value = self._value()
field = self.context
term = None
if value != field.missing_value:
try:
term = self.terms.getTerm(value)
except LookupError:
pass # let the "missing" term value from above be used
else:
self.addMostRecentlyUsedTerm(term)
mru_terms = self.getMostRecentlyUsedTerms()
queries_id = self.name + '.queries'
# should the query views be visible?
if (self.request.form.get(queries_id + '.visible') == 'yes'
and not self.queryViewApplied()) or not mru_terms:
queries_style = ''
queries_visible = 'yes'
else:
queries_style = 'display: none;'
queries_visible = 'no'
result.append('<input type="hidden" name="%s.visible" '
'id="%s.visible" value="%s">'
% (queries_id, queries_id, queries_visible))
if mru_terms:
result.append('<select name="%s" id="%s">' %
(self.name, self.name))
for mru_term in mru_terms:
if term is not None and mru_term.token == term.token:
selected = ' selected="selected"'
else:
selected = ''
result.append(' <option value="%s"%s>%s</option>'
% (html.escape(mru_term.token),
selected,
html.escape(mru_term.title)))
result.append('</select>')
result.append(' <input type="button" '
'name="%s.mru_expand_button" ' % self.name +
'onclick="javascript:'
'zc_mruwidget_toggleQueriesDisplay(\'%s\')"'
% queries_id + ' value="...">')
result.append(' <input type="hidden" name="%s.displayed" value="y">'
% self.name)
result.append(' <div class="queries" id="%s" style="%s">'
% (queries_id, queries_style))
for name, queryview in self.queryviews:
result.append(' <div class="query">')
result.append(' <div class="queryinput">')
result.append(queryview.render(name))
result.append(' </div> <!-- queryinput -->')
qresults = queryview.results(name)
if qresults:
result.append(' <div class="queryresults">\n%s' %
self._renderResults(qresults, name))
result.append(' </div> <!-- queryresults -->')
result.append(' </div> <!-- query -->')
result.append(' </div> <!-- queries -->')
result.append('</div> <!-- value -->')
return '\n'.join(result) | zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/src/zc/form/browser/mruwidget.py | mruwidget.py |
import pytz
# XXX argh: pytz.tzinfo must be imported explicitly so its BaseTzInfo
# class can be given a security checker below.
import pytz.tzinfo
import zope.browserpage
import zope.formlib.interfaces
import zope.interface.common.idatetime
import zope.publisher.interfaces.browser
from zope import component
from zope import interface
from zope.interface.common.idatetime import ITZInfo
from zope.security.checker import NamesChecker
import zc.form.browser.mruwidget
import zc.form.interfaces
# All known zone names, frozen for fast membership tests.
ALL_TIMEZONES = frozenset(pytz.all_timezones)

# Grant zope.security access to pytz tzinfo objects: everything ITZInfo
# declares plus pytz's own `zone` attribute and `localize` method.
names = set(ITZInfo.names(all=True))
names.add('zone')
names.add('localize')
checker = NamesChecker(names)

# pytz.UTC is a distinct singleton, so it is patched separately from the
# shared BaseTzInfo base class used by all other zones.
pytz.UTC.__Security_checker__ = checker
pytz.tzinfo.BaseTzInfo.__Security_checker__ = checker
# end argh.
class TimeZoneWidget(zc.form.browser.mruwidget.MruSourceInputWidget):
    """MRU source widget that seeds the MRU list with the timezones of
    the request locale's territory."""

    def getMostRecentlyUsedTerms(self):
        mru = super().getMostRecentlyUsedTerms()
        # Supplement the principal's own MRU tokens with the zones of
        # the user's country, taken from the request locale.
        territory = self.request.locale.id.territory
        if not territory:
            return mru
        try:
            country_zones = pytz.country_timezones(territory)
        except KeyError:
            # Unknown territory code: nothing extra to offer.
            return mru
        seen = {term.token for term in mru}
        for zone in sorted(z for z in country_zones if z not in seen):
            # The display title uses spaces; the token keeps the
            # canonical underscore form (e.g. 'America/New_York').
            mru.append(zc.form.interfaces.Term(zone.replace('_', ' '), zone))
        return mru
@component.adapter(zc.form.interfaces.AvailableTimeZones,
                   zope.publisher.interfaces.browser.IBrowserRequest)
@interface.implementer(zope.formlib.interfaces.ISourceQueryView)
class TimeZoneQueryView:
    """Substring-search query view over the set of all known timezones."""

    def __init__(self, source, request):
        self.context = source
        self.request = request

    _render = zope.browserpage.ViewPageTemplateFile('timezone_queryview.pt')

    def render(self, name):
        """Render the search string input and search button."""
        return self._render(field_name=name + '.searchstring',
                            button_name=name + '.search')

    def results(self, name):
        """Return the timezones matching the submitted search string,
        or None when no search was submitted."""
        if name + '.search' not in self.request:
            return None
        # Normalize once: case-insensitive, surrounding whitespace
        # dropped, and spaces mapped to the underscores used in zone
        # ids (e.g. 'America/New_York').  The original code lowercased
        # the string twice; once suffices.
        searchstring = (self.request[name + '.searchstring']
                        .strip().lower().replace(' ', '_'))
        timezones = []
        for tz in ALL_TIMEZONES:
            if searchstring in tz.lower():
                timezones.append(pytz.timezone(tz))
        return timezones
from zope import interface
class ISchemaView(interface.Interface):
    "the basic schema view interface suitable for extension and for display"

    extra = interface.Attribute(
        """A dictionary to store values desired by a given schema view.
        The add view is the only standard interface that explicitly declares a
        use for this dictionary.""")

    buttons = interface.Attribute(
        """A dictionary of a list of dictionaries,
        representing the available buttons.
        The top level dictionary has keys of unicode names representing
        button categories. The default category defined in the buttons
        subdirective, for example, is u'main'. The values are lists of
        dictionaries. Each dictionary represents a button within the category
        and contains the keys 'label,', which should be used to draw the
        button; 'id,' which should be used as the name of the submit button
        and typically as the id of the widget on the form (for DOM
        getElementById); 'action,' the name of the method on this class to
        which __call__ should dispatch if the id is activated; 'css_class,'
        the name of the css_class that the button should be assigned;
        'category', the name of the containing category, as described above;
        and 'active,' a boolean indicating if the action is currently
        available for this context (user, condition, etc.). If a button is
        not active, the id and action are empty strings. Button dictionaries
        may contain additional keys and values as form subclasses desire.
        Typically, the button_views method is used to render buttons, but
        the buttons property provides a more raw presentation of the
        information.
        All values are essentially advisory except for 'action' and 'id'. 'id'
        is typically auto-generated in such a manner as to minimize the
        likelihood of conflicting with other names on the form.
        """)

    label = interface.Attribute(
        """The suggested label for the form, as might be rendered in a header
        at the top of the page.""")

    fieldNames = interface.Attribute(
        "list of field names in suggested order for the form")

    active_button = interface.Attribute(
        """after the form machinery has determined the button pressed, if any,
        and before the button action has been dispatched, this holds the button
        dictionary, as described in the buttons attribute above, of the button
        that was submitted.""")

    update_status = interface.Attribute(
        """The status message, as translated from the status returned from
        the dispatched action.""")

    schema = interface.Attribute(
        """The schema that is the source of this form.""")

    errors = interface.Attribute(
        """a tuple of the error objects generated by the dispatched action
        for each field.""")

    invariant_errors = interface.Attribute(
        """a tuple of the error objects generated by the dispatched action
        for each invariant rule.""")

    widgets = interface.Attribute(
        """After setUpWidgets is called, this contains a collection of widgets
        with a mapping interface. Iterating over it is in the order of the
        view's fieldNames. Widgets can be obtained by field name using
        __getitem__ or get.
        """)

    def setUpWidgets():
        """the call to set up widgets for the view. After this call, the
        widgets attribute is in place. Usually called in view
        initialization"""

    def setPrefix(prefix):
        "Set the desired form prefix on all widgets."

    # NOTE(review): unlike the other declarations in this interface, this
    # method and beforeRender() include `self` in the declared signature --
    # probably an oversight; left as-is because changing it alters the
    # declared interface signature.
    def invariant_error_views(self):
        """return a view of each error object in the invariant_errors
        attribute, as found for the 'snippet' name."""

    def error_views():
        """return a view of each error object in the errors
        attribute, as found for the 'snippet' name."""

    def button_views(category=None, show_disabled=False):
        """return an iterator of standard html submit buttons for each button
        definition. If category is None (default), all buttons in all
        categories are rendered; otherwise the value is used to look up and
        only render buttons in the specified category. A KeyError is raised
        if the category does not exist. By default, disabled buttons
        (buttons that are not active) are not rendered; if show_disabled is
        True, these are rendered as disabled buttons.
        """

    def beforeRender(self):
        """hook for performing things before rendering. Note this is called
        whether or not the form is actually rendered: check self.halt_rendering
        if you need to perform different behavior in this circumstance."""

    def __call__(template_usage='', *args, **kw):
        """a dispatcher to button actions and renderer of the form.
        __call__ searches the request for each of the ids in the buttons
        attribute. As soon as it finds one, it dispatches to the associated
        action, assuming there will be only one action per submission. The
        active button is stored as active_button, and then the beforeRender
        hook is called. if halt_rendering has been set to a Boolean True
        value, rendering is stopped and an empty unicode string is returned.
        Otherwise, the return value of the dispatch is (translated to be) the
        update_status, and the form is rendered with the passed arguments.
        """

    def validateInvariants(obj):
        """Given an object, validates the schema invariants and returns a
        list of any generated errors."""

    def getPseudoObject(data):
        """returns an object that claims to implement the form's schema and
        has the data supplied stored on the object. A helper for validating
        invariants."""

    halt_rendering = interface.Attribute(
        """if halt_rendering is set to True by a dispatched action in
        __call__, the form should not render.""")

    label_msgid = interface.Attribute(
        """the message id of the label that should typically be rendered at
        the top of this form.""")

    label_title = interface.Attribute(
        """substituted into label_msgid if the message id translation
        includes a "title" substitution slot (a la "Add ${title}").""")

    raw_buttons = interface.Attribute(
        """the default source of the buttons attribute. It is a dictionary of
        button group id to sequence of dicts, in which each dict has keys id,
        label, action, permission, condition, and css_class. action is the
        name of a method on the (composite, with optional bases provided in
        zcml) view class; the value returned is set as the update_status. The
        user must have the provided permission, if any, and the condition must
        pass, in order for the action to be available as a button. The
        condition is a TALES expression with 'context', 'adapted', 'nothing',
        'request', and 'modules' available.""")

    # form.pt in this product uses this hook. This functionality is
    # exercised by forms in other products.
    extra_script = interface.Attribute(
        """a hook point for schema views to suggest arbitrary
        javascript to be rendered in the form.""")
class IEditView(ISchemaView):
    """The basic edit view interface: an ISchemaView whose submit action
    validates the input and applies it to an existing object."""

    def attempt_commit():
        """The standard action method for an edit view's submit button: will
        validate the entered data and, if it passes, make the changes.
        """

    def commit_changes():
        """If no errors are found in attempt_commit, commit_changes is called
        to actually edit the object. Override to change what "edit the object"
        means.
        """

    def changed():
        """A hook. Override to execute logic *after* changes have been made
        successfully."""
class IAddView(ISchemaView):
    """The basic add view interface: an ISchemaView whose submit action
    creates a new object and adds it to a container."""

    extra = interface.Attribute(
        """The standard extra-value dictionary described in ISchemaView must
        have "content_factory" as a key to the factory (such as a class) that
        instantiates the object. It must also have one or more of the keys
        "arguments", "keyword_arguments", "set_before_add", "set_after_add"
        with names of arguments to be used for creation.
        """)

    def attempt_commit():
        """The standard action method for an add view's submit button: will
        validate the entered data and, if it passes, create and add the object.
        """

    def perform_cancel():
        """An action to cancel the add and redirect to the add's container or
        to the previous add form for nested adds.
        """

    def abstract_attempt_commit():
        """The guts of attempting a commit; does everything except redirect, so
        you can override attempt_commit and redirect as desired."""

    def createAndAdd(data):
        """Add the desired object using the data in the data argument.
        The data argument is a dictionary with the data entered in the form.
        Returns the created object.
        """

    def create(*args, **kw):
        "Actually instantiates the object and returns it."

    def add(content):
        "adds the content; typically delegates to the adding."

    def changed():
        """A hook. Override to execute logic *after* an object has been
        created and added successfully."""

    def nextURL():
        "returns the next URL; usually defers to the adding."
===================
CombinationWidget
===================
The combinationwidget collects two or more subfields to provide a convenient
way to specify a sequence of values.
Rendering the widget returns a table with the subfields::
>>> from zc.form.browser.combinationwidget import (
... CombinationWidget, CombinationDisplayWidget, default_template)
>>> from zope import component, interface
>>> component.provideAdapter(default_template, name='default')
>>> from zc.form.field import Combination, OrderedCombinationConstraint
>>> from zope.schema import Int
>>> from zope.schema.interfaces import IInt
>>> from zope.publisher.interfaces.browser import IBrowserRequest
>>> from zope.formlib.interfaces import IInputWidget
>>> from zope.formlib.textwidgets import IntWidget
>>> component.provideAdapter(
... IntWidget, (IInt, IBrowserRequest), IInputWidget)
>>> from zope import interface
>>> class IDemo(interface.Interface):
... acceptable_count = Combination(
... (Int(title=u'Minimum', required=True, min=0),
... Int(title=u'Maximum', required=False)),
... title=u'Acceptable Count',
... required=False,
... constraints=(OrderedCombinationConstraint(),))
...
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> widget = CombinationWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.loadValueFromRequest() # None
>>> print(widget())
<input type='hidden' name='field.acceptable_count-marker' value='x' />
<table class="combinationFieldWidget">
<tr>
<td class="label">
<label for="field.acceptable_count.combination_00">
<span class="required">*</span><span>Minimum</span>
</label>
</td>
<td class="field">
<div class="widget"><input class="textType"
id="field.acceptable_count.combination_00"
name="field.acceptable_count.combination_00" size="10" type="text"
value="" />
</div>
</td>
</tr>
<tr>
<td class="label">
<label for="field.acceptable_count.combination_01">
<span>Maximum</span>
</label>
</td>
<td class="field">
<div class="widget"><input class="textType"
id="field.acceptable_count.combination_01"
name="field.acceptable_count.combination_01" size="10" type="text"
value="" />
</div>
</td>
</tr>
</table>
Setting the appropriate values in the Request lets the widget correctly read
the specified value::
>>> request.form['field.acceptable_count-marker'] = 'x'
>>> request.form['field.acceptable_count.combination_00'] = '10'
>>> request.form['field.acceptable_count.combination_01'] = ''
>>> widget = CombinationWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.getInputValue()
(10, None)
>>> print(widget())
<...
...<input class="textType" id="field.acceptable_count.combination_00"
name="field.acceptable_count.combination_00" size="10" type="text"
value="10" />...
...<input class="textType" id="field.acceptable_count.combination_01"
name="field.acceptable_count.combination_01" size="10" type="text"
value="" />...
The field is fine with empty values, because it is not required::
>>> request.form['field.acceptable_count-marker'] = 'x'
>>> request.form['field.acceptable_count.combination_00'] = ''
>>> request.form['field.acceptable_count.combination_01'] = ''
>>> widget = CombinationWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.getInputValue() # None
>>> print(widget())
<...
...<input class="textType" id="field.acceptable_count.combination_00"
name="field.acceptable_count.combination_00" size="10" type="text"
value="" />...
...<input class="textType" id="field.acceptable_count.combination_01"
name="field.acceptable_count.combination_01" size="10" type="text"
value="" />...
>>> bool(widget.error())
False
>>> bool(widget.widgets[0].error())
False
If the optional value is filled in and the required one is not, though, there
are errors::
>>> request.form['field.acceptable_count-marker'] = 'x'
>>> request.form['field.acceptable_count.combination_00'] = ''
>>> request.form['field.acceptable_count.combination_01'] = '10'
>>> widget = CombinationWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.getInputValue()
Traceback (most recent call last):
WidgetInputError: ('acceptable_count', u'Acceptable Count',
WidgetInputError('combination_00', u'Minimum',
RequiredMissing('combination_00')))
>>> import zope.formlib.interfaces
>>> import zope.publisher.interfaces.browser
>>> @interface.implementer(zope.formlib.interfaces.IWidgetInputErrorView)
... @component.adapter(zope.formlib.interfaces.WidgetInputError,
... zope.publisher.interfaces.browser.IBrowserRequest)
... class SnippetView(object):
...
... def __init__(self, context, request):
... self.context = context
... self.request = request
... def snippet(self):
... return self.context.doc()
...
>>> component.provideAdapter(SnippetView)
>>> print(widget())
<...
...<input class="textType" id="field.acceptable_count.combination_00"
name="field.acceptable_count.combination_00" size="10"
type="text" value="" />...
...Required input is missing...
...<input class="textType" id="field.acceptable_count.combination_01"
name="field.acceptable_count.combination_01" size="10"
type="text" value="10" />...
>>> print(widget.error())
Required input is missing.
>>> print(widget.widgets[0].error())
Required input is missing.
Similarly, if the field's constraints are not met, the widget shows errors::
>>> request.form['field.acceptable_count-marker'] = 'x'
>>> request.form['field.acceptable_count.combination_00'] = '20'
>>> request.form['field.acceptable_count.combination_01'] = '10'
>>> widget = CombinationWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.getInputValue()
Traceback (most recent call last):
WidgetInputError: ('acceptable_count', u'Acceptable Count',
MessageValidationError(u'${minimum} ...
>>> print(widget())
<...
...input class="textType" id="field.acceptable_count.combination_00"
name="field.acceptable_count.combination_00" size="10"
type="text" value="20" />...
...<input class="textType" id="field.acceptable_count.combination_01"
name="field.acceptable_count.combination_01" size="10"
type="text" value="10" />...
>>> print(widget.error())
${minimum} must be less than or equal to ${maximum}.
There's also a display version of the widget::
>>> request = TestRequest()
>>> from zope.formlib.widget import DisplayWidget
>>> from zope.formlib.interfaces import IDisplayWidget
>>> component.provideAdapter(
... DisplayWidget, (IInt, IBrowserRequest), IDisplayWidget)
>>> widget = CombinationDisplayWidget(IDemo['acceptable_count'], request)
>>> widget.setPrefix('field')
>>> widget.setRenderedValue(('10', '2'))
>>> print(widget())
<input type='hidden' name='field.acceptable_count-marker' value='x' />
<table class="combinationFieldWidget">
<tr>
<td class="label">
<label for="field.acceptable_count.combination_00">
<span>Minimum</span>
</label>
</td>
<td class="field">
<div class="widget">10
</div>
</td>
</tr>
<tr>
<td class="label">
<label for="field.acceptable_count.combination_01">
<span>Maximum</span>
</label>
</td>
<td class="field">
<div class="widget">2
</div>
</td>
</tr>
</table>
If a value with the wrong number of parameters is supplied, the missing_value
is used::
>>> field = IDemo['acceptable_count']
>>> field.missing_value=('23', '42')
>>> widget = CombinationDisplayWidget(field, request)
>>> widget.setPrefix('field')
>>> widget.setRenderedValue(('10', '2', '3'))
>>> print(widget())
<input type='hidden' name='field.acceptable_count-marker' value='x' />
<table class="combinationFieldWidget">
<tr>
<td class="label">
<label for="field.acceptable_count.combination_00">
<span>Minimum</span>
</label>
</td>
<td class="field">
<div class="widget">23
</div>
</td>
</tr>
<tr>
<td class="label">
<label for="field.acceptable_count.combination_01">
<span>Maximum</span>
</label>
</td>
<td class="field">
<div class="widget">42
</div>
</td>
</tr>
</table>
If the value is not a sequence, the missing_value is used::
>>> widget = CombinationDisplayWidget(field, request)
>>> widget.setPrefix('field')
>>> widget.setRenderedValue(10)
>>> print(widget())
<input type='hidden' name='field.acceptable_count-marker' value='x' />
<table class="combinationFieldWidget">
<tr>
<td class="label">
<label for="field.acceptable_count.combination_00">
<span>Minimum</span>
</label>
</td>
<td class="field">
<div class="widget">23
</div>
</td>
</tr>
<tr>
<td class="label">
<label for="field.acceptable_count.combination_01">
<span>Maximum</span>
</label>
</td>
<td class="field">
<div class="widget">42
</div>
</td>
</tr>
</table>
The order of label and field is inverted for boolean fields::
>>> request = TestRequest()
>>> from zope.schema import Bool
>>> from zope.schema.interfaces import IBool
>>> from zope.formlib.boolwidgets import CheckBoxWidget
>>> from zope.formlib.widget import DisplayWidget
>>> from zope.formlib.interfaces import IDisplayWidget
>>> component.provideAdapter(
... CheckBoxWidget, (IBool, IBrowserRequest), IInputWidget)
>>> class IBoolDemo(interface.Interface):
... choices = Combination(
... (Bool(title=u'first'),
... Bool(title=u'second')),
... title=u'Choices',
... required=False,)
>>> widget = CombinationWidget(IBoolDemo['choices'], request)
>>> widget.setPrefix('field')
>>> print(widget())
<input type='hidden' name='field.choices-marker' value='x' />
<table class="combinationFieldWidget">
<tr>
<td></td>
<td class="field">
<div class="widget"><input class="hiddenType" id="field.choices.combination_00.used" name="field.choices.combination_00.used" type="hidden" value="" /> <input class="checkboxType" id="field.choices.combination_00" name="field.choices.combination_00" type="checkbox" value="on" />
<span>first</span>
</div>
</td>
</tr>
<tr>
<td></td>
<td class="field">
<div class="widget"><input class="hiddenType" id="field.choices.combination_01.used" name="field.choices.combination_01.used" type="hidden" value="" /> <input class="checkboxType" id="field.choices.combination_01" name="field.choices.combination_01" type="checkbox" value="on" />
<span>second</span>
</div>
</td>
</tr>
</table>
| zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/src/zc/form/browser/combinationwidget.rst | combinationwidget.rst |
========================================
Most Recently Used (MRU) Source Widget
========================================
The MRU widget keeps track of the last few values selected (on a per-principal
basis) and allows quickly selecting from that list instead of using a query
interface.
We can see the widget in action by using a custom form. Let's define a schema
for the form that uses a source::
>>> import zope.interface
>>> import zope.schema
>>> class IDemo(zope.interface.Interface):
...
... color = zope.schema.Choice(
... title=u"Color",
... description=u"My favorite color",
... source=AvailableColors,
... )
And then a class that implements the interface::
>>> @zope.interface.implementer(IDemo)
... class Demo(object):
...
... color = None
We'll need a form that uses this schema::
>>> import zope.formlib.form
>>> class DemoInput(zope.formlib.form.EditForm):
... actions = ()
... form_fields = zope.formlib.form.fields(IDemo)
By rendering the form we can see that there are no MRU items to choose from
(because this principal has never visited this form before) and the query
interface is displayed::
>>> import zope.publisher.browser
>>> import zope.security.interfaces
>>> import zope.security.management
>>> import zope.component.hooks
>>> @zope.interface.implementer(zope.security.interfaces.IPrincipal)
... class DummyPrincipal(object):
...
... id = "someuser"
... title = "Some User's Name"
... description = "A User"
Note that we need to use the special resourcelibrary request. We're
hacking together the TestRequest and the resourcelibrary request here; when we
switch to TestBrowser we can remove this oddity.
>>> import zc.resourcelibrary.publication
>>> class TestRequest(zope.publisher.browser.TestRequest,
... zc.resourcelibrary.publication.Request):
... def _createResponse(self):
... return zc.resourcelibrary.publication.Request._createResponse(
... self)
...
>>> request = TestRequest()
>>> principal = DummyPrincipal()
>>> request.setPrincipal(principal)
>>> zope.security.management.newInteraction(request)
>>> oldsite = zope.component.hooks.getSite()
>>> zope.component.hooks.setSite(getRootFolder())
Now we can use an instance of our demo object to see that the form
pulls the possible values from the vocabulary we've defined above::
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<div class="queries"...>
<div class="query"...>
<div class="queryinput"...>
<query view for colors>
</div> <!-- queryinput -->
</div> <!-- query -->
</div> <!-- queries -->
...
Note that the select box of MRU values isn't in the output, because the user
has never selected a value before::
>>> '<select name="form.color">' not in form()
True
Now, we can select one of the values::
>>> zope.security.management.endInteraction()
>>> request = TestRequest()
>>> request.form = {
... 'form.color.query.selection': 'red_token',
... 'form.color.query.apply': 'Apply',
... 'form.color.displayed': '',
... }
>>> request.setPrincipal(principal)
>>> zope.security.management.newInteraction(request)
Process the request and the list of MRU values is in the form::
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="red_token" selected="selected">Red</option>
</select>
...
And the query view is hidden because we have an MRU list::
>>> print(form())
<...
<input type="hidden" name="form.color.queries.visible" ... value="no">
...
If we select another value...::
>>> request = TestRequest()
>>> request.form = {
... 'form.color.query.selection': 'green_token',
... 'form.color.query.apply': 'Apply',
... 'form.color.displayed': '',
... }
>>> request.setPrincipal(principal)
...and process the request, the list of MRU values includes the new one, at
the top, and it is selected::
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="green_token" selected="selected">Green</option>
<option value="red_token">Red</option>
</select>
...
If we request a value not in the source everything stays the same, but nothing
is selected::
>>> request = TestRequest()
>>> request.form = {
... 'form.color.query.selection': 'blue_token',
... 'form.color.query.apply': 'Apply',
... 'form.color.displayed': '',
... }
>>> request.setPrincipal(principal)
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="green_token">Green</option>
<option value="red_token">Red</option>
</select>
...
We can make the query visible::
>>> request = TestRequest()
>>> request.form = {
... 'form.color.query.selection': 'red_token',
... 'form.color.query.apply': 'Apply',
... 'form.color.queries.visible': 'yes',
... 'form.color.query.search': 'yes',
... 'form.color.query.searchstring': 'red',
... 'form.color.displayed': '',
... }
>>> request.setPrincipal(principal)
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="red_token" selected="selected">Red</option>
<option value="green_token">Green</option>
</select>
...
<select name="form.color.query.selection">
<option value="red_token">Red</option>
</select>
<input type="submit" name="form.color.query.apply" value="Apply" />
...
It is not shown if the query is not applied::
>>> request = TestRequest()
>>> request.form = {
... 'form.color.query.selection': 'red_token',
... 'form.color.queries.visible': 'yes',
... 'form.color.query.search': 'yes',
... 'form.color.query.searchstring': 'red',
... 'form.color.displayed': '',
... }
>>> request.setPrincipal(principal)
>>> form = DemoInput(Demo(), request)
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="red_token">Red</option>
<option value="green_token">Green</option>
</select>
...
<select name="form.color.query.selection">
<option value="red_token">Red</option>
</select>
<input type="submit" name="form.color.query.apply" value="Apply" />
...
Tokens in the annotation of the principal are ignored if they are not in the
source::
>>> from zope.annotation.interfaces import IAnnotations
>>> annotations = IAnnotations(principal)
>>> annotation = annotations.get('zc.form.browser.mruwidget')
>>> tokens = annotation.get('form.color')
>>> tokens.append('black_token')
>>> tokens
['red_token', 'green_token', 'black_token']
>>> print(form())
<...
<select name="form.color" id="form.color">
<option value="red_token">Red</option>
<option value="green_token">Green</option>
</select>
...
<select name="form.color.query.selection">
<option value="red_token">Red</option>
</select>
<input type="submit" name="form.color.query.apply" value="Apply" />
...
Clean up a bit::
>>> zope.security.management.endInteraction()
>>> zope.component.hooks.setSite(oldsite)
| zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/src/zc/form/browser/mruwidget.rst | mruwidget.rst |
from xml.sax.saxutils import quoteattr
from zope.formlib.interfaces import IBrowserWidget
from zope.formlib.interfaces import IInputWidget
from zope.formlib.interfaces import WidgetInputError
from zope.formlib.widget import BrowserWidget
from zope.formlib.widget import InputWidget
from zope.interface import implementer
from zope.schema.interfaces import ValidationError
from zc.form.i18n import _
# Placeholder messages used by widgets when a field has no value; the
# display variants render as an empty string, the edit variants as
# "(no value)".
# NOTE(review): each message id is wrapped in a nested _() call -- the
# inner call appears to create the Message for the id and the outer to
# attach the default text; confirm against zc.form.i18n's factory that
# this double wrapping is intended.
_msg_missing_single_value_display = _(
    _("widget-missing-single-value-for-display"), "")
_msg_missing_multiple_value_display = _(
    _("widget-missing-multiple-value-for-display"), "")
_msg_missing_single_value_edit = _(
    _("widget-missing-single-value-for-edit"), "(no value)")
_msg_missing_multiple_value_edit = _(
    _("widget-missing-multiple-value-for-edit"), "(no value)")
@implementer(IBrowserWidget, IInputWidget)
class BaseWidget(BrowserWidget, InputWidget):
    """Base class for zc.form browser widgets.

    Subclasses implement ``loadValueFromRequest`` and ``render`` (and
    optionally ``initialize`` and ``renderHidden``); this class supplies
    the shared input-detection, validation, and marker-field plumbing.
    """

    # Note to previous users of widgetapi:
    # .translate -> ._translate; .__prefix -> ._prefix; NullValue ->
    # ._data_marker; .__initial_value and .__calculated_value -> replaced
    # with ._data (because zope.formlib.utility.setUpWidget behavior changed
    # for the better)

    _initialized = False  # set once _initialize() has run
    _error = None  # cached WidgetInputError, re-raised on repeat calls
    # set `_display` to True if you are using this for a display widget:
    _display = False

    # Form management methods.
    # Subclasses should not need to override these.

    def getInputValue(self):
        """Return the validated field value extracted from the request.

        Raises the (cached) WidgetInputError when validation fails.
        """
        if not self._initialized:
            self._initialize()
        if self._error is not None:
            raise self._error
        value = self._data
        field = self.context
        # allow missing values only for non-required fields
        if value == field.missing_value and not field.required:
            return value
        # value must be valid per the field constraints
        try:
            field.validate(value)
        except ValidationError as v:
            self._error = WidgetInputError(
                self.context.__name__, self.context.title, v)
            raise self._error
        return value

    def hasInput(self):
        """Whether this widget was part of the submitted form.

        Detected via the hidden marker input rendered by __call__() and
        hidden(), which is present even when the value inputs are empty.
        """
        if self._display:
            return False
        marker_name = self.name + "-marker"
        return marker_name in self.request.form

    def _initialize(self):
        # Lazy, one-time setup: let the subclass build its structures,
        # then populate self._data from the request or the field default.
        self._initialized = True
        self.initialize()
        if not self._renderedValueSet():
            if self.hasInput():
                self._data = self.loadValueFromRequest()
            else: # self._data is self._data_marker but no input in request
                self._data = self.context.default

    def applyChanges(self, content):
        """Store the input value on *content*; return True if it changed."""
        field = self.context
        value = self.getInputValue()
        change = field.query(content, self) != value
        if change:
            field.set(content, value)
            # Dynamic fields may change during a set, so re-get their value;
            # this is a larger Zope3 problem which is solved here for now.
            self._data = field.get(content)
        return change

    # Rendering methods:
    # (These should not need to be overridden.)

    def __call__(self):
        """Render the widget, preceded by its hidden marker input."""
        if not self._initialized:
            self._initialize()
        marker = self._get_marker()
        return marker + self.render(self._data)

    def hidden(self):
        """Render the widget as hidden inputs, preceded by the marker."""
        if not self._initialized:
            self._initialize()
        marker = self._get_marker()
        return marker + self.renderHidden(self._data)

    def _get_marker(self):
        # The marker is what hasInput() later uses to detect submission.
        marker_name = self.name + "-marker"
        return "<input type='hidden' name='%s' value='x' />\n" % marker_name

    # API for subclasses to implement:

    def initialize(self):
        """Initialize internal data structures needed by the widget.
        This method should not load values from the request.
        Derived classes should call the base class initialize() before
        performing specialized initialization. This requirement is
        waived for classes which inherit directly from, and *only*
        from, BaseWidget.
        """

    def loadValueFromRequest(self):
        """Load the value from data in the request."""
        raise NotImplementedError(
            "BaseWidget subclasses must implement loadValueFromRequest()")

    def render(self, value):
        """Return the widget's HTML for *value*; subclass responsibility."""
        raise NotImplementedError(
            "BaseWidget subclasses must implement render()")

    def renderHidden(self, value):
        """Render a hidden widget"""
        # NOTE(review): the default body returns None, which would make
        # hidden() fail when concatenating; presumably subclasses that
        # use hidden() always override this -- confirm.
class BaseVocabularyWidget(BaseWidget):
query = None
queryview = None
def __init__(self, field, vocabulary, request):
"""Initialize the widget."""
# only allow this to happen for a bound field
assert field.context is not None
self.vocabulary = vocabulary
super().__init__(field, request)
# Helpers used by the vocabulary widget machinery;
# these should not be overridden.
def setQuery(self, query, queryview):
assert self.query is None
assert self.queryview is None
assert query is not None
assert queryview is not None
self.query = query
self.queryview = queryview
# Use of a hyphen to form the name for the query widget
# ensures that it won't clash with anything else, since
# field names are normally Python identifiers.
queryview.setName(self.name + "-query")
def initialize(self):
"""Make sure the query view has a chance to initialize itself."""
if self.queryview is not None:
self.queryview.initialize()
def loadValueFromRequest(self):
"""Load the value from data in the request.
If self.queryview is not None, this method is responsible for
calling the query view's performAction() method with the value
loaded, and returning the result::
value = ...load value from request data...
if self.queryview is not None:
value = self.queryview.performAction(value)
return value
"""
return super().loadValueFromRequest()
# Convenience method:
def convertTokensToValues(self, tokens):
"""Convert a list of tokens to a list of values.
If an invalid token is encountered, WidgetInputError is raised.
"""
L = []
for token in tokens:
try:
term = self.vocabulary.getTermByToken(token)
except LookupError:
raise WidgetInputError(
self.context.__name__,
self.context.title,
"token %r not found in vocabulary" % token)
else:
L.append(term.value)
return L
class BaseVocabularyDisplay(BaseVocabularyWidget):
_display = True
def render(self, value):
if value in (self._data_marker, None):
# missing single value
return self.translate(_msg_missing_single_value_display)
else:
return self.renderTerm(self.vocabulary.getTerm(value))
def renderTerm(self, term):
"""Return textual presentation for term."""
raise NotImplementedError("BaseVocabularyMultiDisplay subclasses"
" must implement renderTerm()")
def _get_marker(self):
return ""
class BaseVocabularyMultiDisplay(BaseVocabularyDisplay):
"""Base class for display widgets of multi-valued vocabulary fields."""
def render(self, value):
if not value:
# missing multiple value
return self.translate(_msg_missing_multiple_value_display)
else:
pattern = ("<li>%s\n"
" <input type='hidden' name=%s value=%s /></li>")
vocabulary = self.vocabulary
L = []
name = quoteattr(self.name)
for v in value:
term = vocabulary.getTerm(v)
L.append(pattern % (self.renderTerm(term), name,
quoteattr(term.token)))
return ("<%s class=%s id=%s>\n%s\n</%s>"
% (self.containerElementType,
quoteattr(self.containerCssClass),
quoteattr(self.name),
"\n".join(L),
self.containerElementType))
containerCssClass = "values"
class BaseVocabularyBagDisplay(BaseVocabularyMultiDisplay):
"""Base class for display widgets of unordered multi-valued
vocabulary fields."""
containerElementType = "ul"
class BaseVocabularyListDisplay(BaseVocabularyMultiDisplay):
"""Base class for display widgets of ordered multi-valued
vocabulary fields."""
containerElementType = "ol"
class BaseQueryView:
name = None
widget = None
_initialized = False
def __init__(self, context, request):
self.context = context
self.request = request
# Methods called by the vocabulary widget construction machinery;
# subclasses should not need to override these.
def setName(self, name):
assert not self._initialized
assert not name.endswith(".")
assert self.name is None
self.name = name
def setWidget(self, widget):
assert not self._initialized
assert self.widget is None
assert widget is not None
self.widget = widget
# Methods which may be overriden by subclasses:
def initialize(self):
"""Initialization which does not require reading the request.
Derived classes should call the base class initialize() before
performing specialized initialization.
"""
# Should loading from the request happen here?
assert self.name is not None
assert self.widget is not None
assert not self._initialized
self._initialized = True
def renderResults(self, value):
"""Render query results if we have any, otherwise return an
empty string.
"""
results = self.getResults()
if results is None:
return ""
else:
return self.renderQueryResults(results, value)
# Methods which should be overriden by subclasses:
def performAction(self, value):
"""Perform any modifications to the value based on user actions.
This method should be overriden if the query view provides any
actions which can modify the value of the field.
"""
return value
# Methods which must be overriden by subclasses:
def getResults(self):
"""Perform the query, or return None.
The return value should be None if there is no query to
execute, or an object that can be rendered as a set of results
by renderQueryResults().
If the query results in an empty set of results, some value
other than None should be used to represent the results so
that renderQueryResults() can provide a helpful message.
"""
raise NotImplementedError(
"BaseQueryView subclasses must implement getResults()")
def renderInput(self):
"""Render the input area of the query view."""
raise NotImplementedError(
"BaseQueryView subclasses must implement renderInput()")
def renderQueryResults(self, results, value):
"""Render the results returned by getResults()."""
raise NotImplementedError(
"BaseQueryView subclasses must implement renderQueryResults()") | zc.form | /zc.form-2.0.tar.gz/zc.form-2.0/src/zc/form/browser/widgetapi.py | widgetapi.py |
========
Freezing
========
This package implements basic functionality for freezing objects:
spellings to query whether an object can be frozen, to query whether it
has been frozen, and to actually freeze an object. Further policies may
be implemented above the basic code in this package; and much of the
code in this package is offered as pluggable choices which can be
omitted while still keeping the basic API.
To discover whether an object is freezable, client code should ask if it
provides zc.freeze.interfaces.IFreezable.
Site configurations or code that declares that an object is IFreezable
is assuring that the object provides or can be adaptable to
zc.freeze.interfaces.IFreezing. This interface has only three elements:
_z_frozen is a readonly boolean that returns whether the object has been
versioned; _z_freeze_datetime is a readonly datetime in pytz.utc
specifying when the object was frozen (or None, if it is not yet
frozen); and _z_freeze is a method that actually freezes the object. If
the object is already frozen, it raises
zc.freeze.interfaces.FrozenError. If the object is not in a state to be
frozen, it may raise zc.freeze.interfaces.FreezeError. If the freezing
may succeed, the method should send a
zc.freeze.interfaces.IObjectFrozenEvent (such as
zc.freeze.interfaces.ObjectFrozenEvent).
That's the heart of the package: an API and an agreement, with nothing to test
directly. One policy that this package does not directly support is that
freezing an object might first create a copy and then version the copy
rather than the original; or version the original but replace the copy in the
location of the original; or make any other choices. These approaches are
intended to be implemented on top of--above--the zc.freeze API. This
package provides much simpler capabilities.
Conveniences
============
The package does provide two default implementations of IFreezing, and a few
conveniences.
One IFreezing implementation is for objects that are directly aware of this
API (as opposed to having the functionality assembled from adapters and other
components).
>>> import zc.freeze
>>> v = zc.freeze.Freezing()
>>> from zc.freeze import interfaces
>>> from zope.interface.verify import verifyObject
>>> verifyObject(interfaces.IFreezing, v)
True
>>> verifyObject(interfaces.IFreezable, v)
True
>>> v._z_frozen
False
>>> v._z_frozen = True
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> import pytz
>>> import datetime
>>> before = datetime.datetime.now(pytz.utc)
>>> v._z_freeze()
>>> before <= v._z_freeze_timestamp <= datetime.datetime.now(pytz.utc)
True
>>> v._z_frozen
True
>>> interfaces.IObjectFrozenEvent.providedBy(events[-1])
True
>>> events[-1].object is v
True
>>> v._z_freeze()
Traceback (most recent call last):
...
FrozenError
Another available implementation is an adapter, and stores the information in
an annotation. Here's a quick demo.
>>> import zope.annotation.interfaces
>>> from zope import interface, component
>>> class Demo(object):
... interface.implements(zope.annotation.interfaces.IAnnotatable)
...
>>> import UserDict
>>> class DemoAnnotations(UserDict.UserDict):
... interface.implements(zope.annotation.interfaces.IAnnotations)
... component.adapts(Demo)
... def __init__(self, context):
... self.context = context
... self.data = getattr(context, '_z_demo', None)
... if self.data is None:
... self.data = context._z_demo = {}
...
>>> component.provideAdapter(DemoAnnotations)
>>> component.provideAdapter(zc.freeze.FreezingAdapter)
>>> d = Demo()
>>> verifyObject(interfaces.IFreezing, interfaces.IFreezing(d))
True
>>> verifyObject(interfaces.IFreezable, interfaces.IFreezing(d))
True
>>> interfaces.IFreezing(d)._z_frozen
False
>>> interfaces.IFreezing(d)._z_frozen = True
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> before = datetime.datetime.now(pytz.utc)
>>> interfaces.IFreezing(d)._z_freeze()
>>> (before <= interfaces.IFreezing(d)._z_freeze_timestamp <=
... datetime.datetime.now(pytz.utc))
True
>>> interfaces.IFreezing(d)._z_frozen
True
>>> interfaces.IObjectFrozenEvent.providedBy(events[-1])
True
>>> events[-1].object is d
True
>>> interfaces.IFreezing(d)._z_freeze()
Traceback (most recent call last):
...
FrozenError
The zc.freeze module also contains three helpers for writing properties and
methods that are freeze-aware.
A 'method' function can generate a freeze-aware method that raises a
FrozenError if the object has been frozen.
'setproperty' and 'delproperty' functions can generate a freeze-aware
descriptor that raises a FrozenError if the set or del methods are called
on a frozen object. These are rwproperties.
'makeProperty' generates a freeze-aware descriptor that does a simple
get/set but raises FrozenError if the set is attempted on a frozen
object.
>>> class BiggerDemo(Demo):
... counter = 0
... @zc.freeze.method
... def increase(self):
... self.counter += 1
... _complex = 1
... @property
... def complex_property(self):
... return str(self._complex)
... @zc.freeze.setproperty
... def complex_property(self, value):
... self._complex = value * 2
... zc.freeze.makeProperty('simple_property')
...
>>> d = BiggerDemo()
>>> d.counter
0
>>> d.complex_property
'1'
>>> d.simple_property # None
>>> d.increase()
>>> d.counter
1
>>> d.complex_property = 4
>>> d.complex_property
'8'
>>> d.simple_property = 'hi'
>>> d.simple_property
'hi'
>>> interfaces.IFreezing(d)._z_frozen
False
>>> interfaces.IFreezing(d)._z_freeze()
>>> interfaces.IFreezing(d)._z_frozen
True
>>> d.counter
1
>>> d.increase()
Traceback (most recent call last):
...
FrozenError
>>> d.counter
1
>>> d.complex_property
'8'
>>> d.complex_property = 10
Traceback (most recent call last):
...
FrozenError
>>> d.complex_property
'8'
>>> d.simple_property
'hi'
>>> d.simple_property = 'bye'
Traceback (most recent call last):
...
FrozenError
>>> d.simple_property
'hi'
| zc.freeze | /zc.freeze-1.2.tar.gz/zc.freeze-1.2/src/zc/freeze/README.txt | README.txt |
import sys
import datetime
import pytz
import persistent
from zope import interface, event, component
import zope.annotation.interfaces
from zope.cachedescriptors.property import Lazy
from zc.freeze import interfaces
import rwproperty
def method(f):
def wrapper(self, *args, **kwargs):
try: # micro-optimize for the "yes, I'm already an IFreezing" story
frozen = self._z_frozen
except AttributeError:
frozen = interfaces.IFreezing(self)._z_frozen
if frozen:
raise interfaces.FrozenError
return f(self, *args, **kwargs)
return wrapper
class setproperty(rwproperty.rwproperty):
@staticmethod
def createProperty(func):
return property(None, method(func))
@staticmethod
def enhanceProperty(oldprop, func):
return property(oldprop.fget, method(func), oldprop.fdel)
class delproperty(rwproperty.rwproperty):
@staticmethod
def createProperty(func):
return property(None, None, method(func))
@staticmethod
def enhanceProperty(oldprop, func):
return property(oldprop.fget, oldprop.fset, method(func))
def makeProperty(name, default=None):
protected = '_z_%s__' % name
sys._getframe(1).f_locals[name] = property(
lambda self: getattr(self, protected, default),
method(lambda self, value: setattr(self, protected, value)))
def supercall(name):
sys._getframe(1).f_locals[name] = method(
lambda self, *args, **kwargs: getattr(
super(self.__class__, self), name)(*args, **kwargs))
class Data(persistent.Persistent):
interface.implements(interfaces.IData)
def __init__(self):
self._z__freeze_timestamp = datetime.datetime.now(pytz.utc)
@property
def _z_freeze_timestamp(self):
return self._z__freeze_timestamp
class Freezing(object):
interface.implements(interfaces.IFreezing)
_z__freezing_data = None
@property
def _z_frozen(self):
return self._z__freezing_data is not None
@property
def _z_freeze_timestamp(self):
res = self._z__freezing_data
if res is not None:
return res._z_freeze_timestamp
@method
def _z_freeze(self):
self._z__freezing_data = Data()
event.notify(interfaces.ObjectFrozenEvent(self))
KEY = "zc.freeze._z_freeze_timestamp"
class FreezingAdapter(object):
interface.implements(interfaces.IFreezing)
component.adapts(zope.annotation.interfaces.IAnnotatable)
def __init__(self, context):
self.context = context
@Lazy
def annotations(self):
return zope.annotation.interfaces.IAnnotations(self.context)
@property
def _z_frozen(self):
return self.annotations.get(KEY) is not None
@property
def _z_freeze_timestamp(self):
res = self.annotations.get(KEY)
if res is not None:
return res._z_freeze_timestamp
@method
def _z_freeze(self):
self.annotations[KEY] = Data()
event.notify(interfaces.ObjectFrozenEvent(self.context)) | zc.freeze | /zc.freeze-1.2.tar.gz/zc.freeze-1.2/src/zc/freeze/__init__.py | __init__.py |
Changes
*******
0.4.0 (2017-06-20)
==================
- Python 3 support
- When asking for updates from generation 0, only adds are sent. Never
removals, making this common case more efficient.
0.3.0 (2014-08-28)
==================
- Added a ``changed`` method to make intent clear when simply recording changes.
- Fixed: exceptions were raised when objects added to generational
sets quacked a little like generational sets.
0.2.0 (2014-08-10)
==================
- Improved subset APIs:
- No longer need to specify superset flag.
- Can have set and non-set children.
- A subset can be created without a parent and the parent will be
set when it's added to a containing set.
0.1.2 (2014-06-09)
==================
Fixed: Internal data structures were mishandled when there were more
than the maximum number of removals.
(Release 0.1.1 was made in error.)
0.1.0 (2014-06-08)
==================
Initial release
| zc.generationalset | /zc.generationalset-0.4.0.tar.gz/zc.generationalset-0.4.0/CHANGES.rst | CHANGES.rst |
Generational Sets
*****************
Generational sets (GSets) are designed to facilitate data synchronization
between a server and clients.
Goals:
- Make synchronization simple by sending all updates for a tree of sets
at once.
- Allow clients to be updated very quickly.
- Reduce data-transfer volume by sending only changes.
- Avoid conflict resolution.
Assumptions:
- Disconnected data updates aren't needed.
- Clients mirror server data.
This implies that the server data, or more specifically, the user's
view of the server data, aren't too large to store on the client.
GSets track state by generation. A client presents a generation and
is sent updates made since the presented generation. GSets can be
grouped into trees with a shared generation. A client can present a
single generation and be sent updates for all of the sets making up a
database.
This implementation of generational sets uses `ZODB
<http://zodb.org>`_ to store data on the server.
High-level usage pattern
========================
- Define a tree of sets representing the data in an application.
This may be user specific.
- Clients make updates via REST calls to a server. They don't make
local changes except in response to server updates.
- Client requests include their data generation.
- Most (JSON) responses to server calls have an optional updates property
that contains generational updates since the generation provided by
the client. When the client gets updates, which include the new
generation, it applies the updates to its internal data store.
- For native apps, the server sends push notifications when there are
updates for a user and, in response, the client polls for the
updates. This allows updates to be extremely timely without
constant polling.
Note that this package only provides the data structure
implementation. Wrapping the data structure in a REST interface or
sending notifications is up to applications.
API
===
Every object in a GSet must have an id. By default, this is provided
by an ``id`` attribute, but you can configure a GSet to use another
attribute or some other mechanism to get an id for an object.
When an object is added to a GSet, call the
``add`` method on the GSet with the object::
>>> from zc.generationalset import GSet
>>> things = GSet()
>>> athing = Thing(42)
>>> things.add(athing)
When an object is changed, call the ``changed`` method on the GSet
with the object. If the object is not present in the GSet, ``changed``
will raise a KeyError::
>>> things.changed(athing)
>>> things.changed(Thing(43))
Traceback (most recent call last):
...
KeyError: 43
>>> things.generational_updates(0)
{'generation': 3, 'adds': [Thing(42)]}
To remove an object, call
the ``remove`` method with the object::
>>> things.remove(athing)
To get updates to a set since a given generation, call
``generational_updates``, passing a generation::
>>> things.generational_updates(0)
{'generation': 4, 'removals': [42]}
>>> things.add(Thing(1))
>>> things.generational_updates(0)
{'generation': 5, 'removals': [42], 'adds': [Thing(1)]}
>>> things.generational_updates(3)
{'generation': 5, 'adds': [Thing(1)]}
Note that generations start at 1.
The result of calling ``generational_updates`` is a dictionary with
keys:
generation
The current generation of the set
adds
Objects added since the given generation.
removals
Ids of objects removed since the given generation.
contents
All of the object in the set.
``contents`` are returned when there have been many removals since
the given generation. A generational set only keeps track of a
limited number (99 by default, but configurable) of removals. If a
client is too out of date for the set to have relevant removals, it
returns the entire contents, instead of returning adds and removals.
GSets support iteration, and querying length and containment. They
don't currently support set operations, like intersection and
union. You can also retrieve an item from a GSet using its id::
>>> len(things)
1
>>> list(things)
[Thing(1)]
>>> Thing(1) in things
True
>>> things[1]
Thing(1)
Nested sets
-----------
To define nested sets:
- Define a parent set::
>>> parent = GSet(superset=True)
Note the use of the ``superset`` parameter.
- Define child sets, and add them to the parent:
>>> messages = GSet("messages", parent)
>>> parent.add(messages)
When defining child sets, specify an id and the parent.
We haven't tested more than one level of nesting.
When asking for generational updates on parent sets, the adds and
contents contain the generational updates for subsets, with ids, but
without subset generations:
>>> messages.add(Thing(42))
>>> parent.generational_updates(0)
{'generation': 3, 'adds': [{'id': 'messages', 'adds': [Thing(42)]}]}
| zc.generationalset | /zc.generationalset-0.4.0.tar.gz/zc.generationalset-0.4.0/README.rst | README.rst |
Notes on implementing generational sets using RDBMSs
====================================================
It should be possible to use relational databases as well.
This document brainstorms how this might work.
.. contents::
Review: the basic object model
==============================
At the heart of the ZODB implementation are 3 mappings:
#. ``{generation -> object}``
#. ``{id -> generation}}``
#. Removals: ``{generation -> id}``
Note that we have both objects and ids. We can't rely on objects
being useful keys, so we need to have separate object ids. Also, in a
general, we may not be able to look up objects given an id, so we keep
a mapping from generation to object.
Leveraging an existing id-to-object mapping
-------------------------------------------
If we had an id to object mapping, we could stop maintaining a
generation to object mapping.
If objects stored in relational databases had primary keys, we could
use these as the basis of generational sets.
Similarly, we could choose to leverage ZODB objects ids to do the same
thing.
An RDBMS approach
=================
Assume we want multiple generational sets, typically qualified by
something like a user id, or by type. For different types, we might
define separate tables to model different sets. For data-qualified
sets, we could use a single table with a column to qualify data.
For example, let's suppose we want to model messages, such that users
can have multiple messages and messages can be sent to multiple users.
Assume we already have a `message`` table (and probably a ``user``
table), with a primary key, ``id``.
We create a ``user_message_generations`` table::
create table user_message_generations (
generation long auto_increment,
user_id foreign key references user(id),
message_id foreign key references message(id)
);
When we send a message, we store the message in the database and, for
each recipient, we::
insert into user_message_generations (user_id, message_id)
values ($recipient_id, $message_id);
In our system, messages are mutable. When we mutate a message, we
update the message record and then::
delete from user_message_generations where message_id = $updated_id;
and for each recipient::
insert into user_message_generations (user_id, message_id)
values ($recipient_id, $message_id);
To get new and updated messages for a user::
select message.* from message, user_message_generations
where message.id = user_message_generations.message_id
and
user_message_generations.user_id = $given_user_id
and
generation > $given_generation
We also allow message retraction. We need to be prepared to delete
messages users have already seen. We use a separate table for this::
create table user_message_generational_removals (
generation long auto_increment,
user_id foreign key references user(id),
message_id foreign key references message(id)
);
When we retract a message, we remove it from
``user_message_generations`` and add it to
``user_message_generational_removals``::
delete from user_message_generations where message_id = $updated_id;
and for each recipient::
insert into user_message_generational_removals (user_id, message_id)
values ($recipient_id, $message_id);
Now, when computing user updates, we also need to look for removals::
select message_id from user_message_generational_removals
where user_message_generational_removals.user_id = $given_user_id
and
generation > $given_generation
At this point, we can delete the message from the message table, at
least as long as we're sure the message id won't be reused.
If we don't mind keeping retracted message records around in the
user_message_generational_removals table, we're done.
If we want to clean up removal records, it gets complicated. One
would be tempted to remove the removal records after we sent them to
the user, but the user might be using multiple clients. One could have
a rule that if a user's generation is > 0 and less than the minimum
removal generation and there are at least as many removals as we're
willing to keep, then we can tell the user's client to discard all
messages and send them a complete set. This is what the ZODB
implementation does. This would require an extra query to get the removal
count for a user.
Unqualified generational sets
-----------------------------
If we have generational sets that aren't qualified based on a user, we
can include the generation in the data records and avoid the extra
tables. For example, suppose we have a task system. All users see all
tasks. When we create or update a task, we can assign it a
generation, and retrieve from the task table without resorting to
joins. We can use a remove flag on the task to keep track of removed
tasks and use that in the query.
Implementing generations
------------------------
The ideal way to implement generations is with sequences that can be
shared across multiple tables. This is necessary if you want to
track different kinds of data for a single generation, or even if you
want separate content and removal tables.
| zc.generationalset | /zc.generationalset-0.4.0.tar.gz/zc.generationalset-0.4.0/RDBMS.rst | RDBMS.rst |
Generational Sets
=================
Generational sets are designed to support a model of data
synchronization between clients.
Assumptions:
- Timeliness is important, but it's OK to display out-of-date data if
disconnected.
- Connectivity is good enough to disallow local updates.
This has the major advantage that there's no need to resolve
conflicting updates made on disconnected clients.
(A valid alternative would be to assume, for a given problem, that
conflicts are so rare that failing hard on conflicts is acceptable,
or, again for a given problem, to have changes that are, by their
nature, non-conflicting.)
The approach is to have sets that have generations.
>>> import zc.generationalset
>>> set = zc.generationalset.GSet()
>>> set.generation
1
Note that ``GSet`` is just a shorthand:
>>> zc.generationalset.GSet.__name__
'GenerationalSet'
Items in a generational set must have ``id`` attributes (although the
attribute can have a different name)::
>>> class Thing:
... def __init__(self, id): self.id = id
... def __repr__(self): return "Thing(%r)" % self.id
>>> thing1 = Thing(1)
When items are added to a set, the set generation increases:
>>> set.add(thing1)
>>> set.generation
2
Containment is based on ids:
>>> thing1 in set
True
>>> Thing(1) in set
True
>>> Thing(2) in set
False
You can also query containment using ids rather than objects.
>>> 1 in set
True
>>> 2 in set
False
As with regular sets, adding the same thing multiple times doesn't
affect the set size:
>>> set.add(thing1)
>>> len(set)
1
>>> list(set)
[Thing(1)]
but it does increase the generation:
>>> set.generation
3
We can ask a set for generational_updates since a given generation:
>>> from pprint import pprint
>>> pprint(set.generational_updates(0))
{'adds': [Thing(1)], 'generation': 3}
>>> pprint(set.generational_updates(1))
{'adds': [Thing(1)], 'generation': 3}
>>> pprint(set.generational_updates(2))
{'adds': [Thing(1)], 'generation': 3}
>>> pprint(set.generational_updates(3))
{'generation': 3}
The generational_updates can include additions or removals:
>>> thing2 = Thing(2)
>>> set.add(thing2)
>>> set.remove(thing1)
>>> pprint(set.generational_updates(3))
{'adds': [Thing(2)], 'generation': 5, 'removals': [1]}
Note that a client can see a removal for an object it has never seen
an update for:
>>> pprint(set.generational_updates(1))
{'adds': [Thing(2)], 'generation': 5, 'removals': [1]}
But a client without data won't see removals:
>>> pprint(set.generational_updates(0))
{'adds': [Thing(2)], 'generation': 5}
A generational set keeps a limited number of removal generations. This
is configurable:
>>> set = zc.generationalset.GSet(maximum_removals=3)
>>> for i in range(4):
... set.add(Thing(i))
... set.remove(Thing(i))
If we ask for a generation that would require a number of removals
greater than the maximum, the output won't contain generational_updates
or removals, signaling that the client should request the
entire set (for example by iterating over it).
>>> pprint(set.generational_updates(0))
{'generation': 9}
>>> pprint(set.generational_updates(1))
{'contents': [], 'generation': 9}
>>> pprint(set.generational_updates(2))
{'contents': [], 'generation': 9}
>>> pprint(set.generational_updates(3))
{'contents': [], 'generation': 9}
>>> pprint(set.generational_updates(4))
{'contents': [], 'generation': 9}
>>> pprint(set.generational_updates(5))
{'generation': 9, 'removals': [2, 3]}
>>> pprint(set.generational_updates(6))
{'generation': 9, 'removals': [2, 3]}
>>> pprint(set.generational_updates(7))
{'generation': 9, 'removals': [3]}
>>> pprint(set.generational_updates(8))
{'generation': 9, 'removals': [3]}
>>> pprint(set.generational_updates(9))
{'generation': 9}
>>> pprint(set.generational_updates(10))
{'generation': 9}
The removals that are lost are evicted from the internal data
structures.
>>> len(set.removals)
3
>>> 0 in set.generations
False
We can iterate over a set:
>>> set = zc.generationalset.GSet()
>>> for i in range(4):
... set.add(Thing(i))
>>> list(set)
[Thing(0), Thing(1), Thing(2), Thing(3)]
We can ask for values from a generation:
>>> list(set.values(4))
[Thing(2), Thing(3)]
An object can only appear in one of adds and removals:
>>> set = zc.generationalset.GSet(maximum_removals=3)
>>> set.add(Thing(1))
>>> from pprint import pprint
>>> pprint(set.generational_updates(0))
{'adds': [Thing(1)], 'generation': 2}
>>> set.remove(Thing(1))
>>> pprint(set.generational_updates(1))
{'generation': 3, 'removals': [1]}
>>> set.add(Thing(1))
>>> pprint(set.generational_updates(0))
{'adds': [Thing(1)], 'generation': 4}
>>> set.remove(Thing(1))
>>> pprint(set.generational_updates(1))
{'generation': 5, 'removals': [1]}
Updating the GSet. An object will be updated only if it is present in the set:
>>> set = zc.generationalset.GSet()
>>> set.add(Thing(1))
>>> pprint(set.generational_updates(0))
{'adds': [Thing(1)], 'generation': 2}
>>> set.changed(Thing(2))
Traceback (most recent call last):
...
KeyError: 2
>>> pprint(set.generational_updates(1))
{'adds': [Thing(1)], 'generation': 2}
>>> set.changed(Thing(1))
>>> pprint(set.generational_updates(2))
{'adds': [Thing(1)], 'generation': 3}
>>> set.add(Thing(2))
>>> pprint(set.generational_updates(3))
{'adds': [Thing(2)], 'generation': 4}
>>> set.changed(Thing(2))
>>> pprint(set.generational_updates(4))
{'adds': [Thing(2)], 'generation': 5}
>>> pprint(set.generational_updates(0))
{'adds': [Thing(1), Thing(2)], 'generation': 5}
Nested sets
-----------
You can define a nested set structure to form a tree. In this
structure, all of the sets share a common generation and you can get
changes for for the entire tree in a single call. To use nested sets,
you pass in a parent set and an id when creating a set:
>>> parent = zc.generationalset.GSet()
>>> child1 = zc.generationalset.GSet('1', parent)
>>> child2 = zc.generationalset.GSet('2', parent)
Note here that we didn't add child1 and child2 to parent.
>>> len(parent)
0
However, when we modify child1 and child2, they'd add themselves to the parent:
>>> child1.add(Thing('11'))
>>> child1.add(Thing('12'))
>>> child2.add(Thing('21'))
>>> child2.add(Thing('22'))
>>> child2.remove(Thing('22'))
>>> len(parent)
2
Now we can ask the parent for updates:
>>> pprint(parent.generational_updates(2))
{'adds': [{'adds': [Thing('12')], 'id': '1'},
{'adds': [Thing('21')], 'id': '2', 'removals': ['22']}],
'generation': 6}
>>> pprint(parent.generational_updates(3)) # doctest: +NORMALIZE_WHITESPACE
{'adds': [{'adds': [Thing('21')], 'id': '2', 'removals': ['22']}],
'generation': 6}
>>> pprint(parent.generational_updates(4))
{'adds': [{'id': '2', 'removals': ['22']}], 'generation': 6}
>>> pprint(parent.generational_updates(5))
{'adds': [{'id': '2', 'removals': ['22']}], 'generation': 6}
>>> pprint(parent.generational_updates(6))
{'generation': 6}
Sets can be nested to arbitrary levels:
>>> child11 = zc.generationalset.GSet('11', child1)
>>> child12 = zc.generationalset.GSet('12', child1)
>>> child111 = zc.generationalset.GSet('111')
>>> child11.add(child111)
>>> child112 = zc.generationalset.GSet('112')
>>> child11.add(child112)
In these last 2 examples, we didn't set the parent. It was set when we
added the children to child11.
>>> child111.parent is child11
True
>>> pprint(parent.generational_updates(6))
{'adds': [{'adds': [{'adds': [{'id': '111'}, {'id': '112'}], 'id': '11'}],
'id': '1'}],
'generation': 8}
When a child is updated, its generation becomes the same as the root object's:
>>> child111.add(Thing('1111'))
>>> child111.generation == parent.generation
True
>>> pprint(parent.generational_updates(8))
{'adds': [{'adds': [{'adds': [{'adds': [Thing('1111')], 'id': '111'}],
'id': '11'}],
'id': '1'}],
'generation': 9}
Notifications
-------------
When a top-level set's generation increases, it calls
``zc.generationalset.notify`` passing itself. The ``notify`` function
doesn't do anything, but applications can replace it to do something
else (including replacing it with zope.event.notify).
>>> import mock
>>> with mock.patch('zc.generationalset.notify') as notify:
... child2.add(Thing('23'))
... notify.assert_called_once_with(parent)
Specifying object ids
---------------------
Normally, object ids come from item ``id`` attributes, but we can
supply ids explicitly:
>>> set = zc.generationalset.GSet()
>>> set.add((Thing(1), Thing(2)), (1, 2))
>>> set.add((Thing(3), Thing(4)), (3, 4))
>>> pprint(set.generational_updates(0))
{'adds': [(Thing(1), Thing(2)), (Thing(3), Thing(4))], 'generation': 3}
>>> (1, 2) in set
True
>>> (1, 3) in set
False
Retrieving objects by their ids
-------------------------------
Objects can be retrieved by the object id:
>>> ob = (Thing(1), Thing(2))
>>> set.add(ob, (1,2))
>>> set[(1,2)] == ob
True
Attempting to retrieve a non-existing object results in a KeyError:
>>> set[(42, 43)]
Traceback (most recent call last):
...
KeyError: (42, 43)
Using alternate ids
-------------------
By default, an ``id`` attribute is to get ids, but you can specify an
alternate id attribute:
>>> class Other:
... def __init__(self, **kw): self.__dict__.update(kw)
... def __repr__(self): return "Other(%r)" % self.__dict__
>>> set = zc.generationalset.GSet(id_attribute='name')
>>> set.add(Other(name='foo'))
>>> set.add(Other(name='bar'))
>>> set.remove(Other(name='foo'))
>>> pprint(set.generational_updates(1))
{'adds': [Other({'name': 'bar'})], 'generation': 4, 'removals': ['foo']}
For more complicated situations, you can subclass ``GenerationalSet``
and override ```get_id(ob)``:
>>> class StringIdGenerationalSet(zc.generationalset.GenerationalSet):
... def get_id(self, ob):
... return str(super(StringIdGenerationalSet, self).get_id(ob))
>>> set = StringIdGenerationalSet()
>>> set.add(Thing(1))
>>> set.add(Thing(2))
>>> set.remove(Thing(1))
>>> pprint(set.generational_updates(1))
{'adds': [Thing(2)], 'generation': 4, 'removals': ['1']}
Thanks to JavaScript, the need to convert integer ids to strings is
pretty common, so StringIdGenerationalSet is included:
>>> zc.generationalset.SGSet.__name__
'StringIdGenerationalSet'
>>> set = zc.generationalset.SGSet()
>>> set.add(Thing(1))
>>> set.add(Thing(2))
>>> set.remove(Thing(1))
>>> pprint(set.generational_updates(1))
{'adds': [Thing(2)], 'generation': 4, 'removals': ['1']}
There's also a flavor of generational set that uses items as their own ids:
>>> zc.generationalset.VGSet.__name__
'ValueGenerationalSet'
>>> set = zc.generationalset.VGSet()
>>> set.add((1, 2))
>>> set.add((3, 4))
>>> set.remove((1, 2))
>>> pprint(set.generational_updates(1))
{'adds': [(3, 4)], 'generation': 4, 'removals': [(1, 2)]}
| zc.generationalset | /zc.generationalset-0.4.0.tar.gz/zc.generationalset-0.4.0/src/zc/generationalset/README.txt | README.txt |
import BTrees.LOBTree
import BTrees.OLBTree
import BTrees.OOBTree
import persistent
class GenerationalSet(persistent.Persistent):
    """A persistent set that records a generation number for every change.

    Each addition or removal advances the set's generation, and clients
    can ask for everything that changed since a generation they have seen
    via ``generational_updates``.  Sets may be nested: a child set adds
    itself to its parent whenever it changes, so all sets in a tree share
    the root's generation.

    Removal records are capped at ``maximum_removals``; when the cap is
    exceeded, the oldest removal records are evicted.
    """

    def __init__(
        self,
        id=None,                  # id of this set within a parent (nested sets)
        parent=None,              # enclosing GenerationalSet, if any
        maximum_removals=99,      # cap on retained removal records
        superset=False,  # Ignored, for backward compatibility
        id_attribute='id',        # attribute from which item ids are read
        ):
        self.id = id
        self.parent = parent
        self.maximum_removals = maximum_removals
        self.id_attribute = id_attribute
        self.contents = BTrees.LOBTree.BTree()     # {generation -> ob}
        self.generations = BTrees.OLBTree.BTree()  # {id -> generation}
        self.removals = BTrees.LOBTree.BTree()     # {generation -> id}
        if parent is not None:
            # Nested sets share the parent's generation counter.
            self.generation = parent.generation
        else:
            self.generation = 1

    def get_id(self, ob):
        """Return the id of *ob*, read from ``self.id_attribute``.

        Subclasses may override to customize id computation.
        """
        return getattr(ob, self.id_attribute)

    def _updated(self):
        # Advance the generation.  For nested sets, this re-adds the set
        # to its parent (propagating up to the root) and copies the
        # parent's new generation; for a root set, it bumps the counter
        # and calls the module-level ``notify`` hook.
        if self.parent is not None:
            self.parent.add(self)
            self.generation = self.parent.generation
        else:
            self.generation += 1
            notify(self)

    def add(self, ob, id=None):
        """Add (or re-add) *ob* under *id*, advancing the generation.

        If *id* is omitted, it is computed with ``get_id``.  Re-adding an
        object that is present (or was removed) discards its previous
        add/removal record, so an id appears in at most one of contents
        and removals.
        """
        if id is None:
            id = self.get_id(ob)
        generation = self.generations.get(id, None)
        if generation is None:
            # First time we see this id.  Adopt parent-less child sets.
            if isinstance(ob, GenerationalSet) and ob.parent is None:
                ob.parent = self
        else:
            # Drop any stale record for this id at its old generation.
            self.contents.pop(generation, None)
            self.removals.pop(generation, None)
        self._updated()
        self.contents[self.generation] = ob
        self.generations[id] = self.generation

    def changed(self, ob, id=None):
        """Record that *ob* changed.

        Raises ``KeyError`` if the object's id is not already in the set;
        otherwise this is equivalent to re-adding the object.
        """
        if id is None:
            id = self.get_id(ob)
        # Raises KeyError when the id is unknown (intentional guard).
        self.generations[id]
        self.add(ob, id)

    def remove(self, ob, id=None):
        """Remove *ob*, recording the removal at the new generation.

        Raises ``KeyError`` if the id is not present.  If more than
        ``maximum_removals`` removal records accumulate, the oldest are
        evicted (and forgotten entirely).
        """
        if id is None:
            id = self.get_id(ob)
        generation = self.generations[id]
        self.contents.pop(generation)
        self._updated()
        removals = self.removals
        removals[self.generation] = id
        self.generations[id] = self.generation
        # Evict the oldest removal records beyond the configured cap.
        while len(removals) > self.maximum_removals:
            id = removals.pop(removals.minKey())
            self.generations.pop(id)

    def __getitem__(self, id):
        """Return the object stored under *id*; raise KeyError if absent."""
        generation = self.generations[id]
        return self.contents[generation]

    def __len__(self):
        return len(self.contents)

    def __iter__(self):
        # Iterates objects in generation (i.e. insertion/update) order.
        return iter(self.contents.values())

    def values(self, minimim_generation):
        """Return contained objects added/updated at or after the given
        generation."""
        return self.contents.values(minimim_generation)

    def __contains__(self, ob_or_id):
        # Accepts either an object (id computed via get_id) or a raw id.
        try:
            id = self.get_id(ob_or_id)
        except AttributeError:
            id = ob_or_id
        generation = self.generations.get(id, None)
        return generation is not None and generation in self.contents

    def generational_updates(self, generation, subset=False):
        """Return a dict describing changes since *generation*.

        The result contains ``'generation'`` (or ``'id'`` when called
        recursively for a nested subset), plus ``'adds'`` and
        ``'removals'`` as applicable.  If the caller is so far behind
        that removal records have been evicted, the full ``'contents'``
        are returned instead of incremental adds.
        """
        result = {}
        if subset:
            result['id'] = self.id
        else:
            result['generation'] = self.generation
        if generation >= self.generation:
            return result  # common short circuit
        if (len(self.removals) >= self.maximum_removals and
            generation < self.removals.minKey() and
            generation
            ):
            # Caller predates our oldest removal record; we can't compute
            # an incremental update, so send everything.
            values = self.contents.values()
            key = 'contents'
        else:
            values = self.contents.values(generation+1)
            key = 'adds'
        if generation:
            removals = list(self.removals.values(generation+1))
            if removals:
                result['removals'] = removals
        values = list(values)
        if values or key == 'contents':
            # Nested sets are rendered recursively as subset dicts.
            for i, v in enumerate(values):
                if isinstance(v, GenerationalSet):
                    values[i] = v.generational_updates(generation, True)
            result[key] = values
        return result


GSet = GenerationalSet  # short alias
class StringIdGenerationalSet(GenerationalSet):
    """A generational set whose ids are converted to strings.

    This helps when large integer ids must survive a round trip through
    JavaScript.
    """

    def get_id(self, ob):
        base_id = super(StringIdGenerationalSet, self).get_id(ob)
        return str(base_id)


SGSet = StringIdGenerationalSet
class ValueGenerationalSet(GenerationalSet):
    """A set in which items are their own ids.

    Items must be orderable, since ids are used as BTree keys.
    """

    def get_id(self, ob):
        # The item itself serves as its id.
        return ob


VGSet = ValueGenerationalSet  # short alias
def notify(s):
    """Hook called with a top-level set when its generation increases.

    Does nothing by default.  Replace me to be notified — for example,
    with ``zope.event.notify``.
    """
HTML/DOM Checker
================
When testing code (like widgets) that generates DOM nodes, we want to
be able to make assertions about what matters. Examples of things we'd
like to ignore:
- attribute order
- extra attributes
- whitespace differences
- extra classes
- extra nodes
zc.htmlchecker provides a checker object that can be used by itself,
or as a doctest output checker.
.. contents::
Getting started
---------------
Let's look at some examples.
Here's a sample expected string::
<body>
<button class="mybutton">press me</button>
</body>
.. -> expected
Let's create a checker:
>>> import zc.htmlchecker
>>> checker = zc.htmlchecker.HTMLChecker()
You can call its check method with expected and observed HTML:
>>> checker.check(
... expected,
... """<html><body><button x='1' class="widget mybutton">press me</button>
... </body></html>""")
If there's a match, then nothing is returned. For there to be a
match, the expected output merely has to be unambiguously found in the
observed output. In the above example, there was a single body tag,
so it knew how to do the match. Note that whitespace differences were
ignored, as were extra observed attributes and an extra class.
doctest Checker
---------------
To use ``zc.htmlchecker`` as a doctest checker, pass an instance of
``HTMLChecker`` as an output checker when setting up your doctests.
.. low-level doctest checker tests
When used as a doctest output checker, its ``check_output`` method
returns a boolean indicating whether there was a match:
>>> checker.check_output(
... expected,
... """<html><body><button x='1' class="mybutton">press me</button>
... </body></html>""", 0)
True
And the ``output_difference`` shows differences. It's a little weird
(not our fault) in that it takes an example, rather than a wanted
text:
>>> class Example:
... def __init__(self, want): self.want = want
>>> checker.output_difference(
... Example(expected),
... """<html><body><button x='1' class="mybutton">press me</button>
... </body></html>""", 0)
''
Now let's make it fail:
>>> checker.check(
... expected,
... """<html><body><button x='1' class="button">press me</button>
... </body></html>""")
Traceback (most recent call last):
...
MatchError: missing class: mybutton
Expected:
<button class="mybutton">
press me
</button>
<BLANKLINE>
Observed:
<button class="button" x="1">
press me
</button>
<BLANKLINE>
>>> checker.check_output(
... expected,
... """<html><body><button x='1' class="button">press me</button>
... </body></html>""", 0)
False
>>> print checker.output_difference(
... Example(expected),
... """<html><body><button x='1' class="button">press me</button>
... </body></html>""", 0),
missing class: mybutton
Expected:
<button class="mybutton">
press me
</button>
<BLANKLINE>
Observed:
<button class="button" x="1">
press me
</button>
When used as a doctest checker, expected text that doesn't start with
``<`` is checked with the default checker, or a checker you pass in as
base.
.. test above
>>> checker.check_output('1', '2', 0)
False
>>> import doctest
>>> checker.check_output('1...3', '123', doctest.ELLIPSIS)
True
>>> class FooChecker:
... def check_output(self, want, got, flags):
... return 'foo' in got.lower()
>>> checker2 = zc.htmlchecker.HTMLChecker(FooChecker())
>>> checker2.check_output('1', '2 foo', 0)
True
>>> checker2.check_output('<a>', '2 foo', 0)
False
You may want to have some html examples checked with another
checker. In that case, you can specify a prefix. Only examples that
begin with the prefix will be checked with the HTML checker, and the
prefix will be removed.
.. test above
>>> checker2 = zc.htmlchecker.HTMLChecker(FooChecker(), prefix="<>")
>>> checker2.check_output('<a></a>', '2 foo', 0)
True
>>> checker2.check_output('<><a></a>', '2 foo', 0)
False
>>> checker2.check_output('<><a></a>', '<a></a>', 0)
True
>>> checker3 = zc.htmlchecker.HTMLChecker(prefix="<>")
>>> checker3.check_output('<><a></a>', '<b><a></a></b>', 0)
True
>>> checker3.check_output('<a></a>', '<b><a></a></b>', 0)
False
>>> print checker3.output_difference(Example('<a></a>'), '<c></c>', 0)
Expected:
<a></a>Got:
<c></c>
>>> print checker3.output_difference(Example('<><a></a>'), '<c></c>', 0)
Couldn't find wildcard match
Expected:
<a>
</a>
Observed:
<c>
</c>
Expecting multiple nodes
------------------------
We can expect more than a single node::
<button>Cancel</button>
<button>Save</button>
.. -> expected
This example expects 2 button nodes somewhere in the output.
>>> checker.check(expected,
... """<html><body>
... <button id='cancel_button' class="button">Cancel</button>
... <button id='save_button' class="button">Save</button>
... </body></html>""")
But if there isn't a match, it can be harder to figure out what's
wrong:
>>> checker.check(expected,
... """<html><body>
... <button id='cancel_button' class="button">Cancel</button>
... <button id='save_button' class="button">OK</button>
... </body></html>""")
Traceback (most recent call last):
...
MatchError: Couldn't find wildcard match
Expected:
<button>
Save
</button>
<BLANKLINE>
Observed:
<html>
<body>
<button class="button" id="cancel_button">
Cancel
</button>
<button class="button" id="save_button">
OK
</button>
</body>
</html>
We'll come back to wild card matches in a bit. Here, the matcher
detected that it didn't match a button, but couldn't be specific about
which button was the problem. We can make its job easier using ids::
<button id='cancel_button'>Cancel</button>
<button id='save_button'>Save</button>
.. -> expected
Now we're looking for button nodes with specific ids.
>>> checker.check(expected,
... """<html><body>
... <button id='cancel_button' class="button">Cancel</button>
... <button id='save_button' class="button">OK</button>
... </body></html>""")
Traceback (most recent call last):
...
MatchError: text nodes differ u'Save' != u'OK'
Expected:
<button id="save_button">
Save
</button>
<BLANKLINE>
Observed:
<button class="button" id="save_button">
OK
</button>
<BLANKLINE>
That's a lot more helpful.
Wildcards
---------
Speaking of wild card matches, sometimes you want to ignore
intermediate nodes. You can do this by using an ellipsis at the top of
a node that has intermediate nodes you want to ignore::
<form>
...
<button id='cancel_button'>Cancel</button>
<button id='save_button'>Save</button>
</form>
.. -> expected
In this case, we want to find button nodes inside a form node. We
don't care if there are intermediate nodes.
>>> checker.check(expected,
... """<html><body>
... <form>
... <div>
... <button id='cancel_button' class="button">Cancel</button>
... <button id='save_button' class="button">Save</button>
... </div>
... </form>
... </body></html>""")
When looking for expected text, we basically do a wild-card match on
the observed text.
Sometimes, we want to check for text nodes that may be embedded in
some generated construct that we can't control (like a grid produced
by a library). To do that, include a text node that starts with a
line containing an ellipsis. For example, we may expect a grid/table
with some data::
<div id="mygrid" name="">
...
Name Favorite Color
Sally Red
Bill Blue
</div>
.. -> expected
We don't know exactly how our library is going to wrap the data, so we
just test for the presence of the data.
>>> import sys
>>> try: checker.check(expected,
... """<html><body>
... <div id='mygrid' name='' xid="1">
... <table>
... <tr><th>Name</th><th>Favorite Color</th></tr>
... <tr><td>Sally</td><td>Red </td></tr>
... <tr><td>Bill </td><td>Green</td></tr>
... </table>
... </div>
... </body></html>""")
... except zc.htmlchecker.MatchError:
... error = sys.exc_info()[1]
... else: print 'oops'
>>> print error # doctest: +ELLIPSIS
Blue not found in text content.
...
>>> checker.check(expected,
... """<html><body>
... <div id='mygrid' name='' xid="1">
... <table>
... <tr><th>Name</th><th>Favorite Color</th></tr>
... <tr><td>Sally</td><td>Red </td></tr>
... <tr><td>Bill </td><td>Blue</td></tr>
... </table>
... </div>
... </body></html>""")
You can use other BeautifulSoup parsers
---------------------------------------
HTMLChecker uses BeautifulSoup. It uses the ``'html5lib'`` parser by
default, but you can pass a different parser name. You probably want
to steer clear of the ``'html.parser'`` parser, as it's buggy:
>>> checker = zc.htmlchecker.HTMLChecker(parser='html.parser')
>>> checker.check('<input id="x">', '<input id="x"><input>')
Traceback (most recent call last):
...
MatchError: Wrong number of children 1!=0
Expected:
<input id="x"/>
<BLANKLINE>
Observed:
<input id="x">
<input/>
</input>
Here, ``'html.parser'`` decided that the input tags needed closing
tags, even though the HTML input tag is empty. This is likely in part
because the underlying parser is an XHTML parser.
Changes
=======
0.1.0 2013-08-31
----------------
Initial release.
| zc.htmlchecker | /zc.htmlchecker-0.1.0.tar.gz/zc.htmlchecker-0.1.0/README.rst | README.rst |
from bs4 import BeautifulSoup
import doctest
import re
class MatchError(Exception):
    """Raised when observed HTML does not match the expected HTML.

    Constructed with ``(message, expected, observed)``, where *expected*
    and *observed* may be plain strings or BeautifulSoup nodes.
    """

    def __str__(self):
        message, expected, observed = self.args
        out = []
        out.append(maybe_encode(message))
        out.append('Expected:')
        # Soup nodes are prettified; plain strings are used as-is.
        if not isinstance(expected, basestring):
            expected = expected.prettify()
        out.append(maybe_encode(expected))
        out.append('Observed:')
        if not isinstance(observed, basestring):
            observed = observed.prettify()
        out.append(maybe_encode(observed))
        return '\n'.join(out)
class HTMLChecker(doctest.OutputChecker):
    """A doctest output checker that compares HTML structurally.

    Expected output that looks like HTML (starts with ``<``), or that
    starts with *prefix* when one is given, is parsed with BeautifulSoup
    and matched against the observed output; anything else is delegated
    to *base* (a standard ``doctest.OutputChecker`` by default).
    """

    def __init__(self, base=None, prefix=None, parser='html5lib'):
        if base is None:
            base = doctest.OutputChecker()
        self.base = base
        self.prefix = prefix
        self.parser = parser

    def _bs(self, want):
        # Parse *want* into a BeautifulSoup tree.
        bs = BeautifulSoup(want, self.parser)
        if self.parser == 'html5lib':
            # html5lib adds body, head and html tags, which isn't what we want.
            # Strip any of those wrappers the input didn't actually contain.
            want = want.lower()
            if '<body' not in want:
                bs.body.unwrap()
            if '<head' not in want:
                bs.head.decompose()
            if '<html' not in want:
                bs.html.unwrap()
        return bs

    def check(self, want, got):
        """Raise MatchError unless *want* is (wildcard-)found in *got*."""
        matches_(self._bs(got), self._bs(want), wild=True)

    def applicable(self, want):
        # Return the HTML portion of *want* if this checker should handle
        # it; return None (falsy) to delegate to the base checker.
        if self.prefix:
            if want.startswith(self.prefix):
                return want[len(self.prefix):]
        elif want.startswith('<'):
            return want

    def check_output(self, want, got, optionflags):
        """doctest hook: return True when *got* matches *want*."""
        expected = self.applicable(want)
        if not expected:
            return self.base.check_output(want, got, optionflags)
        try:
            self.check(expected, got)
        except MatchError:
            return False
        else:
            return True

    def output_difference(self, example, got, optionflags):
        """doctest hook: describe the mismatch ('' when there is none).

        Note: takes an *example* object (with a ``want`` attribute), not
        the wanted text itself.
        """
        expected = self.applicable(example.want)
        if not expected:
            return self.base.output_difference(example, got, optionflags)
        try:
            self.check(expected, got)
        except MatchError, v:
            return str(v)
        else:
            return ''
def maybe_encode(s):
    """Return *s* encoded as UTF-8 when it is a unicode string.

    Byte strings (and anything else) are returned unchanged.
    """
    if not isinstance(s, unicode):
        return s
    return s.encode('utf8')
def beautifulText(node):
    """Return the text content of a BeautifulSoup node.

    Text from child nodes is joined with single spaces; nodes that are
    neither strings nor tags contribute the empty string.
    """
    if isinstance(node, unicode):
        return node
    if not hasattr(node, 'name'):
        return ''
    return u' '.join(beautifulText(child) for child in node)
def matches_(observed, expected, wild=False):
    """Recursively check that *observed* matches *expected*.

    Raises ``MatchError`` on the first mismatch; returns None on success.
    With ``wild`` true (or when *expected* starts with a '...' text
    node), expected children are searched for anywhere below *observed*
    instead of being matched positionally.
    """
    # Tag names must agree.
    if getattr(expected, 'name', None) != getattr(observed, 'name', None):
        raise MatchError("tag names don't match", expected, observed)
    # Every expected attribute must be present with a matching value;
    # extra observed attributes are ignored.
    for name, e_val in expected.attrs.items():
        if not isinstance(e_val, basestring):
            # Multi-valued attributes (e.g. class) come back as lists.
            e_val = ' '.join(e_val)
        o_val = observed.get(name)
        if o_val is None:
            raise MatchError("missing "+name, expected, observed)
        if not isinstance(o_val, basestring):
            o_val = ' '.join(o_val)
        # An expected value written /like this/ is treated as a regexp.
        if (e_val != o_val and not
            (re.match(r'^/.+/$', e_val) and re.match(e_val[1:-1], o_val))
            ):
            if name == 'class':
                # Expected classes need only be a subset of observed ones.
                oclasses = set(o_val.strip().split())
                for cname in e_val.strip().split():
                    if cname not in oclasses:
                        raise MatchError("missing class: "+cname,
                                         expected, observed)
            else:
                raise MatchError(
                    "attribute %s has different values: %r != %r"
                    % (name, e_val, o_val),
                    expected, observed)
    # A text node whose first line is '...' switches this subtree to
    # wildcard matching; the marker itself is stripped out.
    for enode in expected:
        if (not enode.name) and enode.strip().split('\n')[0] == '...':
            enode.replace_with(enode.split('...', 1)[1])
            wild = True
            break
    if wild:
        match_text = ''
        for enode in expected:
            if enode.name:
                if enode.get('id'):
                    # Match by id anywhere below *observed*.
                    onode = observed(id=enode['id'])
                    if not onode:
                        raise MatchError(
                            "In wildcard id search, couldn't find %r" %
                            enode['id'],
                            enode, observed)
                    matches_(onode[0], enode);
                else:
                    # Try every same-named descendant until one matches.
                    onodes = observed(enode.name)
                    for onode in onodes:
                        try:
                            matches_(onode, enode);
                        except MatchError:
                            if len(onodes) == 1:
                                raise # won't get a second chance, so be precise
                        else:
                            break
                    else:
                        raise MatchError(
                            "Couldn't find wildcard match", enode, observed)
            else:
                # Accumulate expected text for an in-order token search.
                match_text += ' ' + enode.encode('utf8')
        match_text = match_text.strip()
        if match_text:
            text = beautifulText(observed)
            # Each expected token must occur, in order, in the observed
            # text content.
            for token in match_text.split():
                try:
                    i = text.index(token)
                except ValueError:
                    raise MatchError(token + " not found in text content.",
                                     expected, observed)
                text = text[i+len(token):]
    else:
        # Positional match: compare non-blank children pairwise.
        enodes = [n for n in expected
                  if not isinstance(n, basestring) or n.strip()]
        onodes = [n for n in observed
                  if not isinstance(n, basestring) or n.strip()]
        if len(enodes) != len(onodes):
            raise MatchError(
                "Wrong number of children %r!=%r"
                % (len(onodes), len(enodes)),
                expected, observed)
        for enode, onode in zip(enodes, onodes):
            if enode.name or onode.name:
                matches_(onode, enode)
            else:
                e = beautifulText(enode).strip()
                o = beautifulText(onode).strip()
                if e != o:
                    raise MatchError(
                        'text nodes differ %r != %r' % (e, o),
                        expected, observed)
from zope import i18n
from zc.i18n.i18n import _
# Translatable templates for each unit, in singular and plural forms.
# ${additional} carries the already-formatted next-smaller unit (or '')
# so that e.g. "1 day 2 hours" composes from two templates.
ONE_DAY = _('${number} day ${additional}')
MULTIPLE_DAYS = _('${number} days ${additional}')
ONE_HOUR = _('${number} hour ${additional}')
MULTIPLE_HOURS = _('${number} hours ${additional}')
ONE_MINUTE = _('${number} minute ${additional}')
MULTIPLE_MINUTES = _('${number} minutes ${additional}')
ONE_SECOND = _('${number} second')
MULTIPLE_SECONDS = _('${number} seconds')
NO_TIME = _('No time')
def format(request, duration):
    """Return a translated, human-readable rendering of *duration*.

    The two most significant non-zero units are rendered (days/hours,
    hours/minutes, or minutes/seconds), e.g. "1 day 2 hours".  A zero
    duration yields the translation of "No time"; sub-second components
    (microseconds) are ignored.  Negative durations are rendered with
    negative numbers.
    """
    # This could be better, and better internationalized, but it is a
    # start.  ICU does not appear to support internationalizing durations
    # over a day, at least as found in
    # http://icu.sourceforge.net/apiref/icu4c/classRuleBasedNumberFormat.html
    # and related docs.
    # The approach here is to do what English needs in a reasonably
    # flexible way and hope others tell us if we need to do more.
    if (duration.days > 0
        or duration.days < -1
        or duration.days == -1 and not duration.seconds):
        # Magnitude of at least one day: render days and hours.
        if duration.days > 0 or not duration.seconds:
            big = duration.days
            little = duration.seconds // 3600
        else:  # negative and seconds
            # timedelta normalizes a negative duration to negative days
            # plus non-negative seconds; undo that so both numbers carry
            # the sign.
            big = duration.days + 1
            seconds = duration.seconds - 86400
            abs_seconds = abs(seconds)
            # seconds is exactly +/-abs_seconds, so this is exactly -1.
            # Floor division (//) keeps the result an int on Python 3;
            # true division would make `little` a float ("2.0 hours").
            sign = seconds // abs_seconds
            little = (abs_seconds // 3600) * sign
        main = (MULTIPLE_DAYS, ONE_DAY)
        additional = (MULTIPLE_HOURS, ONE_HOUR)
    elif duration.days or duration.seconds:
        # Less than a day in magnitude, but non-zero.
        if duration.days == -1:
            seconds = duration.seconds - 86400
        else:
            seconds = duration.seconds
        abs_seconds = abs(seconds)
        # Exactly +1 or -1; floor division so the sign stays an int.
        sign = seconds // abs_seconds
        if abs_seconds // 3600:
            big = (abs_seconds // 3600) * sign
            little = ((abs_seconds % 3600) // 60) * sign
            main = (MULTIPLE_HOURS, ONE_HOUR)
            additional = (MULTIPLE_MINUTES, ONE_MINUTE)
        elif abs_seconds // 60:
            big = (abs_seconds // 60) * sign
            little = (abs_seconds % 60) * sign
            main = (MULTIPLE_MINUTES, ONE_MINUTE)
            additional = (MULTIPLE_SECONDS, ONE_SECOND)
        else:
            big = seconds
            little = None
            main = (MULTIPLE_SECONDS, ONE_SECOND)
    else:
        return i18n.translate(NO_TIME, context=request)
    if little:
        # Render the smaller unit first; it is spliced into the bigger
        # unit's template via ${additional}.
        message = additional[abs(little) == 1]
        additional = i18n.translate(
            i18n.Message(
                message,
                mapping={'number': str(little), 'additional': ''}),
            context=request)
    else:
        additional = ''
    message = main[abs(big) == 1]
    return i18n.translate(
        i18n.Message(
            message,
            mapping={'number': str(big), 'additional': additional}),
        context=request)
import datetime
import pytz
from zope.interface.common.idatetime import ITZInfo
def now(request):
    """Return the current time localized to the request's timezone.

    The timezone is obtained by adapting *request* to ``ITZInfo``.
    """
    tz = ITZInfo(request)
    return datetime.datetime.now(tz)
def format(request, dt=None):
    """Render *dt* using the request locale's medium date-time format.

    When *dt* is omitted, the current time in the request's timezone is
    used.
    """
    if dt is None:
        dt = now(request)
    return request.locale.dates.getFormatter('dateTime', 'medium').format(dt)
def normalize(request, dt):
    """this method normalizes datetime instances by converting them to
    utc, daylight saving times are also taken into account. This
    method requires an adapter to get the tzinfo from the request.

    >>> from zope import component, interface
    >>> import pytz
    >>> from zope.interface.common.idatetime import ITZInfo
    >>> from zope.publisher.interfaces.browser import IBrowserRequest
    >>> from zope.publisher.browser import TestRequest

    >>> requestTZ = pytz.timezone('Europe/Vienna')

    >>> @interface.implementer(ITZInfo)
    ... @component.adapter(IBrowserRequest)
    ... def tzinfo(request):
    ...     return requestTZ

    >>> component.provideAdapter(tzinfo)
    >>> dt = datetime.datetime(2006,5,1,12)
    >>> request = TestRequest()

    The Vienna timezone has a 2 hour offset to utc at this date.

    >>> normalize(request,dt)
    datetime.datetime(2006, 5, 1, 10, 0, tzinfo=<UTC>)

    At this date the timezone has only a one hour offset.

    >>> dt = datetime.datetime(2006,2,1,12)
    >>> normalize(request,dt)
    datetime.datetime(2006, 2, 1, 11, 0, tzinfo=<UTC>)

    Normalizing UTC to UTC should work also

    >>> dt = datetime.datetime(2006,5,1,12,tzinfo=pytz.UTC)
    >>> normalize(request,dt)
    datetime.datetime(2006, 5, 1, 12, 0, tzinfo=<UTC>)

    This way too UTC to UTC

    >>> requestTZ = pytz.UTC
    >>> dt = datetime.datetime(2006,5,1,12)
    >>> normalize(request,dt)
    datetime.datetime(2006, 5, 1, 12, 0, tzinfo=<UTC>)

    Just so you would know that these are possible -

    The time that does not exist (defaulting to is_dst=False will raise an
    index error in this case):

    >>> requestTZ = pytz.timezone('Europe/Vilnius')
    >>> dt = datetime.datetime(2006,3,26,3,30)
    >>> normalize(request,dt)
    Traceback (most recent call last):
    ...
    NonExistentTimeError: 2006-03-26 03:30:00

    An ambiguous time:

    >>> dt = datetime.datetime(2006,10,29,3,30)
    >>> normalize(request,dt)
    Traceback (most recent call last):
    ...
    AmbiguousTimeError: 2006-10-29 03:30:00
    """
    if dt.tzinfo is None:
        # Interpret naive datetimes in the request's timezone.
        # is_dst=None makes pytz raise NonExistentTimeError /
        # AmbiguousTimeError for DST-transition times (see doctests).
        tzinfo = ITZInfo(request)
        dt = tzinfo.localize(dt, is_dst=None)
    return dt.astimezone(pytz.utc)
====================================
Internet Cache Protocol (ICP) Server
====================================
In multi-machine (or multi-process) web server installations some set of web
servers will likely be more able to quickly service an HTTP request than
others. HTTP accelerators (reverse proxies) like Squid_ can use ICP_ queries
to find the most appropriate server(s) to handle a particular request. This
package provides a small UDP server that can respond to ICP queries based on
pluggable policies.
.. _ICP: http://www.faqs.org/rfcs/rfc2186.html
.. _Squid: http://www.squid-cache.org/
Change history
==============
1.0.0 (2008-02-07)
------------------
Initial release.
When ICP is Useful
==================
When generating content dynamically, having all the data available locally to
fulfil a request can have a profound effect on service time. One approach to
having the data available is to have one or more caches. In some situations
those caches are not large enough to contain the entire working set required
for efficient servicing of incoming requests. Adding additional request
handlers (servers or processes) doesn't help because the time to load the data
from one or more storage servers (e.g., databases) is the dominant factor in
request time. In those situations the request space can be partitioned such
that the portion of the working set a particular handler (server or process) is
responsible for can fit in its cache(s).
Statically configuring request space partitioning may be difficult,
error-prone, or even impossible. In those circumstances it would be nice to
let the origin servers provide feedback on whether or not they should handle a
particular request. That's where ICP comes in.
Hits and Misses
===============
When an ICP query request is received, the server can return one of ICP_OP_HIT,
ICP_OP_MISS, ICP_OP_ERR, ICP_OP_MISS_NOFETCH, or ICP_OP_DENIED. The meanings
of these result codes are defined in the ICP RFC as below.
ICP_OP_HIT
An ICP_OP_HIT response indicates that the requested URL exists in
this cache and that the requester is allowed to retrieve it.
ICP_OP_MISS
An ICP_OP_MISS response indicates that the requested URL does not
exist in this cache. The querying cache may still choose to fetch
the URL from the replying cache.
ICP_OP_ERR
An ICP_OP_ERR response indicates some kind of error in parsing or
handling the query message (e.g. invalid URL).
ICP_OP_MISS_NOFETCH
An ICP_OP_MISS_NOFETCH response indicates that this cache is up,
but is in a state where it does not want to handle cache misses.
An example of such a state is during a startup phase where a cache
might be rebuilding its object store. A cache in such a mode may
wish to return ICP_OP_HIT for cache hits, but not ICP_OP_MISS for
misses. ICP_OP_MISS_NOFETCH essentially means "I am up and
running, but please don't fetch this URL from me now."
Note, ICP_OP_MISS_NOFETCH has a different meaning than
ICP_OP_MISS. The ICP_OP_MISS reply is an invitation to fetch the
URL from the replying cache (if their relationship allows it), but
ICP_OP_MISS_NOFETCH is a request to NOT fetch the URL from the
replying cache.
ICP_OP_DENIED
An ICP_OP_DENIED response indicates that the querying site is not
allowed to retrieve the named object from this cache. Caches and
proxies may implement complex access controls. This reply must be
be interpreted to mean "you are not allowed to request this
particular URL from me at this particular time."
Because we want to use ICP to communicate about whether or not an origin server
(as opposed to a cache server) wants to handle a particular request, we will
use slightly different definitions for some of the result codes.
ICP_OP_HIT
An ICP_OP_HIT response indicates that the queried server would prefer to
handle the HTTP request. The reason the origin server is returning a hit
might be that it has recently handled "similar" requests, or that it has
been configured to handle the partition of the URL space occupied by the
given URL.
ICP_OP_MISS
An ICP_OP_MISS response indicates that the queried server does not have a
preference to service the request, but will be able to handle the request
nonetheless. This is normally the default response.
ICP_OP_MISS_NOFETCH
An ICP_OP_MISS_NOFETCH response indicates that the requesting server may
not request the named object from this server. This may be because the
origin server is under heavy load at the time or some other policy
indicates that the request must not be forwarded at the moment.
The response (hit, miss, etc.) to a particular ICP query is based on one or
more configured policies. The mechanics of defining and registering those
policies is explained in the next section.
This package does not implement the deprecated ICP_OP_HIT_OBJ.
Defining Policies
=================
To use this package one or more policies must be defined and registered. The
Zope component architecture is used to manage the policies as "utilities".
Policies must implement the IICPPolicy interface.
>>> from zc.icp.interfaces import IICPPolicy
>>> IICPPolicy
<InterfaceClass zc.icp.interfaces.IICPPolicy>
At this point no policy is registered, so any URL will generate a miss.
>>> import zc.icp
>>> zc.icp.check_url('http://example.com/foo')
'ICP_OP_MISS'
Let's say we want to return an ICP_OP_HIT for all URLs containing "foo", we
can define that policy like so:
>>> def foo_hit_policy(url):
... if 'foo' in url:
... return 'ICP_OP_HIT'
When registering this policy we have to provide an associated name. Any
subsequent registration with the same name will override the previous
registration. The default name is the empty string.
>>> import zope.component
>>> zope.component.provideUtility(foo_hit_policy, IICPPolicy, 'foo')
The registered policy is immediately available.
>>> zc.icp.check_url('http://example.com/foo')
'ICP_OP_HIT'
Non-foo URLs are still misses.
>>> zc.icp.check_url('http://example.com/bar')
'ICP_OP_MISS'
Now we can add another policy to indicate that we don't want any requests with
"baz" in them.
>>> def baz_hit_policy(url):
... if 'baz' in url:
... return 'ICP_OP_MISS_NOFETCH'
>>> zope.component.provideUtility(baz_hit_policy, IICPPolicy, 'baz')
>>> zc.icp.check_url('http://example.com/foo')
'ICP_OP_HIT'
>>> zc.icp.check_url('http://example.com/bar')
'ICP_OP_MISS'
>>> zc.icp.check_url('http://example.com/baz')
'ICP_OP_MISS_NOFETCH'
The policies are prioritized by name. The first policy to return a non-None
result is followed. Therefore if we check a URL with both "foo" and "baz" in
it, the policy for "baz" is followed.
>>> zc.icp.check_url('http://example.com/foo/baz')
'ICP_OP_MISS_NOFETCH'
Running the Server
==================
Starting the server begins listening on the given port and IP.
>>> zc.icp.start_server('localhost', 3130)
info: ICP server started
Address: localhost
Port: 3130
Now we can start sending ICP requests and get responses back. To do so we must
first construct an ICP request.
>>> import struct
>>> query = zc.icp.HEADER_LAYOUT + zc.icp.QUERY_LAYOUT
>>> url = 'http://example.com/\0'
>>> format = query % len(url)
>>> icp_request = struct.pack(
... format, 1, 2, struct.calcsize(format), 0xDEADBEEF, 0, 0, 0, 0, url)
>>> print zc.icp.format_datagram(icp_request)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ICP_OP_QUERY | Version: 2 | Message Length: 44 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Request Number: DEADBEEF |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Data: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Host Address: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Payload: \x00\x00\x00\x00http://example.com/\x00 |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
After sending the request we get back a response.
>>> import socket
>>> s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
>>> s.connect(('localhost', 3130))
>>> s.send(icp_request)
44
>>> icp_response = s.recv(16384)
>>> icp_response
'\x03\x02\x00(\xde\xad\xbe\xef\x00\x00\x00\x00\...http://example.com/\x00'
>>> print zc.icp.format_datagram(icp_response)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ICP_OP_MISS | Version: 2 | Message Length: 40 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Request Number: DEADBEEF |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Data: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Host Address: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Payload: http://example.com/\x00 |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
That was a miss. We can also provoke a hit by satisfying one of our policies.
>>> url = 'http://example.com/foo\0'
>>> format = query % len(url)
>>> icp_request = struct.pack(
... format, 1, 2, struct.calcsize(format), 0xDEADBEEF, 0, 0, 0, 0, url)
>>> print zc.icp.format_datagram(icp_request)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ICP_OP_QUERY | Version: 2 | Message Length: 47 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Request Number: DEADBEEF |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Data: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Host Address: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Payload: \x00\x00\x00\x00http://example.com/foo\x00 |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
>>> s.send(icp_request)
47
>>> print zc.icp.format_datagram(s.recv(16384))
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ICP_OP_HIT | Version: 2 | Message Length: 43 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Request Number: DEADBEEF |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Data: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Host Address: 0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Payload: http://example.com/foo\x00 |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| zc.icp | /zc.icp-1.0.0.tar.gz/zc.icp-1.0.0/src/zc/icp/README.txt | README.txt |
import logging
import asyncore
import errno
import os
import socket
import string
import struct
import sys
import threading
import zc.icp.interfaces
import zope.component
# ICP opcodes, as defined in the ICP RFC.  check_url() returns one of
# these names and handle_request() maps the name back to its number.
ICP_OP_INVALID = 0
ICP_OP_QUERY = 1
ICP_OP_HIT = 2
ICP_OP_MISS = 3
ICP_OP_ERR = 4
ICP_OP_SECHO = 10
ICP_OP_DECHO = 11
ICP_OP_MISS_NOFETCH = 21
ICP_OP_DENIED = 22
# struct format of the fixed 20-byte ICP header: network byte order;
# opcode and version (unsigned bytes), message length (unsigned short),
# then request number, options, option data, sender host address
# (unsigned 32-bit ints).
HEADER_LAYOUT = '!BBHIIII'
# Response payload: just the (null-terminated) URL; %d is the length.
RESPONSE_LAYOUT = '%ds'
# Query payload: requester host address (unsigned int) followed by the URL.
QUERY_LAYOUT = 'I' + RESPONSE_LAYOUT
# Datagrams longer than this are abbreviated (head...tail) when logged.
MAX_DATAGRAM_LOG_LENGTH = 70
def check_url(url):
    """Return the name of the ICP result code for *url*.

    All registered IICPPolicy utilities are consulted in order of their
    registration names; the first non-None answer wins.  If no policy
    has an opinion, the default is 'ICP_OP_MISS'.
    """
    registrations = sorted(
        zope.component.getUtilitiesFor(zc.icp.interfaces.IICPPolicy))
    for _name, policy in registrations:
        verdict = policy(url)
        if verdict is not None:
            return verdict
    return 'ICP_OP_MISS'
def print_datagram(datagram):
    """Return a human-readable rendering of *datagram*.

    Falls back to ``repr()`` of the raw bytes when the datagram cannot
    be parsed, so this is always safe to use in log messages.
    """
    try:
        return format_datagram(datagram)
    except Exception:
        # A malformed datagram must never break logging, but a bare
        # ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # catch only ordinary exceptions.
        return repr(datagram)
def handle_request(datagram, check_url=check_url):
    """Parse one ICP query datagram and return the packed reply datagram.

    *datagram* is the raw request read from the socket.  *check_url*
    maps a URL string to the name of an ICP result code; it is a
    parameter so that callers/tests can substitute another policy
    function.

    Malformed requests produce an ICP_OP_ERR reply and an explanatory
    entry in the error log.
    """
    log_message = None
    # Default reply: ICP_OP_ERR with a single NUL byte as payload.
    out_header = HEADER_LAYOUT + RESPONSE_LAYOUT % 1
    out_group = [ICP_OP_ERR, 2, len(datagram), 0, 0, 0, 0, '\0']
    try:
        # The fixed-size ICP header occupies the first 20 bytes.
        in_group = list(struct.unpack(HEADER_LAYOUT, datagram[:20]))
        opcode, version, length, number, options, opdata, sender_ip = in_group
    except struct.error:
        log_message = 'Error unpacking ICP header'
    else:
        # Fix up the reply length and echo the opaque request number.
        out_group[2:4] = [struct.calcsize(out_header), number]
        if version == 2 and length == len(datagram) and length <= 16384:
            if opcode == ICP_OP_QUERY:
                # A query carries a 4-byte requester address plus the URL,
                # so it must be longer than header (20) + address (4).
                if length > 24:
                    try:
                        (requester_ip, url) = struct.unpack(
                            '!' + QUERY_LAYOUT % (length - 24), datagram[20:])
                    except:
                        log_message = 'Error unpacking ICP query'
                    else:
                        in_group.extend([requester_ip, url])
                        # Resize the reply so the URL becomes its payload.
                        out_header = HEADER_LAYOUT + RESPONSE_LAYOUT % len(url)
                        out_group[2] = struct.calcsize(out_header)
                        out_group[6:] = [sender_ip, url]
                        if url[-1:] == '\x00':
                            # Ask the policies for a verdict, then translate
                            # the result-code name into its numeric opcode.
                            out_group[0] = globals()[check_url(url[:-1])]
                        else:
                            log_message = (
                                'URL in ICP query is not null-terminated')
                else:
                    log_message = 'Query is not long enough'
    if log_message:
        # Log an (abbreviated, if large) dump of the offending datagram.
        if len(datagram) > MAX_DATAGRAM_LOG_LENGTH:
            chunk_size = MAX_DATAGRAM_LOG_LENGTH / 2
            log_gram = datagram[:chunk_size] + '...' + datagram[-chunk_size:]
        else:
            log_gram = datagram
        logging.error('%s:\n %r' % (log_message, log_gram))
    result = struct.pack(out_header, *out_group)
    return result
# We want our own, independent map for running an asyncore mainloop, so
# this server does not interfere with any other asyncore-based
# dispatchers that may be using the default global socket map.
asyncore_map = {}
class ICPServer(asyncore.dispatcher):
    """UDP dispatcher that answers ICP queries via handle_request().

    NOTE(review): this class uses Python 2-only syntax
    (``except socket.error, e``); it will not run under Python 3 as-is.
    """

    # Upper bound on datagrams serviced per readability callback, so a
    # busy socket cannot monopolize the asyncore loop.
    REQUESTS_PER_LOOP = 4

    def __init__(self, ip, port):
        # Register with the module's private socket map (asyncore_map)
        # rather than asyncore's global default map.
        asyncore.dispatcher.__init__(self, map=asyncore_map)
        self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.set_reuse_addr()
        self.bind((ip, port))
        if ip=='':
            addr = 'any'
        else:
            addr = ip
        self.log_info(
            'ICP server started\n\tAddress: %s\n\tPort: %s' % (addr, port))

    def handle_read(self):
        # Service up to REQUESTS_PER_LOOP pending datagrams, then yield
        # control back to the loop.
        for i in range(self.REQUESTS_PER_LOOP):
            try:
                datagram, whence = self.socket.recvfrom(16384)
            except socket.error, e:
                if e[0] == errno.EWOULDBLOCK:
                    # No more datagrams queued right now.
                    break
                else:
                    raise
            else:
                reply = handle_request(datagram)
                if reply:
                    self.socket.sendto(reply, whence)

    def readable(self):
        # Always interested in incoming datagrams.
        return 1

    def writable(self):
        # Replies are sent inline from handle_read; never ask for
        # writability notifications.
        return 0

    def handle_connect(self):
        pass

    def handle_write(self):
        self.log_info('unexpected write event', 'warning')

    def handle_error(self):
        # don't close the socket on error
        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
        self.log_info('Problem in ICP (%s:%s %s)' % (t, v, tbinfo), 'error')
def start_server(ip='', port=3130):
    """Bind an ICP server to (*ip*, *port*) and service it in the background.

    The socket is bound immediately; the asyncore loop that drives it
    runs in a daemon thread, so it will not keep the process alive.
    """
    # The dispatcher registers itself in asyncore_map via its constructor.
    ICPServer(ip, port)
    loop_thread = threading.Thread(
        target=asyncore.loop, kwargs={'map': asyncore_map})
    loop_thread.setDaemon(True)
    loop_thread.start()
# RFC-style ASCII-art picture of an ICP datagram; the %-targets are
# filled in by format_datagram() with the decoded header fields and a
# repr() of the payload.
template = """\
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| %-13s| Version: %-3s| Message Length: %-10s|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Request Number: %-27X|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options: %-34X|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Option Data: %-30X|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sender Host Address: %-22s|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Payload: %-50s|
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"""

# Numeric ICP opcode -> symbolic name, used when rendering datagrams.
reverse_opcode_map = {
    0: 'ICP_OP_INVALID',
    1: 'ICP_OP_QUERY',
    2: 'ICP_OP_HIT',
    3: 'ICP_OP_MISS',
    4: 'ICP_OP_ERR',
    10: 'ICP_OP_SECHO',
    11: 'ICP_OP_DECHO',
    21: 'ICP_OP_MISS_NOFETCH',
    22: 'ICP_OP_DENIED',
    }
def format_datagram(datagram):
    """Render an ICP datagram as an RFC-style ASCII-art box diagram."""
    size = struct.calcsize(HEADER_LAYOUT)
    fields = struct.unpack(HEADER_LAYOUT, datagram[:size])
    opcode_name = reverse_opcode_map[fields[0]]
    # repr() escapes unprintable bytes; strip the surrounding quotes.
    payload = repr(datagram[size:])[1:-1]
    return template % ((opcode_name,) + fields[1:] + (payload,))
=========
Changes
=========
2.1.0 (2022-04-01)
==================
- Add support for Python 3.6, 3.7, 3.8, 3.9, and 3.10.
- Drop support for running the tests using `python setup.py test`.
2.0.0 (2016-12-16)
==================
- Add zope.lifecycleevent subscribers. You must include ``subscribers.zcml``
to use these and have :mod:`zope.intid` installed. See :issue:`5`.
- Documentation is now hosted at http://zcintid.readthedocs.io
- Add continuous integration testing for supported Python versions.
- Add PyPy support.
- Add Python 3 support.
- Drop support for Python less than 2.7.
- Remove ZODB3 dependency in favor of explicit dependencies on BTrees.
- The zope-intid.zcml file included in this package now works to make
the IntId utility from this package implement the zope.intids
interface, if that package is installed.
- Interfaces and event implementations have been refactored into the
new module :mod:`zc.intid.interfaces`. Backwards compatibility
aliases remain for the old names. See :issue:`9`.
- Raise more informative KeyError subclasses from the utility when intids
or objects cannot be found. This distinguishes them from errors
raised by normal dictionaries or BTrees, and is useful in unit
testing or when persisting intids or sharing them among processes
for later or concurrent use. See :issue:`8`
- Propagate ``POSKeyError`` from ``queryId`` instead of returning the
default object. This exception indicates a corrupt database, not a
missing object. The ``queryObject`` function already behaved this way.
- Attempting to ``register`` an object that cannot have the utility's
attribute set on it (for example, it has restrictive ``__slots__``)
no longer corrupts the utility's state.
1.0.1 (2011-06-27)
==================
- Make the behavior of the utility's `getId` method consistent with
zope.intid in regard to its handling of proxied objects.
1.0.0 (2011-02-21)
==================
- Initial release.
| zc.intid | /zc.intid-2.1.0.tar.gz/zc.intid-2.1.0/CHANGES.rst | CHANGES.rst |
==============================================
zc.intid - Reduced-conflict integer id utility
==============================================
.. image:: https://github.com/zopefoundation/zc.intid/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zc.intid/actions/workflows/tests.yml
.. image:: https://readthedocs.org/projects/zcintid/badge/?version=latest
:target: https://zcintid.readthedocs.io/
:alt: Documentation Status
This package provides an API to create integer ids for any object.
Objects can later be looked up by their id as well. This is similar to
the ``zope.intid`` package, but it has the advantage of producing
fewer conflicts.
`Documentation`_, including installation and configuration
instructions and a detailed `changelog`_ is hosted at
http://zcintid.readthedocs.io.
.. _Documentation: http://zcintid.readthedocs.io
.. _changelog: http://zcintid.readthedocs.io/en/latest/changelog.html
| zc.intid | /zc.intid-2.1.0.tar.gz/zc.intid-2.1.0/README.rst | README.rst |
import zope.interface
import zope.intid.interfaces
class IntIdMismatchError(zope.intid.interfaces.IntIdMissingError):
    """
    Raised from ``getId`` if the id of an object doesn't match
    what's recorded in the utility.

    Subclasses ``IntIdMissingError``, so callers catching that
    exception also catch this one.
    """
class IntIdInUseError(ValueError):
    """
    Raised by the utility when ``register`` is handed (or generates)
    an intid that is already assigned.
    """
class IIntIdsQuery(zope.interface.Interface):
    """
    Finding IDs by object and objects by ID.
    """

    def getObject(uid):
        """
        Return the object registered under the unique id *uid*.

        :raises zope.intid.interfaces.ObjectMissingError: if
            there is no object with that id.
        """

    def getId(ob):
        """
        Get the unique id of an object.

        :raises zope.intid.interfaces.IntIdMissingError: if
            there is no id for that object.
        :raises zc.intid.interfaces.IntIdMismatchError: if the recorded id
            doesn't match the id of the object.
        """

    def queryObject(uid, default=None):
        """Return the object registered under the unique id *uid*.

        Return the default if the uid isn't registered.
        """

    def queryId(ob, default=None):
        """Get the unique id of an object.

        Return the default if the object isn't registered.
        """

    def __iter__():
        """Return an iteration over the assigned ids."""
class IIntIdsSet(zope.interface.Interface):
    """
    Establishing and destroying the connection between an object
    and an ID.
    """

    def register(ob):
        """Register an object and return the unique id generated for it.

        If the object is already registered, its id is returned anyway.
        If not already registered, the registration is made and an
        :class:`IIdAddedEvent` is generated.
        """

    def unregister(ob):
        """
        Remove the object from the indexes.

        If *ob* is not previously registered, this has no effect.

        An :class:`IIdRemovedEvent` is triggered for successful
        unregistrations.
        """
class IIntIdsManage(zope.interface.Interface):
    """Inspection methods (used, e.g., by management views)."""

    def __len__():
        """Return the number of objects indexed."""

    def items():
        """Return a list of (id, object) pairs."""
class IIntIds(IIntIdsSet, IIntIdsQuery, IIntIdsManage):
    """A utility that assigns unique ids to objects.

    Combines the registration, query and management interfaces:
    objects can be looked up by id and ids by object.
    """
class IIntIdsSubclass(zope.interface.Interface):
    """Additional interface that subclasses can usefully use."""

    family = zope.interface.Attribute(
        """BTree family used for this id utility.
        This will be either BTree.family32 or BTree.family64.
        This may not be modified, but may be used to create additional
        structures of the same integer family as the ``refs`` structure.
        """)

    refs = zope.interface.Attribute(
        """BTree mapping from id to object.
        Subclasses can use this to determine whether an id has already
        been assigned.
        This should not be directly modified by subclasses.
        """)

    def generateId(ob):
        """Return a new iid that isn't already used.

        ``ob`` is the object the id is being generated for.

        The default behavior is to generate arbitrary integers without
        reference to the objects they're generated for.

        This method may be overridden.

        If this method returns an id that is already in use,
        ``register`` will raise an :exc:`IntIdInUseError`.
        """
class IIdEvent(zope.interface.Interface):
    """Generic base interface for IntId-related events."""

    object = zope.interface.Attribute(
        "The object related to this event")
    idmanager = zope.interface.Attribute(
        "The int id utility generating the event.")
    id = zope.interface.Attribute(
        "The id that is being assigned or unassigned.")
class IIdRemovedEvent(IIdEvent):
    """
    A unique id will be removed.

    The event is published before the unique id is removed
    from the utility so that the indexing objects can unindex the object.
    """
class IIdAddedEvent(IIdEvent):
    """
    A unique id has been added.

    The event gets sent when an object is registered in a unique id
    utility.
    """
class Event(object):
    """Base implementation shared by the id-lifecycle event classes.

    The attribute names mirror the ``IIdEvent`` interface: the affected
    object, the id utility firing the event, and the id in question.
    """

    def __init__(self, object, idmanager, id):
        self.object = object
        self.id = id
        self.idmanager = idmanager
@zope.interface.implementer(IIdAddedEvent)
class AddedEvent(Event):
    """Published by a utility after it assigns an id to an object."""
    pass
@zope.interface.implementer(IIdRemovedEvent)
class RemovedEvent(Event):
    """Published by a utility when it removes an object's id."""
    pass
class ISubscriberEvent(zope.interface.Interface):
    """
    An event fired by the subscribers in relation to another event.
    """

    object = zope.interface.Attribute(
        "The object related to this event")
    original_event = zope.interface.Attribute(
        "The ObjectEvent related to this event")
class IAfterIdAddedEvent(ISubscriberEvent):
    """
    Fired after all utilities have registered unique ids.

    This event is guaranteed to be the last event fired by the
    subscribers that register ids. It will be fired exactly once, no
    matter how many utilities registered ids.

    This has a similar purpose and structure to
    :class:`zope.intid.interfaces.IIntIdAddedEvent`.
    """

    idmap = zope.interface.Attribute(
        "The dictionary that holds an (utility -> id) mapping of created ids")
class IBeforeIdRemovedEvent(ISubscriberEvent):
    """
    Fired before any utility removes an object's unique ID.

    This event is guaranteed to be the first event fired by the
    subscriber that removes IDs. It will only be fired if at least
    one utility will remove an ID.
    """
@zope.interface.implementer(IBeforeIdRemovedEvent)
class BeforeIdRemovedEvent(object):
    """Default ``IBeforeIdRemovedEvent`` implementation."""

    def __init__(self, o, event):
        # o: the object whose ids are about to be removed.
        # event: the triggering lifecycle (ObjectRemoved) event.
        self.object = o
        self.original_event = event
@zope.interface.implementer(IAfterIdAddedEvent)
class AfterIdAddedEvent(object):
    """Default ``IAfterIdAddedEvent`` implementation."""

    def __init__(self, o, event, idmap=None):
        # o: the newly registered object.
        # event: the triggering lifecycle (ObjectAdded) event.
        # idmap: {utility -> assigned id}, when available.
        self.object = o
        self.idmap = idmap
        self.original_event = event
from zc.intid.interfaces import AddedEvent
from zc.intid.interfaces import IIntIds
from zc.intid.interfaces import IIntIdsSubclass
from zc.intid.interfaces import IntIdMismatchError
from zc.intid.interfaces import IntIdInUseError
from zc.intid.interfaces import RemovedEvent
from zope.event import notify
from zope.interface import implementer
from zope.intid.interfaces import IntIdMissingError
from zope.intid.interfaces import ObjectMissingError
try:
# POSKeyError is a subclass of KeyError; in the cases where we
# catch KeyError for an item missing from a BTree, we still
# want to propagate this exception that indicates a corrupt database
# (as opposed to a corrupt IntIds)
from ZODB.POSException import POSKeyError as _POSKeyError
except ImportError: # pragma: no cover (we run tests with ZODB installed)
# In practice, ZODB will probably be installed. But if not,
# then POSKeyError can never be generated, so use a unique
# exception that we'll never catch.
class _POSKeyError(BaseException):
pass
from zope.security.proxy import removeSecurityProxy as unwrap
import BTrees
import persistent
import random
@implementer(IIntIds, IIntIdsSubclass)
class IntIds(persistent.Persistent):
    """This utility provides a two way mapping between objects and
    integer ids.

    The objects are stored directly in the internal structures.

    Only the id -> object direction is kept in a BTree; the reverse
    direction is stored as an attribute (named by ``self.attribute``)
    on each registered object, which reduces ZODB conflicts compared
    to keeping a second BTree.
    """

    # Volatile (never persisted) hint for the next id to try; reset to
    # None to force a fresh random starting point.
    _v_nextid = None

    # Indirection point for the random source (substitutable in tests).
    _randrange = random.randrange

    # Default integer family; overridable via the constructor.
    family = BTrees.family32

    def __init__(self, attribute, family=None):
        """*attribute* names the attribute used to store ids on objects;
        *family* optionally selects BTrees.family32 or family64."""
        if family is not None:
            self.family = family
        self.attribute = attribute
        self.refs = self.family.IO.BTree()

    def __len__(self):
        return len(self.refs)

    def items(self):
        return list(self.refs.items())

    def __iter__(self):
        return self.refs.iterkeys()

    def getObject(self, id):
        try:
            return self.refs[id]
        except _POSKeyError:
            # POSKeyError subclasses KeyError but signals a corrupt
            # database, not a missing registration; propagate it.
            raise
        except KeyError:
            raise ObjectMissingError(id)

    def queryObject(self, id, default=None):
        if id in self.refs:
            return self.refs[id]
        return default

    def getId(self, ob):
        # Strip security proxies so both the attribute lookup and the
        # identity check below see the bare object (this matches the
        # behavior of zope.intid for proxied objects).
        unwrapped = unwrap(ob)
        uid = getattr(unwrapped, self.attribute, None)
        if uid is None:
            raise IntIdMissingError(ob)
        if uid not in self.refs or self.refs[uid] is not unwrapped:
            # not an id that matches
            raise IntIdMismatchError(ob)
        return uid

    def queryId(self, ob, default=None):
        try:
            return self.getId(ob)
        except _POSKeyError:
            # Corrupt database: never swallow this into the default.
            raise
        except KeyError:
            return default

    def generateId(self, ob):
        """Generate an id which is not yet taken.

        This tries to allocate sequential ids so they fall into the same
        BTree bucket, and randomizes if it stumbles upon a used one.
        """
        while True:
            if self._v_nextid is None:
                self._v_nextid = self._randrange(0, self.family.maxint)
            uid = self._v_nextid
            self._v_nextid += 1
            if uid not in self.refs:
                return uid
            # Collision: restart from a new random point next iteration.
            self._v_nextid = None

    def register(self, ob):
        ob = unwrap(ob)
        uid = self.queryId(ob)
        if uid is None:
            uid = self.generateId(ob)
            if uid in self.refs:
                raise IntIdInUseError("id generator returned used id")
            self.refs[uid] = ob
            try:
                setattr(ob, self.attribute, uid)
            except:  # noqa: E722 do not use bare 'except'
                # cleanup our mess; e.g. objects with restrictive
                # __slots__ cannot take the attribute, and the utility
                # state must not be corrupted by the failed setattr.
                del self.refs[uid]
                raise
            notify(AddedEvent(ob, self, uid))
        return uid

    def unregister(self, ob):
        ob = unwrap(ob)
        uid = self.queryId(ob)
        if uid is None:
            return
        # This should not raise KeyError, we checked that in queryId
        del self.refs[uid]
        setattr(ob, self.attribute, None)
        notify(RemovedEvent(ob, self, uid))
from __future__ import print_function, absolute_import, division
from zope import component
from zope.component import handle
from zope.lifecycleevent.interfaces import IObjectAddedEvent
from zope.lifecycleevent.interfaces import IObjectRemovedEvent
from zope.location.interfaces import ILocation
from zope.event import notify
from zope.intid.interfaces import IntIdAddedEvent
from zope.intid.interfaces import IntIdRemovedEvent
from zope.keyreference.interfaces import IKeyReference
from zc.intid.interfaces import IIntIds
from zc.intid.interfaces import BeforeIdRemovedEvent
from zc.intid.interfaces import AfterIdAddedEvent
def _utilities_and_key(ob):
    """Return ``(IIntIds utilities, IKeyReference for ob or None)``."""
    utilities = tuple(component.getAllUtilitiesRegisteredFor(IIntIds))
    if not utilities:
        # No utilities registered: skip the adaptation entirely.
        return utilities, None
    return utilities, IKeyReference(ob, None)
@component.adapter(ILocation, IObjectAddedEvent)
def addIntIdSubscriber(ob, event):
    """
    Register *ob* with every unique id utility, then notify catalogs.

    Each utility fires :class:`zc.intid.interfaces.IIdAddedEvent` from
    its ``register`` method; afterwards this subscriber fires exactly
    one :class:`zope.intid.interfaces.IIntIdAddedEvent` followed by
    exactly one :class:`zc.intid.interfaces.IAfterIdAddedEvent`, no
    matter how many utilities registered ids.  This guaranteed order
    lets :mod:`zope.catalog` and other Zope event listeners run last.
    """
    utilities, key = _utilities_and_key(ob)
    if not utilities or key is None:
        return
    idmap = {utility: utility.register(ob) for utility in utilities}
    # Notify the catalogs that this object was added.
    notify(IntIdAddedEvent(ob, event, idmap))
    notify(AfterIdAddedEvent(ob, event, idmap))
@component.adapter(ILocation, IObjectRemovedEvent)
def removeIntIdSubscriber(ob, event):
    """
    Removes the unique ids registered for the object in all the unique
    id utilities.

    Just before this happens (for the first time), an
    :class:`zc.intid.interfaces.IBeforeIdRemovedEvent` is fired,
    followed by an :class:`zope.intid.interfaces.IIntIdRemovedEvent`.
    Notice that this is fired before the id is actually removed from
    any utility, giving other subscribers time to do their cleanup.

    Before each utility removes its registration, it will fire
    :class:`zc.intid.interfaces.IIdRemovedEvent`. This gives a
    guaranteed order such that :mod:`zope.catalog` and other Zope
    event listeners will have fired.
    """
    utilities, key = _utilities_and_key(ob)
    if not utilities or key is None:
        return
    # Notify the catalogs that this object is about to be removed,
    # if we actually find something to remove
    fired_event = False
    for utility in utilities:
        if not fired_event and utility.queryId(ob) is not None:
            # Fire the "before" events exactly once, regardless of how
            # many utilities hold a registration for ob.
            fired_event = True
            notify(BeforeIdRemovedEvent(ob, event))
            notify(IntIdRemovedEvent(ob, event))
        try:
            utility.unregister(ob)
        except KeyError:  # pragma: no cover
            # Ignoring POSKeyError and broken registrations
            pass
def intIdEventNotify(event):
    """
    Event subscriber to dispatch IntIdEvent to interested adapters.

    See subscribers.zcml for its registrations (it handles two types of
    events).
    """
    # Re-dispatch on (event.object, event) so adapters registered for
    # the specific object/event pair are invoked.
    handle(event.object, event)
=====================
What is zc.intid?
=====================
.. image:: https://travis-ci.org/zopefoundation/zc.intid.svg?branch=master
:target: https://travis-ci.org/zopefoundation/zc.intid
zc.intid provides an API to create integer ids for any object.
Objects can later be looked up by their id as well. This functionality
is commonly used in situations where dealing with objects is
undesirable, such as in search indices or any code that needs an easy
hash of an object.
This is similar to the :mod:`zope.intid` package, but with the
advantage of inducing fewer ZODB conflict errors, since object ids are
not used as part of the stored data. The id for an object is stored in
an attribute of the object itself, with the attribute name being
configured by the construction of the id utility.
This does require that the object being registered "play nice" with this approach. At
a minimum, the attributes used to store ids on objects should
- persist with the rest of the object's state, and
- not be modified by the object.
Events generated on the assignment and removal of ids are generated by
the :meth:`~zc.intid.interfaces.IIntIdsSet.register` and
:meth:`~zc.intid.interfaces.IIntIdsSet.unregister` methods rather than
by the callers of those methods.
Installation
============
zc.intid may be installed using pip::
pip install zc.intid
For information on configuring zc.intid, see :ref:`configuring`.
.. toctree::
:maxdepth: 1
api
subscribers
.. toctree::
:maxdepth: 2
changelog
Development
===========
zc.intid is hosted at GitHub:
https://github.com/zopefoundation/zc.intid/
Project URLs
============
* http://pypi.python.org/pypi/zc.intid (PyPI entry and downloads)
====================
Indices and tables
====================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zc.intid | /zc.intid-2.1.0.tar.gz/zc.intid-2.1.0/docs/index.rst | index.rst |
=============================
Lifecycle Event Subscribers
=============================
.. automodule:: zc.intid.subscribers
:no-members:
.. _configuring:
Configuring
===========
To configure, you need to include ``subscribers.zcml``, while being
careful about how ``zope.intid`` is configured:
.. code-block:: xml
<!-- configure.zcml -->
<!--
If we load zope.intid, we get subscribers for the Object events
that ensure all ILocation objects are registered/unregistered when
they are added/removed, plus another set of events when they
get/lose intids. This second set of events is meant to update
zope.catalog. A consequence of this is that ILocation objects must
be adaptable to KeyReferences when they are ObjectAdded (for
purposes of zope.intid, which we don't care about, but this also
ensures that they have ZODB Connections, which is good).
We cannot use these subscribers as-is due to the way the use IKeyReference
and try to register that. However, our subscribers *do* make sure that
the given objects can be adapted to IKeyReference because that's useful and
may be required by catalogs or other subscribers.
-->
<exclude package="zope.intid" file="subscribers.zcml" />
<include package="zope.intid" />
<!-- Make sure the default IKeyReference adapters are in place -->
<include package="zope.keyreference" />
<include package="zc.intid" />
<!--
Make zc.intid utilities compatible with zope.intid utilities.
-->
<include package="zc.intid" file="zope-intid.zcml" />
<!-- To hook them up to the Object events, we need to include the file -->
<include package="zc.intid" file="subscribers.zcml" />
KeyReferences and zope.intid
============================
These subscribers do not register/unregister an :class:`~zope.keyreference.IKeyReference`
with the intid utilities. Instead, they register the actual object, and the
events that are broadcast hold the actual object.
``IKeyReferences``, especially
:class:`~zope.keyreference.persistent.KeyReferenceToPersistent`, are
used for a few reasons. First, they provide a stable,
object-identity-based pointer to objects. To be identity based, this
pointer is independent of the equality and hashing algorithms of the
underlying object. Identity-based comparisons are necessary for the
classic :mod:`zope.intid` utility implementation which uses a second
``OIBTree`` to maintain the backreference from object to assigned intid
(clearly you don't want two non-identical objects which happen to
compare equally *now* to get the same intid as that condition may
change). Likewise, these references are all defined to be mutually
comparable, no matter how they are implemented, a condition necessary
for them to all work together in a ``OIBTree``. Lastly, these
references are meant to be comparable during ZODB conflict resolution
(the original persistent objects probably won't be), which, again, is
a condition of the implementation using a ``OIBTree.``
A consequence of avoiding these references is that generally
persistent objects that are expected to have intids assigned *should
not* be used as keys in an ``OxBTree`` or stored in an ``OOSet.``
Instead, all such data structures *should* use the integer
variations (e.g., ``IISet``), with the intid as the key.
Subscriber Functions
====================
.. autofunction:: zc.intid.subscribers.addIntIdSubscriber
.. autofunction:: zc.intid.subscribers.removeIntIdSubscriber
.. autofunction:: zc.intid.subscribers.intIdEventNotify
| zc.intid | /zc.intid-2.1.0.tar.gz/zc.intid-2.1.0/docs/subscribers.rst | subscribers.rst |
======================================================
A zc.monitor plugin for testing whether function hangs
======================================================
Sometimes, computation stops and it can be hard to find out why. Tools
like strace can be helpful, but are very low level. If a call hangs
calling external network services, all you might see is a select or
poll call and not what service was being called.
Isithanging provides a simple registry and a helper function for
registering and unregistering calls. To illustrate how this works, we'll
use a test function that blocks until we unblock it by setting an
event:
>>> import zc.isithanging.tests
>>> event, blocker = zc.isithanging.tests.create_blocker()
The blocker function just returns any arguments it was passed.
To check whether a function is blocking, we use ``zc.isithanging.run`` to
run the function. We'll do so here in a thread:
>>> import zc.thread
>>> @zc.thread.Thread
... def thread():
... print zc.isithanging.run(blocker, 1, foo=2)
There's also a decorator that wraps a function and takes care of
calling ``run``.
Let's create some more jobs:
>>> e1, b1 = zc.isithanging.tests.create_blocker()
>>> suspect = zc.isithanging.suspect(b1)
>>> @zc.thread.Thread
... def t1():
... print suspect(1)
.. metadata
>>> for name in '__name__', '__module__', '__code__', '__defaults__':
... if not getattr(suspect, name) is getattr(b1, name):
... print(name)
Above, we used the suspect decorator as a function (rather than with
decorator syntax.)
>>> e2, b2 = zc.isithanging.tests.create_blocker()
>>> @zc.thread.Thread
... def t2():
... print zc.isithanging.run(b2, 2)
.. Give a little time for the threads to start:
>>> import time; time.sleep(.1)
.. Some time passes:
>>> timetime += 1
We can see what's running by looking at ``zc.isithanging.running``:
>>> now = time.time()
>>> for r in zc.isithanging.running:
... print r.show(now)
Sun Nov 16 09:48:29 2014 1s <function f at 0x10251e500> (1,) {'foo': 2}
Sun Nov 16 09:48:29 2014 1s <function f at 0x10251e9b0> (1,) {}
Sun Nov 16 09:48:29 2014 1s <function f at 0x10251eb18> (2,) {}
The show function shows start time, elapsed time in seconds, function
and arguments.
.. Some time passes:
>>> timetime += 1
When a job stops, it's automatically unregistered:
>>> e1.set(); t1.join()
((1,), {})
>>> for r in zc.isithanging.running:
... print r
Sun Nov 16 09:48:29 2014 2s <function f at 0x102d1e500> (1,) {'foo': 2}
Sun Nov 16 09:48:29 2014 2s <function f at 0x102d1eb18> (2,) {}
There's a zc.monitor command that prints the jobs:
>>> import sys
>>> zc.isithanging.isithanging(sys.stdout)
Sun Nov 16 09:48:29 2014 2s <function f at 0x102d1e500> (1,) {'foo': 2}
Sun Nov 16 09:48:29 2014 2s <function f at 0x102d1eb18> (2,) {}
Let's finish the jobs and try again:
>>> event.set(); thread.join()
((1,), {'foo': 2})
>>> e2.set(); t2.join()
((2,), {})
>>> zc.isithanging.isithanging(sys.stdout)
=======
Changes
=======
0.3.0 (2014-11-17)
==================
Added a "suspect" decorator to decorate functions suspected of hanging.
0.2.0 (2014-11-17)
==================
(Accidental re-release of 0.1.)
0.1.0 (2014-11-17)
==================
Initial release
| zc.isithanging | /zc.isithanging-0.3.0.tar.gz/zc.isithanging-0.3.0/README.rst | README.rst |
==========================
ISO 8601 utility functions
==========================
This package collects together functions supporting the data formats described
in ISO 8601. Time zone support is provided by the ``pytz`` package.
The following functions are provided in the ``zc.iso8601.parse`` module:
``date(s)``
Parse a date value that does not include time information.
Returns a Python date value.
``datetime(s)``
Parse a date-time value that does not include time-zone information.
Returns a Python datetime value.
``datetimetz(s)``
Parse a date-time value that includes time-zone information. Returns a
Python datetime value in the UTC timezone.
| zc.iso8601 | /zc.iso8601-0.2.0.tar.gz/zc.iso8601-0.2.0/README.txt | README.txt |
__docformat__ = "reStructuredText"

# We have to use import-as since we mask the module name.
import datetime as _datetime

import pytz
import re

# The regular expressions below are compiled with re.VERBOSE, so the
# newlines and indentation inside the triple-quoted patterns are
# ignored.  They are written as raw strings so that sequences such as
# ``\d`` are regex escapes rather than (deprecated) string escapes.

# "Verbose" ISO 8601, with hyphens and colons:
_date_re1 = r"""
    (?P<year>\d\d\d\d)
    -(?P<month>\d\d)
    -(?P<day>\d\d)
"""

# "Compact" ISO 8601, without hyphens and colons:
_date_re2 = r"""
    (?P<year>\d\d\d\d)
    (?P<month>\d\d)
    (?P<day>\d\d)
"""

# Date-only patterns are anchored so no trailing text is accepted.
_date_rx1 = re.compile(_date_re1 + "$", re.VERBOSE)
_date_rx2 = re.compile(_date_re2 + "$", re.VERBOSE)
_date_rxs = [_date_rx1, _date_rx2]

# Time zone suffix: "Z" (UTC) or a signed hh:mm offset.
_tz_re = r"(?:Z|(?P<tzdir>[-+])(?P<tzhour>\d\d):(?P<tzmin>\d\d))$"

# "Verbose" ISO 8601, with hyphens and colons:
_datetime_re1 = _date_re1 + r"""
    [T\ ]
    (?P<hour>\d\d)
    :(?P<minute>\d\d)
    (?::(?P<second>\d\d(?:\.\d+)?))?
"""
_datetimetz_re1 = _datetime_re1 + _tz_re
_datetime_re1 += "$"

# "Compact" ISO 8601, without hyphens and colons:
_datetime_re2 = _date_re2 + r"""
    [T\ ]
    (?P<hour>\d\d)
    (?P<minute>\d\d)
    (?P<second>\d\d(?:\.\d+)?)?
"""
# In the compact form, the colon between the offset's hour and minute
# fields is optional.
_datetimetz_re2 = _datetime_re2 + _tz_re.replace("):(", "):?(")
_datetime_re2 += "$"

_datetime_rx1 = re.compile(_datetime_re1, re.VERBOSE)
_datetime_rx2 = re.compile(_datetime_re2, re.VERBOSE)
_datetime_rxs = [_datetime_rx1, _datetime_rx2]

_datetimetz_rx1 = re.compile(_datetimetz_re1, re.VERBOSE)
_datetimetz_rx2 = re.compile(_datetimetz_re2, re.VERBOSE)
_datetimetz_rxs = [_datetimetz_rx1, _datetimetz_rx2]
def date(string):
    """Parse an ISO 8601 date (no time information allowed).

    Returns a :class:`datetime.date`.  Raises ``ValueError`` if the
    input does not match either the verbose or the compact date form.
    """
    match = _find_match(string, _date_rxs, "date")
    return _datetime.date(
        int(match.group("year")),
        int(match.group("month")),
        int(match.group("day")),
    )
def datetime(string):
    """Parse an ISO 8601 date/time without timezone information.

    Returns a naive :class:`datetime.datetime`.  Raises ``ValueError``
    for input that does not match, including input carrying a zone.
    """
    match = _find_match(string, _datetime_rxs)
    return _datetime.datetime(*_get_datetime_parts(match))
def datetimetz(string):
    """Parse an ISO 8601 date/time that includes timezone information.

    Returns a :class:`datetime.datetime` normalized to UTC.  Raises
    ``ValueError`` if the zone designator is missing or malformed.
    """
    match = _find_match(string, _datetimetz_rxs)
    (year, month, day,
     hour, minute, second, microsecond) = _get_datetime_parts(match)
    offset = 0
    if match.group("tzhour"):
        tzhour, tzmin = map(int, match.group("tzhour", "tzmin"))
        offset = tzhour * 60 + tzmin
        if match.group("tzdir") == "-":
            offset = -offset
    if offset:
        # Build the value in its local zone, then convert to UTC.
        local = _datetime.datetime(
            year, month, day, hour, minute, second, microsecond,
            tzinfo=pytz.FixedOffset(offset))
        return local.astimezone(pytz.UTC)
    # "Z", "+00:00", or "-00:00": the value already is UTC.
    return _datetime.datetime(
        year, month, day, hour, minute, second, microsecond,
        tzinfo=pytz.UTC)
def _find_match(string, rxs, what="datetime"):
    """Return the first match of *string* against the patterns in *rxs*.

    Surrounding whitespace is stripped and interior whitespace runs are
    collapsed to single spaces before matching.  Raises ``ValueError``
    (mentioning *what*) when no pattern matches.
    """
    normalized = " ".join(string.split())
    for rx in rxs:
        match = rx.match(normalized)
        if match is not None:
            return match
    raise ValueError("could not parse ISO 8601 %s: %r" % (what, normalized))
def _get_datetime_parts(m):
    """Extract (year, month, day, hour, minute, second, microsecond).

    *m* is a match object with the named groups produced by the
    date/time patterns in this module.  The optional ``second`` group
    may carry a fractional part; anything finer than one microsecond is
    discarded.
    """
    year, month, day, hour, minute = [
        int(value)
        for value in m.group("year", "month", "day", "hour", "minute")]
    second = microsecond = 0
    text = m.group("second")
    if text:
        if "." in text:
            second = int(float(text))
            # Derive microseconds from the digits directly to avoid
            # floating-point rounding issues.
            fraction = text.split(".")[1][:6]
            microsecond = int(fraction.ljust(6, "0"))
        else:
            second = int(text)
    return year, month, day, hour, minute, second, microsecond
==========================
ISO 8601 utility functions
==========================
This package collects together functions supporting the data formats described
in ISO 8601.
For the parsing functions, both the "verbose" and "short" forms of ISO 8601
times are accepted. The verbose form includes hyphens in the date and colons
in the time, and the short form omits both. For each function, we'll start
with verbose examples, and will then repeat all of the examples in short form.
The verbose form is generally preferred in practice since it is substantially
more readable for humans.
Parsing date values
-------------------
There's a simple function that parses a date:
>>> from zc.iso8601.parse import date
This function only accepts a date; no time information may be included:
>>> date(u"2006-12-02T23:40:42")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: u'2006-12-02T23:40:42'
>>> date(u"2006-12-02T23:40:42Z")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: u'2006-12-02T23:40:42Z'
>>> date(u"2006-12-02T23:40:42+00:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: u'2006-12-02T23:40:42+00:00'
>>> date(u"2006-12-02T23:40:42-00:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: u'2006-12-02T23:40:42-00:00'
>>> date(u"2006-12-02T23:40:42-01:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: u'2006-12-02T23:40:42-01:00'
A date without time information is parsed as expected:
>>> date(u"2011-10-10")
datetime.date(2011, 10, 10)
>>> date(u"20111010")
datetime.date(2011, 10, 10)
>>> date(u"0001-01-01")
datetime.date(1, 1, 1)
>>> date(u"00010101")
datetime.date(1, 1, 1)
>>> date(u"9999-12-31")
datetime.date(9999, 12, 31)
>>> date(u"99991231")
datetime.date(9999, 12, 31)
Surrounding whitespace is ignored:
>>> date(u"\t\n\r 2011-10-10 \r\n\t")
datetime.date(2011, 10, 10)
>>> date(u"\t\n\r 20111010 \r\n\t")
datetime.date(2011, 10, 10)
Embedded whitespace is not:
>>> date("2011 10 10")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 date: '2011 10 10'
Parsing date/time values
------------------------
There is a function that parses text and returns date/time values:
>>> from zc.iso8601.parse import datetime
This function does not support or accept values that include time zone
information:
>>> datetime(u"2006-12-02T23:40:42Z")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006-12-02T23:40:42Z'
>>> datetime(u"2006-12-02T23:40:42+00:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006-12-02T23:40:42+00:00'
>>> datetime(u"2006-12-02T23:40:42-00:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006-12-02T23:40:42-00:00'
>>> datetime(u"2006-12-02T23:40:42-01:00")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006-12-02T23:40:42-01:00'
For times that don't include zone offsets, the results are as expected:
>>> datetime(u"2006-12-02T23:40:42")
datetime.datetime(2006, 12, 2, 23, 40, 42)
The seconds field, as shown above, is optional. If omitted, the seconds field
of the time will be zero:
>>> datetime(u"2006-12-02T23:40")
datetime.datetime(2006, 12, 2, 23, 40)
When the seconds are specified, fractional seconds are supported:
>>> datetime(u"2008-05-12T14:30:32.000")
datetime.datetime(2008, 5, 12, 14, 30, 32)
>>> datetime(u"2008-05-12T14:30:32.5")
datetime.datetime(2008, 5, 12, 14, 30, 32, 500000)
>>> datetime(u"2008-05-12T14:30:32.01")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
>>> datetime(u"2008-05-12T14:30:32.000001")
datetime.datetime(2008, 5, 12, 14, 30, 32, 1)
Fractional seconds smaller than 1 microsecond are simply thrown away:
>>> datetime(u"2008-05-12T14:30:32.00000099999")
datetime.datetime(2008, 5, 12, 14, 30, 32)
If a space is used instead of the "T" separator, the input is still
interpreted properly:
>>> datetime(u"2006-12-02 23:40:42")
datetime.datetime(2006, 12, 2, 23, 40, 42)
>>> datetime(u"2008-05-12 14:30:32.01")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
Surrounding whitespace is ignored, and multiple whitespace characters between
the date and time fields is collapsed and treated as if the extra whitespace
characters were not present:
>>> datetime(u"""
... 2006-12-02
... \t\r\f
... 23:40:42
... """)
datetime.datetime(2006, 12, 2, 23, 40, 42)
>>> datetime(u"""
... 2008-05-12
... \t\r\f
... 14:30:32.01
... """)
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
Other whitespace is considered an error:
>>> datetime(u" 2006 -12-02 23:40:42 ")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006 -12-02 23:40:42'
Now, let's look at how the same examples do in the short form:
>>> datetime(u"20061202T234042Z")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'20061202T234042Z'
>>> datetime(u"20061202T234042+0000")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'20061202T234042+0000'
>>> datetime(u"20061202T234042-0000")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'20061202T234042-0000'
>>> datetime(u"20061202T234042-0100")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'20061202T234042-0100'
>>> datetime(u"20061202T234042")
datetime.datetime(2006, 12, 2, 23, 40, 42)
>>> datetime(u"20061202T2340")
datetime.datetime(2006, 12, 2, 23, 40)
>>> datetime(u"20080512T143032.000")
datetime.datetime(2008, 5, 12, 14, 30, 32)
>>> datetime(u"20080512T143032.5")
datetime.datetime(2008, 5, 12, 14, 30, 32, 500000)
>>> datetime(u"20080512T143032.01")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
>>> datetime(u"20080512T143032.000001")
datetime.datetime(2008, 5, 12, 14, 30, 32, 1)
>>> datetime(u"20080512T143032.00000099999")
datetime.datetime(2008, 5, 12, 14, 30, 32)
>>> datetime(u"20061202 234042")
datetime.datetime(2006, 12, 2, 23, 40, 42)
>>> datetime(u"20080512 143032.01")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
>>> datetime(u"""
... 20061202
... \t\r\f
... 234042
... """)
datetime.datetime(2006, 12, 2, 23, 40, 42)
>>> datetime(u"""
... 20080512
... \t\r\f
... 143032.01
... """)
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000)
>>> datetime(u" 2006 1202 234042 ")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006 1202 234042'
Parsing date/time values with time zone information
---------------------------------------------------
There is a function that parses text and returns date/time values with time
zone offsets:
>>> from zc.iso8601.parse import datetimetz
Times in UTC may be encoded using either the "Z" notation or "+00:00" (or
"-00:00"). Let try a few examples:
>>> datetimetz(u"2006-12-02T23:40:42Z")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42+00:00")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42-00:00")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
The time zone information must be given explicitly, however; it cannot be
omitted:
>>> datetimetz(u"2006-12-02T23:40:42")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006-12-02T23:40:42'
Other time zones are converted to UTC:
>>> datetimetz(u"2006-12-02T23:40:42-01:00")
datetime.datetime(2006, 12, 3, 0, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42-04:00")
datetime.datetime(2006, 12, 3, 3, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42-05:00")
datetime.datetime(2006, 12, 3, 4, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42+01:00")
datetime.datetime(2006, 12, 2, 22, 40, 42, tzinfo=<UTC>)
We'll even make up a few that have non-zero values for the minutes portion of
the offset. While these are made-up time zones, there are real time zones
that aren't exactly some integer number of hours offset from UTC:
>>> datetimetz(u"2006-12-02T23:40:42-05:25")
datetime.datetime(2006, 12, 3, 5, 5, 42, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40:42+01:25")
datetime.datetime(2006, 12, 2, 22, 15, 42, tzinfo=<UTC>)
The seconds field, as shown above, is optional. If omitted, the seconds field
of the time will be zero:
>>> datetimetz(u"2006-12-02T23:40Z")
datetime.datetime(2006, 12, 2, 23, 40, tzinfo=<UTC>)
>>> datetimetz(u"2006-12-02T23:40-05:00")
datetime.datetime(2006, 12, 3, 4, 40, tzinfo=<UTC>)
When the seconds are specified, fractional seconds are supported:
>>> datetimetz(u"2008-05-12T14:30:32.000Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, tzinfo=<UTC>)
>>> datetimetz(u"2008-05-12T14:30:32.5Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, 500000, tzinfo=<UTC>)
>>> datetimetz(u"2008-05-12T14:30:32.01Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000, tzinfo=<UTC>)
>>> datetimetz(u"2008-05-12T14:30:32.000001+00:00")
datetime.datetime(2008, 5, 12, 14, 30, 32, 1, tzinfo=<UTC>)
Fractional seconds smaller than 1 microsecond are simply thrown away:
>>> datetimetz(u"2008-05-12T14:30:32.00000099999+00:00")
datetime.datetime(2008, 5, 12, 14, 30, 32, tzinfo=<UTC>)
If a space is used instead of the "T" separator, the input is still
interpreted properly:
>>> datetimetz(u"2006-12-02 23:40:42Z")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"2008-05-12 14:30:32.01Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000, tzinfo=<UTC>)
Surrounding whitespace is ignored, and multiple whitespace characters between
the date and time fields is collapsed and treated as if the extra whitespace
characters were not present:
>>> datetimetz(u"""
... 2006-12-02
... \t\r\f
... 23:40:42Z
... """)
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"""
... 2008-05-12
... \t\r\f
... 14:30:32.01Z
... """)
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000, tzinfo=<UTC>)
Other whitespace is considered an error:
>>> datetimetz(u" 2006 -12-02 23:40:42Z ")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'2006 -12-02 23:40:42Z'
Now, let's look at how the same examples do in the short form. Note that time
zone offsets given in numeric form continue to include the minus sign; that
carries necessary information, while the hyphens in the date are purely for
human consumption:
>>> datetimetz(u"20061202T234042Z")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042+0000")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042-0000")
datetime.datetime(2006, 12, 2, 23, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042")
Traceback (most recent call last):
ValueError: could not parse ISO 8601 datetime: u'20061202T234042'
>>> datetimetz(u"20061202T234042-0100")
datetime.datetime(2006, 12, 3, 0, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042-0400")
datetime.datetime(2006, 12, 3, 3, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042-0500")
datetime.datetime(2006, 12, 3, 4, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042+0100")
datetime.datetime(2006, 12, 2, 22, 40, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042-0525")
datetime.datetime(2006, 12, 3, 5, 5, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T234042+0125")
datetime.datetime(2006, 12, 2, 22, 15, 42, tzinfo=<UTC>)
>>> datetimetz(u"20061202T2340Z")
datetime.datetime(2006, 12, 2, 23, 40, tzinfo=<UTC>)
>>> datetimetz(u"20061202T2340-0500")
datetime.datetime(2006, 12, 3, 4, 40, tzinfo=<UTC>)
>>> datetimetz(u"20080512T143032.000Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, tzinfo=<UTC>)
>>> datetimetz(u"20080512T143032.5Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, 500000, tzinfo=<UTC>)
>>> datetimetz(u"20080512T143032.01Z")
datetime.datetime(2008, 5, 12, 14, 30, 32, 10000, tzinfo=<UTC>)
>>> datetimetz(u"20080512T143032.000001+0000")
datetime.datetime(2008, 5, 12, 14, 30, 32, 1, tzinfo=<UTC>)
>>> datetimetz(u"20080512T143032.00000099999+00:00")
datetime.datetime(2008, 5, 12, 14, 30, 32, tzinfo=<UTC>)
| zc.iso8601 | /zc.iso8601-0.2.0.tar.gz/zc.iso8601-0.2.0/src/zc/iso8601/README.txt | README.txt |
import itertools
# Sentinel distinguishing "no length given" from an explicit ``None``.
unspecified = object()


class LazyList(object):
    """A sequence that pulls items from an iterable only on demand.

    Items are consumed from the underlying iterator as they are first
    indexed and cached for later access.  Pass ``length=None`` to make
    ``len()`` raise ``RuntimeError`` (useful for potentially infinite
    iterables); pass an integer to supply a known length up front.
    """

    def __init__(self, iterable, length=unspecified):
        self.length = length
        self.iterator = iter(iterable)
        try:
            # Remember the source's own __len__ so len() can be cheap.
            self.len = iterable.__len__
        except AttributeError:
            self.len = unspecified
        self.data = []

    def __add__(self, other):
        return LazyList(itertools.chain(self, other))

    def __radd__(self, other):
        # ``other + self``: other's items must come first.  (The
        # previous implementation chained (self, other), yielding the
        # operands in the wrong order.)
        return LazyList(itertools.chain(other, self))

    def _slice(self, s):
        """Return a plain list for the slice *s*, staying lazy when
        possible (non-negative bounds, step 1)."""
        start, stop, step = s.start, s.stop, s.step
        if step is None:
            step = 1
        if (step != 1 or (start is not None and start < 0)
                or (stop is not None and stop < 0)):
            # Negative bounds or extended steps need the full length.
            return [self[i] for i in range(*s.indices(len(self)))]
        result = []
        i = 0 if start is None else start
        while stop is None or i < stop:
            try:
                result.append(self[i])
            except IndexError:
                break
            i += 1
        return result

    def __getslice__(self, i1, i2):
        # Python 2 only; Python 3 routes slicing through __getitem__.
        return self._slice(slice(i1, i2))

    def __nonzero__(self):
        # True iff there is at least one item.
        try:
            self[0]
        except IndexError:
            return False
        return True

    __bool__ = __nonzero__  # Python 3 name for the same protocol

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, list(self))

    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._slice(index)
        i = index
        # Handle negative indices (requires a computable length).
        if i < 0:
            i += len(self)
            if i < 0:
                raise IndexError(index)
        # Consume the input iterator up to the requested position,
        # caching the values as we go.
        while len(self.data) <= i:
            try:
                self.data.append(next(self.iterator))
            except StopIteration:
                raise IndexError(index)
        return self.data[i]

    def __len__(self):
        if self.length is unspecified:
            if self.len is not unspecified:
                self.length = self.len()
            else:
                # This may be expensive, but we don't have a choice; we
                # must hope the iterable is not infinite.
                i = -1
                for i, _item in enumerate(self):
                    pass
                self.length = i + 1
        if self.length is None:
            raise RuntimeError('Calling len() on this object is not allowed.')
        return self.length
Lock file support
=================
The ZODB lock_file module provides support for creating file system
locks. These are locks that are implemented with lock files and
OS-provided locking facilities. To create a lock, instantiate a
LockFile object with a file name:
>>> import zc.lockfile
>>> lock = zc.lockfile.LockFile('lock')
If we try to lock the same name, we'll get a lock error:
>>> import zope.testing.loggingsupport
>>> handler = zope.testing.loggingsupport.InstalledHandler('zc.lockfile')
>>> try:
... zc.lockfile.LockFile('lock')
... except zc.lockfile.LockError:
... print("Can't lock file")
Can't lock file
.. We don't log failure to acquire.
>>> for record in handler.records: # doctest: +ELLIPSIS
... print(record.levelname+' '+record.getMessage())
To release the lock, use its close method:
>>> lock.close()
The lock file is not removed. It is left behind:
>>> import os
>>> os.path.exists('lock')
True
Of course, now that we've released the lock, we can create it again:
>>> lock = zc.lockfile.LockFile('lock')
>>> lock.close()
.. Cleanup
>>> import os
>>> os.remove('lock')
Hostname in lock file
=====================
In a container environment (e.g. Docker), the PID is typically always
identical even if multiple containers are running under the same operating
system instance.
Clearly, inspecting lock files doesn't then help much in debugging. To identify
the container which created the lock file, we need information about the
container in the lock file. Since Docker uses the container identifier or name
as the hostname, this information can be stored in the lock file in addition to
or instead of the PID.
Use the ``content_template`` keyword argument to ``LockFile`` to specify a
custom lock file content format:
>>> lock = zc.lockfile.LockFile('lock', content_template='{pid};{hostname}')
>>> lock.close()
If you now inspected the lock file, you would see e.g.:
$ cat lock
123;myhostname
| zc.lockfile | /zc.lockfile-3.0-py3-none-any.whl/zc/lockfile/README.txt | README.txt |
import logging
import os

# Module-level logger shared by the locking helpers in this module.
logger = logging.getLogger("zc.lockfile")
class LockError(Exception):
    """Raised when a lock on a file could not be acquired or released."""
# Select a platform-appropriate file-locking implementation.  Both
# helpers take an open file object and operate on its file descriptor.
try:
    import fcntl
except ImportError:
    try:
        import msvcrt
    except ImportError:
        # Neither POSIX fcntl nor Windows msvcrt is available: locking
        # cannot work here, so fail loudly rather than pretend to lock.
        def _lock_file(file):
            raise TypeError('No file-locking support on this platform')

        def _unlock_file(file):
            raise TypeError('No file-locking support on this platform')

    else:
        # Windows
        def _lock_file(file):
            # Lock just the first byte
            try:
                msvcrt.locking(file.fileno(), msvcrt.LK_NBLCK, 1)
            except OSError:
                raise LockError("Couldn't lock %r" % file.name)

        def _unlock_file(file):
            try:
                # The locked byte is at the start of the file, so rewind
                # before unlocking.
                file.seek(0)
                msvcrt.locking(file.fileno(), msvcrt.LK_UNLCK, 1)
            except OSError:
                raise LockError("Couldn't unlock %r" % file.name)

else:
    # Unix
    # Exclusive and non-blocking: a second flock() attempt fails
    # immediately instead of waiting for the holder to release.
    _flags = fcntl.LOCK_EX | fcntl.LOCK_NB

    def _lock_file(file):
        try:
            fcntl.flock(file.fileno(), _flags)
        except OSError:
            raise LockError("Couldn't lock %r" % file.name)

    def _unlock_file(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
class LazyHostName:
    """Defer the socket import and gethostname() call until rendered.

    Instances stringify to the current hostname, so the lookup cost is
    only paid if the value is actually formatted into a lock file.
    """

    def __str__(self):
        from socket import gethostname
        return gethostname()
class SimpleLockFile:
    """Hold an exclusive OS-level lock on the file at ``path``.

    The lock is acquired in the constructor and held until ``close()``
    is called.  The lock file itself is created if missing and is never
    removed.
    """

    # The locked, open file object while the lock is held; None otherwise.
    _fp = None

    def __init__(self, path):
        self._path = path
        try:
            # Prefer r+ so an existing file is opened without truncation.
            handle = open(path, 'r+')
        except OSError:
            # The file doesn't exist yet; a+ creates it.  There is a
            # benign race here: several processes may fail the r+ open
            # and all take this branch, but only one of them will win
            # the lock below.
            handle = open(path, 'a+')

        try:
            _lock_file(handle)
        except BaseException:
            handle.close()
            raise
        self._fp = handle

        # Lock acquired; let subclasses write their payload.
        self._on_lock()
        handle.flush()

    def close(self):
        """Release the lock and close the file, if still held."""
        fp, self._fp = self._fp, None
        if fp is not None:
            _unlock_file(fp)
            fp.close()

    def _on_lock(self):
        """Hook for subclasses to run extra behavior once the lock is
        acquired (e.g. recording the holder)."""
class LockFile(SimpleLockFile):
    """Lock file that records who holds it.

    ``content_template`` is rendered with ``pid`` and ``hostname``
    substitutions and written into the locked file; the default records
    just the process id.
    """

    def __init__(self, path, content_template='{pid}'):
        self._content_template = content_template
        super().__init__(path)

    def _on_lock(self):
        # Render and write the holder information, replacing whatever a
        # previous holder left behind.
        rendered = self._content_template.format(
            pid=os.getpid(),
            hostname=LazyHostName(),
        )
        self._fp.write(" %s\n" % rendered)
        self._fp.truncate()
Logger Monitor
==============
The zc.loggermonitor package provides a zc.monitor plugin for getting
and setting logger levels.
>>> import sys, zc.loggermonitor
It is an error to call the monitor without user arguments.
>>> zc.loggermonitor.level(sys.stdout)
Traceback (most recent call last):
...
TypeError: level() takes at least 2 arguments (1 given)
If you pass it a logger name, it returns the current effective level:
>>> zc.loggermonitor.level(sys.stdout, '.')
NOTSET
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
NOTSET
If you pass a level it sets the level:
>>> zc.loggermonitor.level(sys.stdout, '.', 'INFO')
>>> zc.loggermonitor.level(sys.stdout, '.')
INFO
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
INFO
You can also pass a numeric value:
>>> zc.loggermonitor.level(sys.stdout, 'mylogger', '5')
>>> zc.loggermonitor.level(sys.stdout, '.')
INFO
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
Level 5
>>> zc.loggermonitor.level(sys.stdout, 'mylogger', '10')
>>> zc.loggermonitor.level(sys.stdout, '.')
INFO
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
DEBUG
>>> zc.loggermonitor.level(sys.stdout, 'mylogger', 'NOTSET')
>>> zc.loggermonitor.level(sys.stdout, '.')
INFO
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
INFO
>>> zc.loggermonitor.level(sys.stdout, '.', 'NOTSET')
>>> zc.loggermonitor.level(sys.stdout, '.')
NOTSET
>>> zc.loggermonitor.level(sys.stdout, 'mylogger')
NOTSET
| zc.loggermonitor | /zc.loggermonitor-0.1.tar.gz/zc.loggermonitor-0.1/src/zc/loggermonitor/README.txt | README.txt |
============
Meta-recipes
============
Buildout recipes provide reusable Python modules for common
configuration tasks. The most widely used recipes tend to provide
low-level functions, like installing eggs or software distributions,
creating configuration files, and so on. The normal recipe framework
is fairly well suited to building these general components.
Full-blown applications may require many, often tens, of parts.
Defining the many parts that make up an application can be tedious and
often entails a lot of repetition. Buildout provides a number of
mechanisms to avoid repetition, including merging of configuration
files and macros, but these, while useful to an extent, don't scale
very well. Buildout isn't and shouldn't be a programming language.
Meta-recipes allow us to bring Python to bear to provide higher-level
abstractions for buildouts.
A meta-recipe is a regular Python recipe that primarily operates by
creating parts. A meta recipe isn't merely a high level recipe. It's
a recipe that defers most of it's work to lower-level recipe by
manipulating the buildout database.
Unfortunately, buildout doesn't yet provide a high-level API for
creating parts. It has a private low-level API which has been
promoted to public (meaning it won't be broken by future release), and
it's straightforward to write the needed high-level API, but it's
annoying to repeat the high-level API in each meta recipe.
This small package provides the high-level API needed for meta recipes
and a simple testing framework. It will be merged into a future
buildout release.
A `presentation at PyCon 2011
<http://blip.tv/pycon-us-videos-2009-2010-2011/pycon-2011-deploying-applications-with-zc-buildout-4897770>`_
described early work with meta recipes.
.. contents::
A simple meta-recipe example
============================
Let's look at a fairly simple meta-recipe example. First, consider a
buildout configuration that builds a database deployment::
[buildout]
parts = ctl pack
[deployment]
recipe = zc.recipe.deployment
name = ample
user = zope
[ctl]
recipe = zc.recipe.rhrc
deployment = deployment
chkconfig = 345 99 10
parts = main
[main]
recipe = zc.zodbrecipes:server
deployment = deployment
address = 8100
path = /var/databases/ample/main.fs
zeo.conf =
<zeo>
address ${:address}
</zeo>
%import zc.zlibstorage
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
[pack]
recipe = zc.recipe.deployment:crontab
deployment = deployment
times = 1 2 * * 6
command = ${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
.. -> low_level
This buildout doesn't build software. Rather it builds configuration
for deploying a database configuration using already-deployed
software. For the purpose of this document, however, the details are
totally unimportant.
Rather than crafting the configuration above every time, we can write
a meta-recipe that crafts it for us. We'll use our meta-recipe as
follows::
[buildout]
parts = ample
[ample]
recipe = com.example.ample:db
path = /var/databases/ample/main.fs
The idea here is that the meta recipe allows us to specify the minimal
information necessary. A meta-recipe often automates policies and
assumptions that are application and organization dependent. The
example above assumes, for example, that we want to pack to 3
days in the past on Saturdays.
So now, let's see the meta recipe that automates this::
import zc.metarecipe
class Recipe(zc.metarecipe.Recipe):
def __init__(self, buildout, name, options):
super(Recipe, self).__init__(buildout, name, options)
self.parse('''
[deployment]
recipe = zc.recipe.deployment
name = %s
user = zope
''' % name)
self['main'] = dict(
recipe = 'zc.zodbrecipes:server',
deployment = 'deployment',
address = 8100,
path = options['path'],
**{
'zeo.conf': '''
<zeo>
address ${:address}
</zeo>
%import zc.zlibstorage
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
'''}
)
self.parse('''
[pack]
recipe = zc.recipe.deployment:crontab
deployment = deployment
times = 1 2 * * 6
command =
${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
[ctl]
recipe = zc.recipe.rhrc
deployment = deployment
chkconfig = 345 99 10
parts = main
''')
.. -> source
>>> exec source
The meta recipe just adds parts to the buildout. It does this by
calling inherited __setitem__ and ``parse`` methods. The ``parse``
method just takes a string in ``ConfigParser`` syntax. It's useful
when we want to add static, or nearly static part data. The setitem
syntax is useful when we have non-trivial computation for part data.
The order that we add parts is important. When adding a part, any
string substitutions and other dependencies are evaluated, so the
referenced parts must be defined first. This is why, for example, the
``pack`` part is added after the ``main`` part.
Note that the meta recipe supplied an integer for one of the
options. In addition to strings, it's legal to supply integer and
unicode values.
Testing
=======
Now, let's test it. We'll test it without actually running
buildout. Rather, we'll use a faux buildout provided by the
zc.metarecipe.testing module.
>>> import zc.metarecipe.testing
>>> buildout = zc.metarecipe.testing.Buildout()
>>> _ = Recipe(buildout, 'ample', dict(path='/var/databases/ample/main.fs'))
[deployment]
name = ample
recipe = zc.recipe.deployment
user = zope
[main]
address = 8100
deployment = deployment
path = /var/databases/ample/main.fs
recipe = zc.zodbrecipes:server
zeo.conf = <zeo>
address ${:address}
</zeo>
<BLANKLINE>
%import zc.zlibstorage
<BLANKLINE>
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
[ctl]
chkconfig = 345 99 10
deployment = deployment
parts = main
recipe = zc.recipe.rhrc
[pack]
command = ${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
deployment = deployment
recipe = zc.recipe.deployment:crontab
times = 1 2 * * 6
When we call our recipe, it will add sections to the test buildout and
these are simply printed as added, so we can verify that the correct
data was generated.
That's pretty much it.
Changes
=======
0.2.1 (2014-01-24)
------------------
- Fixed: When parsing configuration text, sections were input and
evaluated at the same time in section sorted order. This
caused problems if a section that sorted early referred to a
section that sorted late.
0.2.0 (2012-09-24)
------------------
- When setting option values, unicode and int values will be converted
to strings. Other non-string values are rejected. Previously, it
was easy to get errors from buildout when setting options with
values read from ZooKeeper trees, which are unicode due to the use
of JSON.
- Fixed: When using the meta-recipe parse method, the order that
resulting sections were added was non-deterministic, due to the
  way ConfigParser works. Now sections are added to a buildout
  in sorted order, by section name.
0.1.0 (2012-05-31)
------------------
Initial release
| zc.metarecipe | /zc.metarecipe-0.2.1.tar.gz/zc.metarecipe-0.2.1/README.txt | README.txt |
============
Meta-recipes
============
Buildout recipes provide reusable Python modules for common
configuration tasks. The most widely used recipes tend to provide
low-level functions, like installing eggs or software distributions,
creating configuration files, and so on. The normal recipe framework
is fairly well suited to building these general components.
Full-blown applications may require many, often tens, of parts.
Defining the many parts that make up an application can be tedious and
often entails a lot of repetition. Buildout provides a number of
mechanisms to avoid repetition, including merging of configuration
files and macros, but these, while useful to an extent, don't scale
very well. Buildout isn't and shouldn't be a programming language.
Meta-recipes allow us to bring Python to bear to provide higher-level
abstractions for buildouts.
A meta-recipe is a regular Python recipe that primarily operates by
creating parts. A meta recipe isn't merely a high level recipe. It's
a recipe that defers most of its work to lower-level recipes by
manipulating the buildout database.
Unfortunately, buildout doesn't yet provide a high-level API for
creating parts. It has a private low-level API which has been
promoted to public (meaning it won't be broken by future releases), and
it's straightforward to write the needed high-level API, but it's
annoying to repeat the high-level API in each meta recipe.
This small package provides the high-level API needed for meta recipes
and a simple testing framework. It will be merged into a future
buildout release.
A `presentation at PyCon 2011
<http://blip.tv/pycon-us-videos-2009-2010-2011/pycon-2011-deploying-applications-with-zc-buildout-4897770>`_
described early work with meta recipes.
.. contents::
A simple meta-recipe example
============================
Let's look at a fairly simple meta-recipe example. First, consider a
buildout configuration that builds a database deployment::
[buildout]
parts = ctl pack
[deployment]
recipe = zc.recipe.deployment
name = ample
user = zope
[ctl]
recipe = zc.recipe.rhrc
deployment = deployment
chkconfig = 345 99 10
parts = main
[main]
recipe = zc.zodbrecipes:server
deployment = deployment
address = 8100
path = /var/databases/ample/main.fs
zeo.conf =
<zeo>
address ${:address}
</zeo>
%import zc.zlibstorage
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
[pack]
recipe = zc.recipe.deployment:crontab
deployment = deployment
times = 1 2 * * 6
command = ${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
.. -> low_level
This buildout doesn't build software. Rather it builds configuration
for deploying a database configuration using already-deployed
software. For the purpose of this document, however, the details are
totally unimportant.
Rather than crafting the configuration above every time, we can write
a meta-recipe that crafts it for us. We'll use our meta-recipe as
follows::
[buildout]
parts = ample
[ample]
recipe = com.example.ample:db
path = /var/databases/ample/main.fs
The idea here is that the meta recipe allows us to specify the minimal
information necessary. A meta-recipe often automates policies and
assumptions that are application and organization dependent. The
example above assumes, for example, that we want to pack to 3
days in the past on Saturdays.
So now, let's see the meta recipe that automates this::
import zc.metarecipe
class Recipe(zc.metarecipe.Recipe):
def __init__(self, buildout, name, options):
super(Recipe, self).__init__(buildout, name, options)
self.parse('''
[deployment]
recipe = zc.recipe.deployment
name = %s
user = zope
''' % name)
self['main'] = dict(
recipe = 'zc.zodbrecipes:server',
deployment = 'deployment',
address = 8100,
path = options['path'],
**{
'zeo.conf': '''
<zeo>
address ${:address}
</zeo>
%import zc.zlibstorage
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
'''}
)
self.parse('''
[pack]
recipe = zc.recipe.deployment:crontab
deployment = deployment
times = 1 2 * * 6
command =
${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
[ctl]
recipe = zc.recipe.rhrc
deployment = deployment
chkconfig = 345 99 10
parts = main
''')
.. -> source
>>> exec source
The meta recipe just adds parts to the buildout. It does this by
calling inherited __setitem__ and ``parse`` methods. The ``parse``
method just takes a string in ``ConfigParser`` syntax. It's useful
when we want to add static, or nearly static part data. The setitem
syntax is useful when we have non-trivial computation for part data.
The order that we add parts is important. When adding a part, any
string substitutions and other dependencies are evaluated, so the
referenced parts must be defined first. This is why, for example, the
``pack`` part is added after the ``main`` part.
Note that the meta recipe supplied an integer for one of the
options. In addition to strings, it's legal to supply integer and
unicode values.
Testing
=======
Now, let's test it. We'll test it without actually running
buildout. Rather, we'll use a faux buildout provided by the
zc.metarecipe.testing module.
>>> import zc.metarecipe.testing
>>> buildout = zc.metarecipe.testing.Buildout()
>>> _ = Recipe(buildout, 'ample', dict(path='/var/databases/ample/main.fs'))
[deployment]
name = ample
recipe = zc.recipe.deployment
user = zope
[main]
address = 8100
deployment = deployment
path = /var/databases/ample/main.fs
recipe = zc.zodbrecipes:server
zeo.conf = <zeo>
address ${:address}
</zeo>
<BLANKLINE>
%import zc.zlibstorage
<BLANKLINE>
<zlibstorage>
<filestorage>
path ${:path}
</filestorage>
</zlibstorage>
[ctl]
chkconfig = 345 99 10
deployment = deployment
parts = main
recipe = zc.recipe.rhrc
[pack]
command = ${buildout:bin-directory}/zeopack -d3 -t00 ${main:address}
deployment = deployment
recipe = zc.recipe.deployment:crontab
times = 1 2 * * 6
When we call our recipe, it will add sections to the test buildout and
these are simply printed as added, so we can verify that the correct
data was generated.
That's pretty much it.
Changes
=======
0.2.1 (2014-01-24)
------------------
- Fixed: When parsing configuration text, sections were input and
evaluated at the same time in section sorted order. This
caused problems if a section that sorted early referred to a
section that sorted late.
0.2.0 (2012-09-24)
------------------
- When setting option values, unicode and int values will be converted
to strings. Other non-string values are rejected. Previously, it
was easy to get errors from buildout when setting options with
values read from ZooKeeper trees, which are unicode due to the use
of JSON.
- Fixed: When using the meta-recipe parse method, the order that
resulting sections were added was non-deterministic, due to the
  way ConfigParser works. Now sections are added to a buildout
  in sorted order, by section name.
0.1.0 (2012-05-31)
------------------
Initial release
| zc.metarecipe | /zc.metarecipe-0.2.1.tar.gz/zc.metarecipe-0.2.1/src/zc/metarecipe/README.txt | README.txt |
==============
Change History
==============
0.4.0.post1 (2019-12-06)
------------------------
- Fix change log on PyPI.
0.4.0 (2019-12-06)
------------------
- Use new Python 2.6/3.x compatible exception syntax. (This does not mean that
this package is already Python 3 compatible.)
0.3.1 (2012-04-27)
------------------
- When binding the monitor to a Unix-domain socket, remove an existing
socket at the same path so the bind is successful. This may affect
existing usage with respect to zopectl debug behavior, but will be
more predictable.
0.3.0 (2011-12-12)
------------------
- Added a simplified registration interface.
0.2.1 (2011-12-10)
------------------
- Added an ``address`` option to ``start`` to be able to specify an adapter
to bind to.
- ``start`` now returns the address being listened on, which is useful
when binding to port 0.
- Using Python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
0.2.0 (2009-10-28)
------------------
- Add the "MORE" mode so commands can co-opt user interaction
0.1.2 (2008-09-15)
------------------
- Bugfix: The z3monitor server lacked a handle_close method, which
caused errors to get logged when users closed connections before
giving commands.
0.1.1 (2008-09-14)
------------------
- Bugfix: fixed and added test for regression in displaying tracebacks.
0.1.0 (2008-09-14)
------------------
Initial release
| zc.monitor | /zc.monitor-0.4.0.post1.tar.gz/zc.monitor-0.4.0.post1/CHANGES.rst | CHANGES.rst |
# Bootstrap script for a buildout-based project: downloads setuptools and
# zc.buildout into a private scratch directory and installs a bin/buildout
# script for the Python interpreter used to run this file.
import os
import shutil
import sys
import tempfile

from optparse import OptionParser

__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.

# Scratch directory holding eggs that are only needed while bootstrapping;
# it is removed again at the end of the script.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]

Bootstraps a buildout-based project.

Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.

Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))

options, args = parser.parse_args()

# --version only reports this script's version and exits.
if options.version:
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)
######################################################################
# load/install setuptools

# urlopen moved between Python 2 and 3; support both.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

# Run ez_setup.py (a local copy if present, otherwise fetched from PyPA)
# in a fresh namespace; it defines use_setuptools(), used below.
ez = {}
if os.path.exists('ez_setup.py'):
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]

# Install setuptools into the scratch egg directory (or a user-supplied
# directory / version when the corresponding options were given).
setup_args = dict(to_dir=tmpeggs, download_delay=0)

if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir

ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

# Extra package index: the environment variable takes precedence, then
# --find-links, then (only with -t) the buildout test-release index.
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # Return True when parsed_version denotes a final (non-prerelease)
        # version, supporting both new and old setuptools version objects.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan all available distributions and keep the highest final one.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version

# Pin the chosen (or explicitly requested) version, if any.
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# If no command was given, run "bootstrap" (key=value settings are
# buildout option assignments, not commands).
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
# Clean up the scratch egg directory.
shutil.rmtree(tmpeggs)
==============
Monitor Server
==============
The monitor server is a server that provides a command-line interface to
request various bits of information. The server is zc.ngi based, so we can use
the zc.ngi testing infrastructure to demonstrate it.
>>> import zc.ngi.testing
>>> import zc.monitor
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
The server supports an extensible set of commands. It looks up
commands as named zc.monitor.interfaces.IMonitorPlugin "utilities", as defined
by the zope.component package.
To see this, we'll create a hello plugin:
>>> def hello(connection, name='world'):
... """Say hello
...
... Provide a name if you're not the world.
... """
... connection.write("Hi %s, nice to meet ya!\n" % name)
and register it:
>>> zc.monitor.register(hello)
When we register a command, we can provide a name. To see this, we'll
register ``hello`` again:
>>> zc.monitor.register(hello, 'hi')
Now we can give the hello command to the server:
>>> connection.test_input('hi\n')
Hi world, nice to meet ya!
-> CLOSE
We can pass a name:
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('hello Jim\n')
Hi Jim, nice to meet ya!
-> CLOSE
The server comes with a few basic commands. Let's register
them so we can see what they do. We'll use the simplfied registration
interface:
>>> zc.monitor.register_basics()
The first is the help command. Giving help without input, gives a
list of available commands:
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('help\n')
Supported commands:
hello -- Say hello
help -- Get help about server commands
hi -- Say hello
interactive -- Turn on monitor's interactive mode
quit -- Quit the monitor
-> CLOSE
We can get detailed help by specifying a command name:
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('help help\n')
Help for help:
<BLANKLINE>
Get help about server commands
<BLANKLINE>
By default, a list of commands and summaries is printed. Provide
a command name to get detailed documentation for a command.
<BLANKLINE>
-> CLOSE
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('help hello\n')
Help for hello:
<BLANKLINE>
Say hello
<BLANKLINE>
Provide a name if you're not the world.
<BLANKLINE>
-> CLOSE
The ``interactive`` command switches the monitor into interactive mode. As
seen above, the monitor usually responds to a single command and then closes
the connection. In "interactive mode", the connection is not closed until
the ``quit`` command is used. This can be useful when accessing the monitor
via telnet for diagnostics.
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('interactive\n')
Interactive mode on. Use "quit" To exit.
>>> connection.test_input('help interactive\n')
Help for interactive:
<BLANKLINE>
Turn on monitor's interactive mode
<BLANKLINE>
Normally, the monitor releases the connection after a single command.
By entering the interactive mode, the monitor will not end the connection
until you enter the "quit" command.
<BLANKLINE>
In interactive mode, an empty line repeats the last command.
<BLANKLINE>
>>> connection.test_input('help quit\n')
Help for quit:
<BLANKLINE>
Quit the monitor
<BLANKLINE>
This is only really useful in interactive mode (see the "interactive"
command).
<BLANKLINE>
Notice that the result of the commands did not end with "-> CLOSE", which would
have indicated a closed connection.
Also notice that the interactive mode allows you to repeat commands.
>>> connection.test_input('hello\n')
Hi world, nice to meet ya!
>>> connection.test_input('\n')
Hi world, nice to meet ya!
>>> connection.test_input('hello Jim\n')
Hi Jim, nice to meet ya!
>>> connection.test_input('\n')
Hi Jim, nice to meet ya!
Now we will use ``quit`` to close the connection.
>>> connection.test_input('quit\n')
Goodbye.
-> CLOSE
Finally, it's worth noting that exceptions will generate a
traceback on the connection.
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('hello Jim 42\n') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: hello() takes at most 2 arguments (3 given)
<BLANKLINE>
-> CLOSE
.. Edge cases
Closing the connection:
>>> connection.test_close('test')
Command loops
-------------
Using the "MORE" mode, commands can signal that they want to claim all future
user input. We'll implement a silly example to demonstrate how it works.
Here's a command that implements a calculator.
>>> PROMPT = '.'
>>> def calc(connection, *args):
... if args and args[0] == 'quit':
... return zc.monitor.QUIT_MARKER
...
... if args:
... connection.write(str(eval(''.join(args))))
... connection.write('\n')
...
... connection.write(PROMPT)
... return zc.monitor.MORE_MARKER
If we register this command...
>>> zc.monitor.register(calc)
...we can invoke it and we get a prompt.
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('calc\n')
.
If we then give it more input we get the result plus another prompt.
>>> connection.test_input('2+2\n')
4
.
>>> connection.test_input('4*2\n')
8
.
Once we're done we can tell the calculator to let us go.
>>> connection.test_input('quit\n')
-> CLOSE
Start server
------------
>>> import time
>>> import zope.testing.loggingsupport, logging
>>> loghandler = zope.testing.loggingsupport.InstalledHandler(
... None, level=logging.INFO)
>>> zc.monitor.start(9644)
('', 9644)
>>> print loghandler
zc.ngi.async.server INFO
listening on ('', 9644)
>>> zc.monitor.last_listener.close()
>>> zc.monitor.last_listener = None
>>> time.sleep(0.1)
>>> loghandler.clear()
>>> zc.monitor.start(('127.0.0.1', 9644))
('127.0.0.1', 9644)
>>> print loghandler
zc.ngi.async.server INFO
listening on ('127.0.0.1', 9644)
>>> zc.monitor.last_listener.close()
>>> zc.monitor.last_listener = None
>>> time.sleep(0.1)
Bind to port 0:
>>> addr = zc.monitor.start(0)
>>> addr == zc.monitor.last_listener.address
True
>>> zc.monitor.last_listener.close()
>>> zc.monitor.last_listener = None
>>> time.sleep(0.1)
Trying to rebind to a port in use:
>>> loghandler.clear()
>>> zc.monitor.start(('127.0.0.1', 9644))
('127.0.0.1', 9644)
>>> zc.monitor.start(('127.0.0.1', 9644))
False
>>> print loghandler
zc.ngi.async.server INFO
listening on ('127.0.0.1', 9644)
zc.ngi.async.server WARNING
unable to listen on ('127.0.0.1', 9644)
root WARNING
unable to start zc.monitor server because the address ('127.0.0.1', 9644) is in use.
>>> zc.monitor.last_listener.close()
>>> zc.monitor.last_listener = None
>>> time.sleep(0.1)
>>> loghandler.uninstall()
| zc.monitor | /zc.monitor-0.4.0.post1.tar.gz/zc.monitor-0.4.0.post1/src/zc/monitor/README.txt | README.txt |
"""Monitor server: a zc.ngi-based line-oriented command server.

Commands are looked up as named IMonitorPlugin utilities via
zope.component and invoked with the connection plus the parsed words
of the input line.
"""
import errno, logging, os, stat, traceback, socket

import zope.component

import zc.monitor.interfaces

# Sentinel return values through which a command controls the session:
# INTERACTIVE_MARKER keeps the connection open between commands,
# QUIT_MARKER closes it after the response, and MORE_MARKER routes all
# further input back to the same command.
INTERACTIVE_MARKER = object()
QUIT_MARKER = object()
MORE_MARKER = object()
class Server:
    """Line-oriented monitor command server.

    Each input line is split on whitespace; the first word names an
    IMonitorPlugin utility, which is called with the connection and the
    remaining words.  Commands may switch the session mode by returning
    one of the marker objects (INTERACTIVE_MARKER, QUIT_MARKER,
    MORE_MARKER).
    """

    # (command_name, args) of the previous command; an empty input line
    # repeats it, and MORE mode re-dispatches to it.
    last_command = None

    def __init__(self, connection):
        import zc.ngi.adapters
        connection = zc.ngi.adapters.Lines(connection)
        self.connection = connection
        connection.set_handler(self)
        # Default: close the connection after a single command.
        self.mode = QUIT_MARKER

    def handle_input(self, connection, data):
        args = data.strip().split()
        if self.mode is MORE_MARKER:
            # The previous command claimed all further input.
            command_name = self.last_command[0]
        elif not args:
            # Empty line: repeat the last command, if there was one.
            if self.last_command is not None:
                command_name, args = self.last_command
            else:
                return
        else:
            command_name = args.pop(0)
            self.last_command = (command_name, args)

        command = zope.component.queryUtility(
            zc.monitor.interfaces.IMonitorPlugin,
            command_name)
        if command is None:
            connection.write(
                'Invalid command %r\nTry "help".\n' % command_name)
        else:
            try:
                res = command(connection, *args)
            except Exception as v:
                # Show the full traceback, then a one-line summary.
                # connection.write is used instead of the Python-2-only
                # ``print >> connection`` statement so the module stays
                # syntactically valid on Python 3; output is unchanged.
                traceback.print_exc(100, connection)
                connection.write("%s: %s\n\n" % (v.__class__.__name__, v))
            else:
                if res in (INTERACTIVE_MARKER, QUIT_MARKER, MORE_MARKER):
                    self.mode = res

        if self.mode is QUIT_MARKER:
            connection.write(zc.ngi.END_OF_DATA)

    def handle_close(self, connection, reason):
        pass  # Don't care
#testing support
last_listener = None

def start(address):
    """start monitor server.

    Returns the listener address (which may be different from the
    given address) if monitor server started; returns False if the
    port is already in use; and raises an exception otherwise.

    ``address`` may be an int port, an (ip, port) tuple, or the path
    of a Unix-domain socket.
    """
    import zc.ngi.async
    ourAddress = None
    if isinstance(address, int):
        #a port is passed as int
        ourAddress = ('', address)
    elif isinstance(address, tuple):
        #an (address, port) tuple is passed
        ourAddress = address
    elif isinstance(address, basestring):
        # NOTE(review): ``basestring`` exists only on Python 2; this
        # branch must be revisited when porting the module to Python 3.
        #a unix domain socket string is passed
        ourAddress = address
        if os.path.exists(ourAddress):
            # Remove a stale socket file so the bind below can succeed.
            m = os.stat(ourAddress)
            if stat.S_ISSOCK(m.st_mode):
                os.unlink(ourAddress)
    try:
        global last_listener
        last_listener = zc.ngi.async.listener(ourAddress, Server)
    except socket.error as e:
        if e.args[0] == errno.EADDRINUSE:
            # Don't kill the process just because somebody else has our port.
            # This might be a zopectl debug or some other innocuous problem.
            # (Existing Unix-domain sockets are removed before binding, so
            # this doesn't work that way for those. Use a separate offline
            # configuration in that case.)
            logging.warning(
                'unable to start zc.monitor server because the address %s '\
                'is in use.',
                ourAddress)
            return False
        else:
            raise
    # Report the actual bound address (useful when binding to port 0).
    return last_listener.address
# default commands

def interactive(connection):
    """Turn on monitor's interactive mode

    Normally, the monitor releases the connection after a single command.
    By entering the interactive mode, the monitor will not end the connection
    until you enter the "quit" command.

    In interactive mode, an empty line repeats the last command.
    """
    # Tell the client how to leave the mode, then signal the server to
    # keep this connection open for further commands.
    notice = 'Interactive mode on. Use "quit" To exit.\n'
    connection.write(notice)
    return INTERACTIVE_MARKER
def quit(connection):
    """Quit the monitor

    This is only really useful in interactive mode (see the "interactive"
    command).
    """
    # Say goodbye and signal the server to close this connection.
    farewell = 'Goodbye.\n'
    connection.write(farewell)
    return QUIT_MARKER
def help(connection, command_name=None):
    """Get help about server commands

    By default, a list of commands and summaries is printed. Provide
    a command name to get detailed documentation for a command.
    """
    if command_name is not None:
        # Detailed documentation for one specific command.
        command = zope.component.getUtility(
            zc.monitor.interfaces.IMonitorPlugin,
            command_name)
        connection.write(
            "Help for %s:\n\n%s\n" % (command_name, command.__doc__))
        return

    # No command given: list every registered plugin with the first line
    # of its docstring as a summary, sorted by name.
    summaries = []
    for name, plugin in zope.component.getUtilitiesFor(
            zc.monitor.interfaces.IMonitorPlugin):
        first_doc_line = (plugin.__doc__ or '?').split('\n', 1)[0]
        summaries.append("%s -- %s" % (name, first_doc_line))
    summaries.sort()
    connection.write(str(
        "Supported commands:\n  " + '\n  '.join(summaries) + '\n'))
def register(command, name=None):
    """Register *command* as an IMonitorPlugin utility.

    When no name is given, the command's function name is used.
    """
    zope.component.provideUtility(
        command, zc.monitor.interfaces.IMonitorPlugin,
        command.__name__ if name is None else name)
def register_basics():
    """Register the built-in help, interactive, and quit commands."""
    for basic_command in (help, interactive, quit):
        register(basic_command)
zc.monitorcache
===============
zc.monitorcache is a zc.z3monitor plugin that allows one to modify or check
the cache size (in objects or bytes) of a running instance.
>>> import zc.monitorcache
>>> import zope.component
>>> import zc.ngi.testing
>>> import zc.monitor
>>> import zc.monitor.interfaces
>>> import zc.z3monitor
>>> import zc.z3monitor.interfaces
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> zope.component.provideUtility(zc.monitorcache.cacheMonitor,
... zc.z3monitor.interfaces.IZ3MonitorPlugin, 'cache_size')
>>> connection.test_input('cache_size\n')
-> CLOSE
We have no databases right now. Let's add a few so that we can test.
>>> import ZODB.tests.util
>>> import ZODB.interfaces
>>> main = ZODB.tests.util.DB()
>>> zope.component.provideUtility(main, ZODB.interfaces.IDatabase)
>>> test = ZODB.tests.util.DB()
>>> zope.component.provideUtility(
... test, ZODB.interfaces.IDatabase, 'test')
Now we should get information on each of the database's cache sizes
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size\n')
DB cache sizes for main
Max objects: 400
Max object size bytes: 0MB
DB cache sizes for test
Max objects: 400
Max object size bytes: 0MB
-> CLOSE
We can request information about a specific db as well
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size -\n')
DB cache sizes for main
Max objects: 400
Max object size bytes: 0MB
-> CLOSE
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size test\n')
DB cache sizes for test
Max objects: 400
Max object size bytes: 0MB
-> CLOSE
We can also modify cache sizes for a specific db
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size test 300\n')
Set max objects to 300
-> CLOSE
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size test 10MB\n')
Set max object size bytes to 10MB
-> CLOSE
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
>>> connection.test_input('cache_size test\n')
DB cache sizes for test
Max objects: 300
Max object size bytes: 10MB
-> CLOSE
| zc.monitorcache | /zc.monitorcache-0.1.0.tar.gz/zc.monitorcache-0.1.0/src/zc/monitorcache/README.txt | README.txt |
zc.z3monitor plugin and log handler for getting Log statistics
==============================================================
zc.monitorlogstats provides a zc.z3monitor plugin and log handler to
track log statistics. The idea is that you can connect to it to find
out how many log entries of various types have been posted. If you
sample it over time, you can see how many entries are added. In
particular, if you get new warning, error, or critical entries,
someone might want to look at the logs to find out what's going on.
Counting Log Handler
--------------------
Let's start by looking at the log handler. The factory
zc.monitorlogstats.CountingHandler can be installed like any other
handler. It doesn't emit anything. It just counts.
Let's create one to see how it works:
>>> import logging, zc.monitorlogstats
>>> handler = zc.monitorlogstats.CountingHandler()
>>> logging.getLogger().addHandler(handler)
>>> logging.getLogger().setLevel(logging.INFO)
Now, let's log:
>>> for i in range(5):
... logging.getLogger('foo').critical('Yipes')
>>> for i in range(9):
... logging.getLogger('bar').error('oops')
>>> for i in range(12):
... logging.getLogger('baz').warn('hm')
>>> for i in range(21):
... logging.getLogger('foo').info('yawn')
>>> for i in range(99):
... logging.getLogger('xxx').log(5, 'yuck yuck')
We can ask the handler for statistics:
>>> handler.start_time
datetime.datetime(2008, 9, 5, 21, 10, 14)
>>> for level, count, message in handler.statistics:
... print level, count
... print `message`
20 21
'yawn'
30 12
'hm'
40 9
'oops'
50 5
'Yipes'
The statistics consist of the log level, the count of log messages,
and the formatted text of last message.
We can also ask it to clear it's statistics:
>>> handler.clear()
>>> for i in range(3):
... logging.getLogger('foo').critical('Eek')
>>> handler.start_time
datetime.datetime(2008, 9, 5, 21, 10, 15)
>>> for level, count, message in handler.statistics:
... print level, count
... print `message`
50 3
'Eek'
There's ZConfig support for defining counting handlers:
>>> import ZConfig, StringIO
>>> schema = ZConfig.loadSchemaFile(StringIO.StringIO("""
... <schema>
... <import package="ZConfig.components.logger"/>
... <multisection type="logger" attribute="loggers" name="*" required="no">
... </multisection>
... </schema>
... """))
>>> conf, _ = ZConfig.loadConfigFile(schema, StringIO.StringIO("""
... %import zc.monitorlogstats
... <logger>
... name test
... level INFO
... <counter>
... format %(name)s %(message)s
... </counter>
... </logger>
... """))
>>> testhandler = conf.loggers[0]().handlers[0]
>>> for i in range(2):
... logging.getLogger('test').critical('Waaa')
>>> for i in range(22):
... logging.getLogger('test.foo').info('Zzzzz')
>>> for level, count, message in handler.statistics:
... print level, count
... print `message`
20 22
'Zzzzz'
50 5
'Waaa'
>>> for level, count, message in testhandler.statistics:
... print level, count
... print `message`
20 22
'test.foo Zzzzz'
50 2
'test Waaa'
Note that the message output from the test handler reflects the format
we used when we set it up.
The example above illustrates that you can install as many counting
handlers as you want to.
Monitor Plugin
--------------
The zc.monitorlogstats Monitor plugin can be used to query log statistics.
>>> import sys
>>> plugin = zc.monitorlogstats.monitor(sys.stdout)
2008-09-05T21:10:15
20 22 'Zzzzz'
50 5 'Waaa'
The output consists of the start time and line for each log level for
which there are statistics. Each statistics line has the log level,
entry count, and a repr of the last log message.
By default, the root logger will be used. You can specify a logger name:
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, 'test')
2008-09-05T21:10:16
20 22 'test.foo Zzzzz'
50 2 'test Waaa'
You can use '.' for the root logger:
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, '.')
2008-09-05T21:10:15
20 22 'Zzzzz'
50 5 'Waaa'
Note that if there are multiple counting handlers for a logger, only
the first will be used. (So don't define more than one. :)
It is an error to name a logger without a counting handler:
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, 'test.foo')
Traceback (most recent call last):
...
ValueError: Invalid logger name: test.foo
You can specify a second argument with a value of 'clear', to clear
statistics:
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, 'test', 'clear')
2008-09-05T21:10:16
20 22 'test.foo Zzzzz'
50 2 'test Waaa'
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, 'test', 'clear')
2008-09-05T21:10:17
.. Edge case:
>>> plugin = zc.monitorlogstats.monitor(sys.stdout, 'test', 'yes')
Traceback (most recent call last):
...
ValueError: The second argument, if present, must have the value 'clear'.
.. Cleanup:
>>> logging.getLogger().removeHandler(handler)
>>> logging.getLogger().setLevel(logging.NOTSET)
>>> logging.getLogger('test').removeHandler(testhandler)
>>> logging.getLogger('test').setLevel(logging.NOTSET)
| zc.monitorlogstats | /zc.monitorlogstats-0.1.0.tar.gz/zc.monitorlogstats-0.1.0/src/zc/monitorlogstats/README.txt | README.txt |
zc.monitorpdb
=============
zc.monitorpdb is a small plugin for the (very) lightweight zc.monitor
system. It allows a user to telnet to a monitor port and invoke a
Python debugger (PDB) prompt.
To use it, one must first register the command so zc.monitor is aware of
it.
>>> import zc.monitorpdb
>>> import zope.component
>>> import zc.monitor.interfaces
>>> zope.component.provideUtility(zc.monitorpdb.command,
... zc.monitor.interfaces.IMonitorPlugin, 'pdb')
Since zc.monitor is implemented with zc.ngi, we can use zc.ngi's testing
helpers.
>>> import zc.ngi.testing
>>> connection = zc.ngi.testing.TextConnection()
>>> server = zc.monitor.Server(connection)
If we invoke the command, we'll get the appropriate prompt.
>>> connection.test_input('pdb\n')
(Pdb)
Now we can do normal pdb things like list the code being executed.
>>> connection.test_input('l\n')
34 global fakeout
35
36 fakeout = FakeStdout(connection.connection)
37 debugger = pdb.Pdb(stdin=None, stdout=fakeout)
38 debugger.reset()
39 -> debugger.setup(sys._getframe(), None)
40
41
42 def command(connection, *args):
43 global debugger
44 global fakeout
(Pdb)
As well as go "up" in the function call stack.
>>> connection.test_input('u\n')
> /graphted-storage/workspace/zc.monitorpdb/src/zc/monitorpdb/__init__.py(48)command()
-> reset(connection)
(Pdb)
There is a "reset" command that gives us a fresh debugger (just in case
something bad happened to ours and we don't want to restart the host
process). Here we go from the current location being one thing (the
result of the previous "u" command) to another.
>>> connection.test_input('l\n')
57 return zc.monitor.QUIT_MARKER
58 else:
59 debugger.onecmd(' '.join(args))
60
61 connection.write(debugger.prompt)
62 -> return zc.monitor.MORE_MARKER
[EOF]
(Pdb)
>>> connection.test_input('reset\n')
(Pdb)
>>> connection.test_input('l\n')
34 global fakeout
35
36 fakeout = FakeStdout(connection.connection)
37 debugger = pdb.Pdb(stdin=None, stdout=fakeout)
38 debugger.reset()
39 -> debugger.setup(sys._getframe(), None)
40
41
42 def command(connection, *args):
43 global debugger
44 global fakeout
(Pdb)
Some features don't work, however.
>>> connection.test_input('debug 1+1\n')
the "debug" command is not supported
(Pdb)
Once we're done, we ask to be let go.
>>> connection.test_input('quit\n')
-> CLOSE
| zc.monitorpdb | /zc.monitorpdb-1.0.0.tar.gz/zc.monitorpdb-1.0.0/src/zc/monitorpdb/README.txt | README.txt |
Changelog
=========
2.1.0 (2017-08-31)
------------------
New features:
- support IPv6
2.0.1 (2012-04-06)
------------------
Bugs Fixed
- Sending data faster than a socket could transmit it wasn't handled
correctly.
2.0.0 (2011-12-10)
------------------
Bugs Fixed
- zc.ngi.async listeners didn't provide the real address when binding
to port 0.
2.0.0a6 (2011-05-26)
--------------------
Bugs Fixed
- If application code made many small writes, each write was sent
individually, which could trigger Nagle's algorithm.
2.0.0a5 (2010-08-19)
--------------------
New Features:
- Connection objects have a new peer_address attribute, which is
equivalent to calling ``getpeername()`` on sockets.
Bugs Fixed:
- Servers using unix-domain sockets didn't clean up socket files.
- When testing listeners were closed, handle_close, rather than close,
was called on server connections.
- The zc.ngi.async connections' ``write`` and ``writelines`` methods
didn't raise errors when called on closed connections.
- The built-in connection adapters and handy adapter base class
didn't implement __nonzero__.
2.0.0a4 (2010-07-27)
--------------------
Bugs Fixed:
- When using zc.ngi.testing and a server sent input and closed a
connection before set_handler was called on the client, the input
sent by the server was lost.
- By default, calling close on a connection could cause already
  written data not to be sent. Now, don't close connections until
  data passed to write or writelines has, at least, been passed to the
  underlying IO system (e.g. socket.send).
(This means the undocumented practice of sending zc.ngi.END_OF_DATA
to write is now deprecated.)
2.0.0a3 (2010-07-22)
--------------------
Bugs Fixed:
- Fixed a packaging bug.
2.0.0a2 (2010-07-22)
--------------------
New Features:
- There's a new experimental zc.ngi.async.Implementation.listener
option to run each client (server connection) in its own thread.
(It's not documented. It's experimental, but there is a doctest.)
Bugs Fixed:
- There was a bug in handling connecting to testing servers that
caused printing handlers to be used when they shouldn't have been.
2.0.0a1 (2010-07-08)
--------------------
New Features:
- New improved documentation
- Support for writing request handlers in an imperative style using
generators.
- Cleaner testing interfaces
- Refactored ``zc.ngi.async`` thread management to make the blocking
APIs unnecessary. ``zc.ngi.async.blocking`` is now deprecated.
- Added support for running multiple ``async`` implementations in
separate threads. This is useful in applications with fewer network
connections and with handlers that tend to perform long-lasting
computations that would be unacceptable with a single select loop.
- Renamed IConnection.setHandler to set_handler.
- Dropped support for Python 2.4.
Bugs Fixed:
- The ``Sized`` request adapter's ``writelines`` method was broken.
- There we a number of problems with error handling in the ``async``
implementation.
1.1.6 (2010-03-01)
------------------
Bug fixed:
- Fixed bad logging of ``listening on ...``. The message was emitted
before the actual operation was successful. Emits now a warning
``unable to listen on...`` if binding to the given address fails.
1.1.5 (2010-01-19)
------------------
Bug fixed:
- Fixed a fatal win32 problem (socket.AF_UNIX usage).
- Removed impropper use of the SO_REUSEADDR socket option on windows.
- The sized adapter performed poorly (because it triggered Nagle's
algorithm).
1.1.4 (2009-10-28)
------------------
Bug fixed:
- Spurious warnings sometimes occurred due to a race condition in
setting up servers.
- Added missing "writelines" method to zc.ngi.adapters.Lines.
1.1.3 (2009-07-30)
------------------
Bug fixed:
- zc.ngi.async bind failures weren't handled properly, causing lots of
annoying log messages to get spewed, which tended to fill up log
files.
1.1.2 (2009-07-02)
------------------
Bugs fixed:
- The zc.ngi.async thread wasn't named. All threads should be named.
1.1.1 (2009-06-29)
------------------
Bugs fixed:
- zc.ngi.blocking didn't properly handle connection failures.
1.1.0 (2009-05-26)
------------------
Bugs fixed:
- Blocking input and output files didn't properly synchronize closing.
- The testing implementation made multiple simultaneous calls to
handler methods in violation of the promise made in interfaces.py.
- Async TCP servers used too low a listen depth, causing performance
issues and spurious test failures.
New features:
- Added UDP support.
- Implementation responsibilities were clarified through an
IImplementation interface. The "connector" attribute of the testing
and async implementations was renamed to "connect". The old name
still works.
- Implementations are now required to log handler errors and to close
connections in response to connection-handler errors. (Otherwise,
handlers, and especially handler adapters, would have to do this.)
1.0.1 (2007-05-30)
------------------
Bugs fixed:
- Server startups sometimes failed with an error like::
warning: unhandled read event
warning: unhandled write event
warning: unhandled read event
warning: unhandled write event
------
2007-05-30T22:22:43 ERROR zc.ngi.async.server listener error
Traceback (most recent call last):
File "asyncore.py", line 69, in read
obj.handle_read_event()
File "asyncore.py", line 385, in handle_read_event
self.handle_accept()
File "/zc/ngi/async.py", line 325, in handle_accept
sock, addr = self.accept()
TypeError: unpack non-sequence
| zc.ngi | /zc.ngi-2.1.0.tar.gz/zc.ngi-2.1.0/CHANGES.rst | CHANGES.rst |
Network Gateway Interface
-------------------------
The Network Gateway Interface provides:
- the ability to test application networking code without use of
sockets, threads or subprocesses
- clean separation of application code and low-level networking code
- a fairly simple inheritance free set of networking APIs
- an event-based framework that makes it easy to handle many
simultaneous connections while still supporting an imperative
programming style.
To learn more, see http://packages.python.org/zc.ngi/
| zc.ngi | /zc.ngi-2.1.0.tar.gz/zc.ngi-2.1.0/README.rst | README.rst |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
# Temporary directory that receives the downloaded setuptools/buildout
# eggs; removed again at the very end of the script.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
# Command-line interface for the bootstrapper.
parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))
options, args = parser.parse_args()
# --version only reports this script's version and exits.
if options.version:
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)
######################################################################
# load/install setuptools
# Prefer the Python 3 urllib location; fall back to urllib2 on Python 2.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen
ez = {}
# Execute ez_setup.py (a local copy if present, otherwise fetched from
# PyPA) inside the ``ez`` namespace; it defines use_setuptools().
if os.path.exists('ez_setup.py'):
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]
# Install setuptools into tmpeggs (or the caller-supplied directory).
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set.  We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
# The child process runs easy_install to fetch zc.buildout into tmpeggs.
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        # True when *parsed_version* denotes a final (non-pre) release.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan the index for the highest final version available.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
# Make the freshly installed zc.buildout importable, then invoke it.
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
# With no bare (non key=value) argument, default to the 'bootstrap' command.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
# Clean up the temporary egg directory.
shutil.rmtree(tmpeggs)
import errno
import logging
import os
import random
import socket
import sys
import threading
import time
import zc.ngi
import zc.ngi.async
import zc.ngi.message
_lock = threading.Lock()
_lock.acquire()
logger = logging.getLogger('zc.ngi.wordcount')
class Server:
    """Word-count server.

    Accumulates input until a '\\0' terminator arrives, then answers each
    request with "<lines> <words> <chars>\\n".  The single-letter requests
    'Q' (quit listener), 'C' (close connection) and 'E' (raise an error)
    drive the control paths used by the tests.
    """

    def __init__(self, connection):
        if __debug__:
            logger.debug("Server(%r)", connection)
        self.input = ''
        connection.set_handler(self)

    def handle_input(self, connection, data):
        if __debug__:
            logger.debug("server handle_input(%r, %r)", connection, data)
        self.input += data
        while '\0' in self.input:
            request, self.input = self.input.split('\0', 1)
            if request == 'Q':
                # Acknowledge, then shut down connection and listener.
                connection.write('Q\n')
                connection.close()
                connection.control.close()
                return
            if request == 'C':
                connection.close()
                return
            if request == 'E':
                raise ValueError(request)
            # Ordinary document: report line, word, and character counts.
            chars = len(request)
            lines = request.count('\n')
            words = len(request.split())
            connection.write("%s %s %s\n" % (lines, words, chars))

    def handle_close(self, connection, reason):
        if __debug__:
            logger.debug("server handle_close(%r, %r)", connection, reason)
def serve():
    """Script entry point: run a word-count server.

    Expects two command-line arguments: the port to listen on and the
    numeric log level.  Log records are written to 'server.log' until the
    async loop finishes (zc.ngi.async.wait(11) -- presumably an 11-second
    cap; confirm against zc.ngi), so the parent test process can collect
    the log afterwards (see stop_server_process).
    """
    port, level = sys.argv[1:]
    logfile = open('server.log', 'w')
    handler = logging.StreamHandler(logfile)
    logging.getLogger().addHandler(handler)
    logger.setLevel(int(level))
    logger.addHandler(logging.StreamHandler())
    logger.info('serving')
    zc.ngi.async.listener(('localhost', int(port)), Server)
    zc.ngi.async.wait(11)
    logging.getLogger().removeHandler(handler)
    handler.close()
def get_port():
    """Return a port that is not in use.

    Checks if a port is in use by trying to connect to it.  Assumes it
    is not in use if connect raises an exception.

    Raises RuntimeError after 10 tries.
    """
    for _attempt in range(10):
        candidate = random.randrange(20000, 30000)
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.connect(('localhost', candidate))
            # Connect succeeded: something is listening; try another port.
        except socket.error:
            # Perhaps we should check value of error too.
            return candidate
        finally:
            probe.close()
    raise RuntimeError("Can't find port")
def wait(addr, up=True):
    """Poll *addr* until a server is accepting connections (``up=True``)
    or has stopped accepting them (``up=False``).

    Polls every 0.25 seconds, up to 120 times (about 30 seconds), and
    prints a diagnostic message if the desired state is never reached.
    """
    for i in range(120):
        time.sleep(0.25)
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(addr)
            s.close()
            if up:
                break
        except socket.error as e:
            # Was Python 2-only ``except socket.error, e`` with ``e[0]``;
            # ``as`` and ``e.args[0]`` work on both Python 2.6+ and 3.
            if e.args[0] not in (errno.ECONNREFUSED, errno.ECONNRESET):
                raise
            s.close()
            if not up:
                break
    else:
        if up:
            print("Could not connect")
        else:
            print("Server still accepting connections")
def start_server_process(loglevel=None):
    """Start a server in a subprocess and return the port used

    The child re-runs this module (via __file__) with the chosen port and
    log level as arguments, inheriting the current sys.path through
    PYTHONPATH.  Blocks (via wait) until the server accepts connections.
    """
    port = get_port()
    env = dict(
        os.environ,
        PYTHONPATH=os.pathsep.join(sys.path),
        )
    if loglevel is None:
        # Default to this module's effective logger level.
        loglevel = logger.getEffectiveLevel()
    os.spawnle(os.P_NOWAIT, sys.executable, sys.executable, __file__,
               str(port), str(loglevel),
               env)
    addr = 'localhost', port
    wait(addr)
    return port
def stop_server_process(connect, addr):
    """Ask the server at *addr* to quit and echo its log output.

    Sends the 'Q' command, waits until the port stops accepting
    connections, then prints the server's log file (without appending a
    newline) and removes it.
    """
    zc.ngi.message.message(connect, addr, 'Q\0')
    wait(addr, up=False)
    log = open('server.log').read()
    os.remove('server.log')
    # Was the Python 2-only ``print log,`` (trailing comma suppresses the
    # newline); sys.stdout.write produces identical output on 2 and 3.
    sys.stdout.write(log)
sample_docs = [
"""Hello world
""",
"""I give my pledge as an earthling
to save and faithfully to defend from waste
the natural resources of my planet
its soils, minerals, forests, waters and wildlife.
""",
"""On my honor, I will do my best
to do my duty to God and my country
and to obey the Scout Law
to always help others
to keep myself physically strong, mentally awake, and morally straight.
""",
"""What we have here, is a failure to communicate.
""",
]
class Client:
    """Test client: sends each document and checks the server's
    "<lines> <words> <chars>" reply against locally computed counts."""

    def __init__(self, docs=sample_docs, notify=None):
        self.docs = list(docs)
        self.notify = notify
        self.input = ''

    def connected(self, connection):
        if __debug__:
            logger.debug("connected(%r)", connection)
        connection.write(self.docs[0]+'\0')
        connection.set_handler(self)

    def failed_connect(self, reason):
        # Was a Python 2-only print statement; this form is valid and
        # prints identically on Python 2 and 3.
        print('Failed to connect: %s' % (reason,))
        # Guard the callback: the other methods already check for None,
        # but failed_connect previously assumed notify was always set.
        if self.notify is not None:
            self.notify()

    def handle_input(self, connection, data):
        if __debug__:
            logger.debug("client handle_input(%r, %r)", connection, data)
        self.input += data
        if '\n' in self.input:
            data, self.input = self.input.split('\n', 1)
            # Recompute the expected counts for the document we just sent.
            doc = self.docs.pop(0)
            cc = len(doc)
            lc = len(doc.split('\n'))-1
            wc = len(doc.split())
            expected = "%s %s %s" % (lc, wc, cc)
            if data != expected:
                print('%r != %r' % (data, expected))
            if self.docs:
                # More documents to check: send the next one.
                connection.write(self.docs[0]+'\0')
            else:
                connection.close()
                if self.notify is not None:
                    self.notify()

    def handle_close(self, connection, reason):
        if __debug__:
            logger.debug("client handle_close(%r, %r)", connection, reason)
        if self.docs:
            print('unexpected close %s' % (reason,))
def client_thread(connect, addr):
    """Run a Client against *addr* and block until it reports completion."""
    logger.info('client started for %s', addr)
    done = threading.Lock()
    done.acquire()
    connect(addr, Client(notify=done.release))
    logger.info('client waiting')
    done.acquire()  # released by the client's notify callback
    logger.info('client done')
if __name__ == '__main__':
serve() | zc.ngi | /zc.ngi-2.1.0.tar.gz/zc.ngi-2.1.0/src/zc/ngi/wordcount.py | wordcount.py |
class Interface:
    # Minimal no-op stand-in; replaced by zope.interface.Interface when
    # that package is importable (see the try/except import below).
    pass
def Attribute(text):
    """Fallback for ``zope.interface.Attribute``: returns the text as-is."""
    return text
def implements(*interfaces):
    """Fallback for ``zope.interface.implements``: accepts anything, does nothing."""
    return None
moduleProvides = implements
try:
from zope.interface import Interface, Attribute, implements, moduleProvides
except ImportError:
pass
class IImplementation(Interface):
    """Standard interface for ngi implementations

    Bundles the factories for making outgoing connections, listening
    for incoming TCP connections, and sending or receiving UDP
    messages.
    """
    def connect(address, handler):
        """Try to make a connection to the given address
        The handler is an ``IClientConnectHandler``.  The handler
        ``connected`` method will be called with an ``IConnection`` object
        if and when the connection succeeds or ``failed_connect`` method
        will be called if the connection fails.
        This method is thread safe. It may be called by any thread at
        any time.
        """
    def listener(address, handler):
        """Listen for incoming TCP connections
        When a connection is received, call the handler.
        An ``IListener`` object is returned.
        This method is thread safe. It may be called by any thread at
        any time.
        """
    def udp(address, message):
        """Send a UDP message
        This method is thread safe. It may be called by any thread at
        any time.
        """
    def udp_listener(address, handler, buffer_size=4096):
        """Listen for incoming UDP messages
        When a message is received, call the handler with the message.
        An ``IUDPListener`` object is returned.
        This method is thread safe. It may be called by any thread at
        any time.
        """
class IConnection(Interface):
    """Network connections
    This is an implementation interface.
    Network connections support communication over a network
    connection, or any connection having separate input and output
    channels.
    """
    def __nonzero__():
        """Return the connection status
        True is returned if the connection is open/active and
        False otherwise.
        """
    def set_handler(handler):
        """Set the ``IConnectionHandler`` for a connection.
        This method may be called multiple times, but it should only
        be called in direct response to an implementation call to a
        ``IConnectionHandler``, ``IClientConnectHandler``, or
        ``IServer``.
        Any failure of a handler call must be caught and logged.  If
        an exception is raised by a call to ``handle_input`` or
        ``handle_exception``, the connection must be closed by the
        implementation.
        """
    def write(data):
        """Output a string to the connection.
        The write call is non-blocking.
        This method is thread safe. It may be called by any thread at
        any time.
        """
    def writelines(data):
        """Output an iterable of strings to the connection.
        The ``writelines`` call is non-blocking. Note, that the data may
        not have been consumed when the method returns.
        This method is thread safe. It may be called by any thread at
        any time.
        """
    def close():
        """Close the connection
        This method is thread safe. It may be called by any thread at
        any time.
        """
    peer_address = Attribute(
        """The peer address
        For socket-based connectionss, this is the result of calling
        getpeername on the socket.
        This is primarily interesting for servers that want to vary
        behavior depending on where clients connect from.
        """)
class IServerConnection(IConnection):
    """Server connection
    This is an implementation interface.
    Extends ``IConnection`` with a ``control`` attribute giving access
    to the listener side.
    """
    control = Attribute("An IListener")
class IConnectionHandler(Interface):
    """Application objects that can handle connection input-data events
    This is an application interface.
    The methods defined by this interface will never be called
    simultaneously from separate threads, so implementation of the
    methods needn't be concerned with thread safety with respect to
    these methods.
    """
    def handle_input(connection, data):
        """Handle input data from a connection
        The data is an 8-bit string.
        Note that there are no promises about data organization.  The
        data isn't necessarily record oriented.  For example, data
        could, in theory be passed one character at a time.  It is up
        to applications to organize data into records, if desired.
        """
    def handle_close(connection, reason):
        """Receive notification that a connection has closed
        The reason argument can be converted to a string for logging
        purposes.  It may have data useful for debugging, but this
        is undefined.
        Notifications are received when the connection is closed
        externally, for example, when the other side of the
        connection is closed or in case of a network failure.  No
        notification is given when the connection's close method is
        called.
        """
    def handle_exception(connection, exception):
        """Receive a report of an exception encountered by a connection
        This method is used to receive exceptions from an NGI
        implementation.  This will only be due to an error
        encountered processing data passed to the connection
        ``writelines`` methods.
        """
class IClientConnectHandler(Interface):
    """Receive notifications of connection results
    This is an application interface.
    """
    def connected(connection):
        """Receive notification that a connection has been established
        """
    def failed_connect(reason):
        """Receive notification that a connection could not be established
        The reason argument can be converted to a string for logging
        purposes.  It may have data useful for debugging, but this
        is undefined.
        """
class IServer(Interface):
    """Handle server connections
    This is an application interface.
    A server is just a callable that takes a connection and sets its
    handler.
    """
    def __call__(connection):
        """Handle a connection from a client
        """
class IUDPHandler(Interface):
    """Handle udp messages
    This is an application interface.
    A UDP handler is a callable that takes a client address and an
    8-bit string message.
    """
    def __call__(addr, data):
        """Handle a message from a client
        """
class IListener(Interface):
    """Listener information and close control

    This is an implementation interface.
    """

    # Address the listener is bound to; for TCP this is a (host, port)
    # tuple, for Unix-domain sockets a path.
    address = Attribute("The address the listener is listening on.")

    def connections():
        """return an iterable of the current connections
        """

    def close(handler=None):
        """Close the listener and all of its connections

        If no handler is passed, the listener and its connections
        are closed immediately without waiting for any pending input
        to be handled or for pending output to be sent.

        If a handler is passed, the listener will stop accepting new
        connections and existing connections will be left open. The
        handler will be called when all of the existing connections
        have been closed.
        """
class IUDPListener(Interface):
    """UDP Listener close control

    This is an implementation interface.
    """

    def close():
        """Close the listener
        """
class ConnectionFailed(Exception):
    """A connection attempt failed
    """
class Timeout(Exception):
    """Something took too long
    """
"""NGI connection adapters
"""
import struct
import warnings
import zc.ngi.generator
class Base(object):
    """Base class for NGI connection adapters.

    An adapter wraps an ``IConnection`` and presents the same
    interface, forwarding data and events between the underlying
    connection and the application handler.  ``write``, ``writelines``
    and ``handle_input`` rebind themselves to the underlying bound
    methods on first use, so subsequent calls skip the indirection.
    """

    def __init__(self, connection):
        self.connection = connection

    def close(self):
        self.connection.close()

    def write(self, data):
        # Memoize the underlying connection's write on first use.
        self.write = self.connection.write
        self.write(data)

    def writelines(self, data):
        # Memoize the underlying connection's writelines on first use.
        self.writelines = self.connection.writelines
        self.writelines(data)

    def set_handler(self, handler):
        self.handler = handler
        try:
            self.connection.set_handler(self)
        except AttributeError:
            # Fall back to the deprecated spelling for old connections.
            self.connection.setHandler(self)
            warnings.warn("setHandler is deprecated. Use set_handler,",
                          DeprecationWarning, stacklevel=2)

    def setHandler(self, handler):
        warnings.warn("setHandler is deprecated. Use set_handler,",
                      DeprecationWarning, stacklevel=2)
        self.set_handler(handler)

    def handle_input(self, connection, data):
        # Bug fix: memoize the handler's bound handle_input *on the
        # instance* before calling it.  The previous code fetched it
        # into a local that was never used and then called
        # self.handle_input, which resolved back to this method and
        # recursed forever.
        self.handle_input = handle_input = self.handler.handle_input
        handle_input(connection, data)

    def handle_close(self, connection, reason):
        self.handler.handle_close(connection, reason)

    def handle_exception(self, connection, reason):
        self.handler.handle_exception(connection, reason)

    @classmethod
    def handler(class_, func):
        # Build a generator-based handler that connects through this
        # adapter class.  NOTE: on instances this name is shadowed by
        # the ``self.handler`` attribute assigned in set_handler.
        return zc.ngi.generator.handler(func, class_)

    @property
    def peer_address(self):
        return self.connection.peer_address

    def __nonzero__(self):
        # Python 2 truth value: true while the underlying connection is.
        return bool(self.connection)
class Lines(Base):
    """Adapter that turns a byte stream into newline-delimited records.

    Complete lines (without the terminating newline) are delivered to
    the handler one at a time; a trailing partial line is buffered
    until more data arrives.
    """

    input = ''

    def handle_input(self, connection, data):
        buffered = self.input + data
        records = buffered.split('\n')
        # The last element is everything after the final newline --
        # possibly empty -- and becomes the new buffer.
        self.input = records.pop()
        for record in records:
            self.handler.handle_input(self, record)
class Sized(Base):
    # Adapter implementing a sized-message protocol: each message is
    # preceded by a 4-byte big-endian length.  A length field of
    # '\xff\xff\xff\xff' encodes a None ("null") message.

    want = 4             # bytes needed to complete the current item
    got = 0              # bytes currently buffered in self.input
    getting_size = True  # True while reading a length prefix

    def set_handler(self, handler):
        self.input = []
        Base.set_handler(self, handler)

    def handle_input(self, connection, data):
        self.got += len(data)
        self.input.append(data)
        while self.got >= self.want:
            extra = self.got - self.want
            if extra == 0:
                collected = ''.join(self.input)
                self.input = []
            else:
                # Leftover bytes are necessarily a suffix of the most
                # recently received chunk, so they can be recovered
                # from ``data`` directly.
                input = self.input
                self.input = [data[-extra:]]
                input[-1] = input[-1][:-extra]
                collected = ''.join(input)
            self.got = extra
            if self.getting_size:
                # we were receiving the message size
                assert self.want == 4
                if collected == '\xff\xff\xff\xff':
                    # NULL message. Ignore
                    continue
                self.want = struct.unpack(">I", collected)[0]
                self.getting_size = False
            else:
                self.want = 4
                self.getting_size = True
                self.handler.handle_input(self, collected)

    def writelines(self, data):
        # Frame each message lazily as the connection consumes them.
        self.connection.writelines(sized_iter(data))

    def write(self, message):
        if message is None:
            # Null message: length sentinel only, no payload.
            self.connection.write('\xff\xff\xff\xff')
        else:
            self.connection.write(struct.pack(">I", len(message)))
            self.connection.write(message)
def sized_iter(data):
    """Frame an iterable of messages for the sized-message protocol.

    Each message is yielded preceded by its 4-byte big-endian length;
    a None message is encoded as the 4-byte 0xff sentinel with no
    payload.
    """
    null_prefix = '\xff\xff\xff\xff'
    for message in data:
        if message is None:
            yield null_prefix
        else:
            yield struct.pack(">I", len(message))
            yield message
"""Asyncore-based implementation of the NGI
"""
from __future__ import with_statement
import asyncore
import errno
import logging
import os
import socket
import sys
import thread
import threading
import time
import warnings
import zc.ngi
import zc.ngi.interfaces
zc.ngi.interfaces.moduleProvides(zc.ngi.interfaces.IImplementation)

# Cached at import time; used in trigger log messages.
pid = os.getpid()
is_win32 = sys.platform == 'win32'

# errno values that just mean "try again later" on a non-blocking
# socket read; used as sets (the dict values are ignored).
expected_socket_read_errors = {
    errno.EWOULDBLOCK: 0,
    errno.EAGAIN: 0,
    errno.EINTR: 0,
    }

expected_socket_write_errors = {
    errno.EAGAIN: 0,
    errno.EWOULDBLOCK: 0,
    errno.ENOBUFS: 0,
    errno.EINTR: 0,
    }

# Maximum number of bytes read from a socket in one recv call.
BUFFER_SIZE = 8*1024
def get_family_from_address(addr):
    """Deduce the socket address family from an NGI address value.

    None maps to AF_INET (backward compatibility); a string is a
    Unix-domain socket path; a (host, port) tuple is IPv6 when the
    host contains a colon, IPv4 otherwise.  Anything else raises
    ValueError.
    """
    if addr is None:
        # keep backward compatibility
        return socket.AF_INET
    if isinstance(addr, str):
        return socket.AF_UNIX
    if isinstance(addr, tuple):
        host = addr[0]
        return socket.AF_INET6 if ":" in host else socket.AF_INET
    raise ValueError("addr should be string or tuple of ip address, port")
class Implementation:
    """Asyncore-based implementation of the NGI.

    An event loop runs in a lazily-started background thread; all
    socket work happens in that thread, and other threads hand work to
    it via call_from_thread.  The thread exits when only the wake-up
    trigger remains in the socket map.
    """
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IImplementation)

    logger = logging.getLogger('zc.ngi.async.Implementation')

    def __init__(self, daemon=True, name='zc.ngi.async application created'):
        self.name = name
        self.daemon = daemon
        self._map = {}          # asyncore socket map for this loop
        self._callbacks = []    # functions queued to run in the loop thread
        self._start_lock = threading.Lock()

    # Identity of the loop thread, set only while the loop runs.
    thread_ident = None

    def call_from_thread(self, func):
        # Run func in the loop thread.  If we already are the loop
        # thread, call it directly; otherwise queue it and wake up the
        # blocking select/poll call.
        if thread.get_ident() == self.thread_ident:
            func()
            return
        self._callbacks.append(func)
        self.notify_select()
        self.start_thread()

    def notify_select(self):
        # Replaced by the trigger's pull_trigger while the loop is
        # running; a no-op when the loop is down.
        pass

    def connect(self, addr, handler):
        # Connector creation must happen in the loop thread.
        self.call_from_thread(lambda : _Connector(addr, handler, self))
        self.start_thread()

    def listener(self, addr, handler, thready=False):
        result = _Listener(addr, handler, self, thready)
        self.start_thread()
        return result

    def udp(self, address, message):
        # One-shot UDP send, reusing pooled sockets keyed by family.
        family = get_family_from_address(address)
        try:
            sock = _udp_socks[family].pop()
        except IndexError:
            sock = socket.socket(family, socket.SOCK_DGRAM)
        sock.sendto(message, address)
        _udp_socks[family].append(sock)

    def udp_listener(self, addr, handler, buffer_size=4096):
        result = _UDPListener(addr, handler, buffer_size, self)
        self.start_thread()
        return result

    _thread = None

    def start_thread(self):
        # Start the loop thread if it isn't already running.
        with self._start_lock:
            if self._thread is None:
                self._thread = threading.Thread(
                    target=self.loop, name=self.name)
                self._thread.setDaemon(self.daemon)
                self._thread.start()

    def wait(self, timeout=None):
        # Block until the loop thread finishes; raise Timeout if it is
        # still running afterwards.  join is called outside the lock so
        # the exiting loop can take the lock itself.
        with self._start_lock:
            if self._thread is None:
                return
            join = self._thread.join
        join(timeout)
        if self._thread is not None:
            raise zc.ngi.interfaces.Timeout

    def loop(self, timeout=None):
        self.thread_ident = thread.get_ident()
        if timeout is not None:
            deadline = time.time() + timeout
        else:
            deadline = None
            timeout = 30
        map = self._map
        callbacks = self._callbacks
        logger = logging.getLogger('zc.ngi.async.loop')
        trigger = _Trigger(self._map)
        self.notify_select = trigger.pull_trigger
        try:
            while 1:
                # First run any callbacks queued by other threads.
                while callbacks:
                    callback = callbacks.pop(0)
                    try:
                        callback()
                    except:
                        self.logger.exception('Calling callback')
                        self.handle_error()
                if deadline:
                    timeout = min(deadline - time.time(), 30)
                try:
                    # The map always contains the trigger, so len > 1
                    # means there is real work to wait for.
                    if (timeout > 0) and (len(map) > 1):
                        asyncore.poll(timeout, map)
                except:
                    logger.exception('loop error')
                    raise
                if trigger._fileno is None:
                    # oops, the trigger got closed. Recreate it.
                    trigger = _Trigger(self._map)
                    self.notify_select = trigger.pull_trigger
                with self._start_lock:
                    # Shut down when only the trigger remains and
                    # nothing is queued.
                    if (len(map) <= 1) and not callbacks:
                        self._thread = None
                        return
                if timeout <= 0:
                    raise zc.ngi.interfaces.Timeout
        finally:
            del self.thread_ident
            del self.notify_select
            trigger.close()

    def cleanup_map(self):
        # Close everything in the map except the trigger.  Two passes:
        # presumably to catch entries (re)added while the first pass's
        # close handlers ran -- TODO confirm.
        for c in self._map.values():
            if isinstance(c, _Trigger):
                continue
            c.close()
        for c in self._map.values():
            if isinstance(c, _Trigger):
                continue
            c.close()

    def handle_error(self):
        # Hook for subclasses; errors are already logged where caught.
        pass
class Inline(Implementation):
    """Run in an application thread, rather than a separate thread.

    The application drives the loop itself via wait; errors propagate
    to the caller instead of being swallowed.
    """

    logger = logging.getLogger('zc.ngi.async.Inline')

    def start_thread(self):
        # No background thread: the application runs the loop directly.
        pass

    def handle_error(self):
        # Re-raise the active exception to the loop's caller.
        raise

    def wait(self, *args):
        self.loop(*args)
class dispatcher(asyncore.dispatcher):
    """Common asyncore dispatcher behavior shared by NGI sockets."""

    def __init__(self, sock, addr, implementation):
        self.addr = addr
        self.implementation = implementation
        asyncore.dispatcher.__init__(self, sock, implementation._map)

    def handle_error(self):
        reason = sys.exc_info()[1]
        self.logger.exception('handle_error')
        try:
            self.handle_close(reason)
        except:
            # handle_close itself failed; force the raw close.
            self.logger.exception(
                "Exception raised by dispatcher handle_close(%r)",
                reason)
            self.close()
        self.implementation.handle_error()

    def close(self):
        # Remove from the map immediately; close the OS socket in the
        # loop thread.
        self.del_channel(self._map)
        self.implementation.call_from_thread(self.socket.close)

    def writable(self):
        return False
class _ConnectionDispatcher(dispatcher):
    """Dispatcher for an established TCP connection.

    Maintains an output queue holding strings, iterators (from
    writelines) and the END_OF_DATA marker, and forwards incoming data
    and close/exception events to the application handler.
    """

    __closed = None              # close reason seen before a handler was set
    __handler = None             # application connection handler
    __iterator_exception = None  # writelines error seen before a handler was set
    _connection = None           # _Connection facade; set by _Connection

    def __init__(self, sock, addr, logger, implementation):
        self.__output = []
        dispatcher.__init__(self, sock, addr, implementation)
        self.logger = logger

    def __nonzero__(self):
        # True while open: __output becomes None on close.
        return self.__output is not None

    def set_handler(self, handler):
        if self.__handler is not None:
            raise TypeError("Handler already set")
        self.__handler = handler
        # Deliver any exception or close that happened before the
        # application attached its handler.
        if self.__iterator_exception:
            v = self.__iterator_exception
            self.__iterator_exception = None
            try:
                handler.handle_exception(self, v)
            except:
                self.logger.exception("handle_exception failed")
                raise
        if self.__closed:
            try:
                handler.handle_close(self._connection, self.__closed)
            except:
                self.logger.exception("Exception raised by handle_close(%r)",
                                      self.__closed)
                raise

    def setHandler(self, handler):
        warnings.warn("setHandler is deprecated. Use set_handler,",
                      DeprecationWarning, stacklevel=2)
        self.set_handler(handler)

    def write(self, data):
        if __debug__:
            self.logger.debug('write %r', data)
        assert isinstance(data, str) or (data is zc.ngi.END_OF_DATA)
        try:
            self.__output.append(data)
        except AttributeError:
            # __output is None once the connection is closed.
            if self.__output is None:
                raise ValueError("write called on closed connection")
            raise
        self.implementation.notify_select()

    def writelines(self, data):
        if __debug__:
            self.logger.debug('writelines %r', data)
        assert not isinstance(data, str), "writelines does not accept strings"
        try:
            self.__output.append(iter(data))
        except AttributeError:
            if self.__output is None:
                raise ValueError("writelines called on closed connection")
            raise
        self.implementation.notify_select()

    def close_after_write(self):
        # Queue a marker so the connection closes once all pending
        # output has been sent.
        try:
            self.__output.append(zc.ngi.END_OF_DATA)
        except AttributeError:
            if self.__output is None:
                return # already closed
            raise
        self.implementation.notify_select()

    def close(self):
        self.__output = None
        dispatcher.close(self)
        self.implementation.notify_select()

    def readable(self):
        # Don't read until the application can receive the data.
        return self.__handler is not None

    def writable(self):
        return bool(self.__output)

    def handle_read_event(self):
        assert self.readable()
        while 1:
            try:
                d = self.recv(BUFFER_SIZE)
            except socket.error, err:
                if err[0] in expected_socket_read_errors:
                    return
                raise
            if not d:
                return
            if __debug__:
                self.logger.debug('input %r', d)
            try:
                self.__handler.handle_input(self._connection, d)
            except:
                self.logger.exception("handle_input failed")
                raise
            if len(d) < BUFFER_SIZE:
                # A short read: no more data waiting right now.
                break

    def handle_write_event(self):
        if __debug__:
            self.logger.debug('handle_write_event')
        tosend = []       # strings collected from the queue, not yet sent
        nsend = 0         # total bytes in tosend
        send_size = 60000 # batch size before actually sending
        output = self.__output
        try:
            while output:
                v = output[0]
                if v is zc.ngi.END_OF_DATA:
                    if not nsend:
                        # All pending output sent: close now.
                        self.close()
                        return
                    # Force a send of what we have collected first.
                    send_size = 0
                elif isinstance(v, str):
                    tosend.append(v)
                    nsend += len(v)
                    output.pop(0)
                else:
                    # Must be an iterator
                    try:
                        v = v.next()
                        if not isinstance(v, str):
                            raise TypeError(
                                "writelines iterator must return strings", v)
                    except StopIteration:
                        # all done
                        output.pop(0)
                    except Exception, v:
                        self.logger.exception("writelines iterator failed")
                        if self.__handler is None:
                            # Save it for when a handler is attached.
                            self.__iterator_exception = v
                        else:
                            self.__handler.handle_exception(self._connection, v)
                        raise
                    else:
                        tosend.append(v)
                        nsend += len(v)
                if output and nsend < send_size:
                    continue
                v = ''.join(tosend)
                try:
                    n = self.send(v)
                except socket.error, err:
                    if err[0] in expected_socket_write_errors:
                        return # we couldn't write anything
                    raise
                except Exception, v:
                    self.logger.exception("send failed")
                    raise
                if n == nsend:
                    nsend = 0
                    del tosend[:]
                else:
                    # Partial send: keep the unsent tail.
                    nsend -= n
                    tosend[:] = v[n:],
                    return # can't send any more
        finally:
            # Put anything unsent back at the front of the queue.
            if nsend:
                output[0:0] = tosend

    def handle_close(self, reason='end of input'):
        if __debug__:
            self.logger.debug('close %r', reason)
        if self.__handler is not None:
            try:
                self.__handler.handle_close(self._connection, reason)
            except:
                self.logger.exception("Exception raised by handle_close(%r)",
                                      reason)
        else:
            # Remember the reason for a handler attached later.
            self.__closed = reason
        self.close()

    def handle_expt(self):
        self.handle_close('socket error')

    def __hash__(self):
        return hash(self.socket)
class _ServerConnectionDispatcher(_ConnectionDispatcher):
    """Connection dispatcher that reports closes to its listener."""

    def __init__(self, control, *args):
        # control is the _Listener that accepted this connection.
        self.control = control
        _ConnectionDispatcher.__init__(self, *args)

    def close(self):
        _ConnectionDispatcher.close(self)
        self.control.closed(self._connection)
class _Connection:
    """IConnection facade handed to applications.

    Thin wrapper over a _ConnectionDispatcher; write and writelines
    rebind themselves to the dispatcher's bound methods on first use.
    """
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IConnection)

    def __init__(self, dispatcher):
        self._dispatcher = dispatcher
        dispatcher._connection = self

    def __nonzero__(self):
        # True while the dispatcher is open.
        return bool(self._dispatcher)

    def set_handler(self, handler):
        return self._dispatcher.set_handler(handler)

    def setHandler(self, handler):
        warnings.warn("setHandler is deprecated. Use set_handler,",
                      DeprecationWarning, stacklevel=2)
        self.set_handler(handler)

    def write(self, data):
        # Memoize the dispatcher's write on first use.
        self.write = self._dispatcher.write
        self.write(data)

    def writelines(self, data):
        # Memoize the dispatcher's writelines on first use.
        self.writelines = self._dispatcher.writelines
        self.writelines(data)

    def close(self):
        # Close after any pending output has been sent.
        self._dispatcher.close_after_write()

    @property
    def peer_address(self):
        return self._dispatcher.socket.getpeername()
class _ServerConnection(_Connection):
    """Server-side connection; exposes its listener as ``control``."""
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IServerConnection)

    @property
    def control(self):
        # The _Listener that accepted this connection.
        return self._dispatcher.control
class _Connector(dispatcher):
    """Dispatcher that drives a non-blocking outgoing connect.

    On success it hands a _Connection to the handler's ``connected``;
    on failure it calls the handler's ``failed_connect``.
    """

    logger = logging.getLogger('zc.ngi.async.client')

    # When trying to do a connect on a non-blocking socket, some outcomes
    # are expected.  Set _CONNECT_IN_PROGRESS to the errno value(s) expected
    # when an initial connect can't complete immediately.  Set _CONNECT_OK
    # to the errno value(s) expected if the connect succeeds *or* if it's
    # already connected (our code can attempt redundant connects).
    if hasattr(errno, "WSAEWOULDBLOCK"):    # Windows
        # Caution:  The official Winsock docs claim that WSAEALREADY should be
        # treated as yet another "in progress" indicator, but we've never
        # seen this.
        _CONNECT_IN_PROGRESS = (errno.WSAEWOULDBLOCK,)
        # Win98: WSAEISCONN; Win2K: WSAEINVAL
        _CONNECT_OK          = (0, errno.WSAEISCONN, errno.WSAEINVAL)
    else:                                   # Unix
        _CONNECT_IN_PROGRESS = (errno.EINPROGRESS,)
        _CONNECT_OK          = (0, errno.EISCONN)

    def __init__(self, addr, handler, implementation):
        self.__handler = handler
        family = get_family_from_address(addr)
        sock = socket.socket(family, socket.SOCK_STREAM)
        dispatcher.__init__(self, sock, addr, implementation)
        if __debug__:
            self.logger.debug('connecting to %s', self.addr)
        # INVARIANT: we are called from the select thread!
        try:
            # Kick off the first connect attempt immediately.
            self.handle_write_event()
        except:
            self.handle_error()

    def readable(self):
        return False

    def writable(self):
        return True

    def handle_close(self, reason=None):
        if __debug__:
            self.logger.debug('connector close %r', reason)
        try:
            try:
                self.__handler.failed_connect(reason)
            except:
                self.logger.exception("failed_connect(%r) failed", reason)
                self.implementation.handle_error()
        finally:
            self.close()

    def handle_write_event(self):
        # Writability on a connecting socket signals connect progress.
        err = self.socket.connect_ex(self.addr)
        if err in self._CONNECT_IN_PROGRESS:
            return
        if err not in self._CONNECT_OK:
            reason = errno.errorcode.get(err) or str(err)
            self.logger.warning("error connecting to %s: %s", self.addr, reason)
            self.handle_close(reason)
            return
        # Connected: hand the socket over to a connection dispatcher.
        self.del_channel(self._map)
        if __debug__:
            self.logger.debug('outgoing connected %r', self.addr)
        dispatcher = _ConnectionDispatcher(self.socket, self.addr, self.logger,
                                           self.implementation)
        try:
            self.__handler.connected(_Connection(dispatcher))
        except:
            self.logger.exception("connection handler failed")
            dispatcher.handle_close("connection handler failed")
        return

    def handle_error(self):
        reason = sys.exc_info()[1]
        self.logger.exception('connect error')
        try:
            self.__handler.failed_connect(reason)
        except:
            self.logger.exception(
                "Handler failed_connect(%s) raised an exception", reason,
                )
        self.close()
        self.implementation.handle_error()

    def handle_expt(self):
        self.handle_close('connection failed')
class BaseListener(asyncore.dispatcher):
    """Common behavior for TCP and UDP listeners."""

    def __init__(self, implementation):
        self.implementation = implementation
        asyncore.dispatcher.__init__(self, map=implementation._map)

    def writable(self):
        return False

    def add_channel(self, map=None):
        # work around file-dispatcher bug
        if map is None:
            return
        assert (map is self._map)
        asyncore.dispatcher.add_channel(self, self._map)

    def handle_error(self):
        reason = sys.exc_info()[1]
        self.logger.exception('listener error')
        self.close()
        self.implementation.handle_error()
class _Listener(BaseListener):
    """TCP (or Unix-domain) listener.

    Accepted connections are wrapped in _ServerConnection objects and
    passed to the server handler.  With thready=True each connection
    gets its own Implementation (and thus its own loop thread).
    """
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IListener)

    logger = logging.getLogger('zc.ngi.async.server')

    def __init__(self, addr, handler, implementation, thready):
        self.__handler = handler
        self.__close_handler = None
        self._thready = thready
        self.__connections = set()
        self.address = addr
        BaseListener.__init__(self, implementation)
        family = get_family_from_address(addr)
        self.create_socket(family, socket.SOCK_STREAM)
        try:
            if not is_win32:
                self.set_reuse_addr()
            if addr is None:
                # Try to pick one, primarily for testing
                import random
                n = 0
                while 1:
                    port = random.randint(10000, 30000)
                    addr = 'localhost', port
                    try:
                        self.bind(addr)
                    except socket.error:
                        n += 1
                        if n > 100:
                            raise
                        else:
                            continue
                    break
            else:
                self.bind(addr)
            # Port 0 means the OS chose a port: report the real one.
            if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0:
                self.addr = addr = addr[0], self.socket.getsockname()[1]
            self.logger.info("listening on %r", addr)
            self.listen(255)
        except socket.error:
            self.close()
            self.logger.warn("unable to listen on %r", addr)
            raise
        self.add_channel(self._map)
        self.address = addr
        self.implementation.notify_select()

    def handle_accept(self):
        if not self.accepting:
            return
        try:
            r = self.accept()
            if r:
                sock, addr = r
            else:
                # didn't get anything. Hm. Ignore.
                return
        except socket.error, msg:
            self.logger.exception("accepted failed: %s", msg)
            return
        if __debug__:
            self.logger.debug('incoming connection %r', addr)
        if self._thready:
            # Give this connection a dedicated loop thread.
            impl = Implementation(name="%r client" % (self.address,))
        else:
            impl = self.implementation
        dispatcher = _ServerConnectionDispatcher(
            self, sock, addr, self.logger, impl)
        connection = _ServerConnection(dispatcher)
        self.__connections.add(connection)
        # Run the server handler in the connection's loop thread.
        @impl.call_from_thread
        def _():
            try:
                self.__handler(connection)
            except:
                self.logger.exception("server handler failed")
                self.close()
        if impl is not self.implementation:
            impl.start_thread()

    def connections(self):
        return iter(self.__connections)

    def closed(self, connection):
        # Called by the connection dispatcher when a connection closes.
        if connection in self.__connections:
            self.__connections.remove(connection)
            if not self.__connections and self.__close_handler:
                self.__close_handler(self)

    def _close(self, handler):
        BaseListener.close(self)
        # Clean up the socket file for Unix-domain listeners.
        if isinstance(self.address, str) and os.path.exists(self.address):
            os.remove(self.address)
        if handler is None:
            # Immediate close: tear down all connections now.
            for c in list(self.__connections):
                c._dispatcher.handle_close("stopped")
        elif not self.__connections:
            handler(self)
        else:
            # Graceful close: wait for connections to drain.
            self.__close_handler = handler

    def close(self, handler=None):
        self.accepting = False
        self.implementation.call_from_thread(lambda : self._close(handler))

    def close_wait(self, timeout=None):
        # Graceful close that blocks until all connections are gone.
        event = threading.Event()
        self.close(lambda _: event.set())
        event.wait(timeout)

    # convenience method made possible by storing our address:
    def connect(self, handler):
        self.implementation.connect(self.address, handler)
class _UDPListener(BaseListener):
    """Datagram listener: each received packet is passed to the handler."""

    logger = logging.getLogger('zc.ngi.async.udpserver')
    connected = True

    def __init__(self, addr, handler, buffer_size, implementation):
        self.__handler = handler
        self.__buffer_size = buffer_size
        BaseListener.__init__(self, implementation)
        family = get_family_from_address(addr)
        try:
            self.create_socket(family, socket.SOCK_DGRAM)
            if not is_win32:
                self.set_reuse_addr()
            self.bind(addr)
            self.logger.info("listening on udp %r", addr)
        except socket.error:
            self.close()
            self.logger.warn("unable to listen on udp %r", addr)
            raise
        self.add_channel(self._map)
        self.implementation.notify_select()

    def handle_read(self):
        message, addr = self.socket.recvfrom(self.__buffer_size)
        self.__handler(addr, message)

    def close(self):
        self.del_channel(self._map)
        # Close the OS socket in the loop thread.
        self.implementation.call_from_thread(self.socket.close)
# udp uses GIL to get thread-safe socket management
# (list.pop/append are atomic, so the pools need no explicit lock)
if is_win32:
    _udp_socks = {socket.AF_INET: []}
else:
    _udp_socks = {socket.AF_INET: [], socket.AF_UNIX: []}
# The following trigger code is greatly simplified from the Medusa
# trigger code.
class _Triggerbase(object):
    """OS-independent base class for OS-dependent trigger class."""

    logger = logging.getLogger('zc.ngi.async.trigger')

    def writable(self):
        return 0

    def handle_close(self):
        self.close()

    def handle_error(self):
        self.logger.exception('trigger error %s', pid)
        self.close()

    def handle_read(self):
        # Drain whatever was written to wake the select call; the
        # content itself is irrelevant.
        try:
            self.recv(BUFFER_SIZE)
        except socket.error:
            pass
if os.name == 'posix':

    class _Trigger(_Triggerbase, asyncore.file_dispatcher):
        """Self-pipe trigger used to wake the select loop on POSIX."""

        def __init__(self, map):
            r, self.__writefd = os.pipe()
            asyncore.file_dispatcher.__init__(self, r, map)
            if self.socket.fd != r:
                # Starting in Python 2.6, the descriptor passed to
                # file_dispatcher gets duped and assigned to
                # self.fd. This breaks the instantiation semantics and
                # is a bug imo.  I doubt it will get fixed, but maybe
                # it will. Who knows. For that reason, we test for the
                # fd changing rather than just checking the Python version.
                os.close(r)

        def close(self):
            os.close(self.__writefd)
            asyncore.file_dispatcher.close(self)

        def pull_trigger(self):
            if __debug__:
                self.logger.debug('pulled %s', pid)
            os.write(self.__writefd, 'x')

        def add_channel(self, map=None):
            # work around file-dispatcher bug
            assert (map is None) or (map is self._map)
            asyncore.dispatcher.add_channel(self, self._map)

else:
    # Windows version; uses just sockets, because a pipe isn't select'able
    # on Windows.

    class BindError(Exception):
        pass

    class _Trigger(_Triggerbase, asyncore.dispatcher):
        """Loopback-socket trigger used to wake the select loop on Windows."""

        def __init__(self, map):
            # Get a pair of connected sockets.  The trigger is the 'w'
            # end of the pair, which is connected to 'r'.  'r' is put
            # in the asyncore socket map.  "pulling the trigger" then
            # means writing something on w, which will wake up r.
            w = socket.socket()
            # Disable buffering -- pulling the trigger sends 1 byte,
            # and we want that sent immediately, to wake up asyncore's
            # select() ASAP.
            w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            count = 0
            while 1:
                count += 1
                # Bind to a local port; for efficiency, let the OS pick
                # a free port for us.
                # Unfortunately, stress tests showed that we may not
                # be able to connect to that port ("Address already in
                # use") despite that the OS picked it.  This appears
                # to be a race bug in the Windows socket implementation.
                # So we loop until a connect() succeeds (almost always
                # on the first try).  See the long thread at
                # http://mail.zope.org/pipermail/zope/2005-July/160433.html
                # for hideous details.
                a = socket.socket()
                a.bind(("127.0.0.1", 0))
                connect_address = a.getsockname()  # assigned (host, port) pair
                a.listen(1)
                try:
                    w.connect(connect_address)
                    break    # success
                except socket.error, detail:
                    if detail[0] != errno.WSAEADDRINUSE:
                        # "Address already in use" is the only error
                        # I've seen on two WinXP Pro SP2 boxes, under
                        # Pythons 2.3.5 and 2.4.1.
                        raise
                    # (10048, 'Address already in use')
                    # assert count <= 2 # never triggered in Tim's tests
                    if count >= 10:  # I've never seen it go above 2
                        a.close()
                        w.close()
                        raise BindError("Cannot bind trigger!")
                    # Close `a` and try again.  Note:  I originally put a short
                    # sleep() here, but it didn't appear to help or hurt.
                    a.close()
            r, addr = a.accept()  # r becomes asyncore's (self.)socket
            a.close()
            self.trigger = w
            asyncore.dispatcher.__init__(self, r, map)

        def close(self):
            self.del_channel(self._map)
            # self.socket is r, and self.trigger is w, from __init__
            self.socket.close()
            self.trigger.close()

        def pull_trigger(self):
            if __debug__:
                self.logger.debug('notify select %s', pid)
            self.trigger.send('x')
# Default, shared Implementation instance; the module-level API below
# simply delegates to it.
_select_implementation = Implementation(name=__name__)

call_from_thread = _select_implementation.call_from_thread
connect = connector = _select_implementation.connect
listener = _select_implementation.listener
start_thread = _select_implementation.start_thread
udp = _select_implementation.udp
udp_listener = _select_implementation.udp_listener
_map = _select_implementation._map
cleanup_map = _select_implementation.cleanup_map
wait = _select_implementation.wait

# Inline variant for applications that want to run the loop themselves.
main = Inline()
import warnings
import zc.ngi.interfaces
def handler(func=None, connection_adapter=None):
    """Create a generator-based connection handler.

    Usable directly, ``handler(func)``, or as a decorator factory,
    ``@handler(connection_adapter=...)``, in which case the generator
    function is supplied later.
    """
    if func is not None:
        return Handler(func, connection_adapter)
    return lambda f: Handler(f, connection_adapter)
class Handler(object):
    """Adapts a generator function to the NGI handler interfaces.

    Instances act as servers (called with a connection), as client
    connect handlers, and -- via the descriptor protocol -- as methods.
    """
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IServer,
                                 zc.ngi.interfaces.IClientConnectHandler,
                                 )

    def __init__(self, func, connection_adapter):
        self.func = func
        self.connection_adapter = connection_adapter

    def __call__(self, *args):
        # args is (connection,) for plain use, or (instance, connection)
        # for bound-method use; the connection is always last.
        if self.connection_adapter is not None:
            args = args[:-1]+(self.connection_adapter(args[-1]), )
        return ConnectionHandler(self.func(*args), args[-1])

    def __get__(self, inst, class_):
        # Descriptor protocol: allow generator handlers to be methods.
        if inst is None:
            return self
        if self.connection_adapter is not None:
            def connected(connection):
                connection = self.connection_adapter(connection)
                return ConnectionHandler(self.func(inst, connection),
                                         connection)
            return connected
        return (lambda connection:
                ConnectionHandler(self.func(inst, connection), connection)
                )

    def connected(self, connection):
        # IClientConnectHandler: drive the generator for the new connection.
        self(connection)

    def failed_connect(self, reason):
        raise zc.ngi.interfaces.ConnectionFailed(reason)
class ConnectionHandler(object):
    """Drives a generator as an NGI connection handler.

    The generator is primed on construction; input data is sent into
    it, a close is raised into it as GeneratorExit, and exceptions
    reported by the implementation are thrown into it.
    """
    zc.ngi.interfaces.implements(zc.ngi.interfaces.IConnectionHandler)

    def __init__(self, gen, connection):
        try:
            # Prime the generator; a generator that finishes without
            # yielding wants no input, so nothing gets hooked up.
            gen.next()
        except StopIteration:
            return
        self.gen = gen
        try:
            connection.set_handler(self)
        except AttributeError:
            # Bug fix: fall back to the deprecated spelling on the
            # connection we were given.  The old code called
            # self.connection.setHandler, but self.connection is never
            # assigned here, so the fallback path itself raised
            # AttributeError instead of using the legacy API.
            connection.setHandler(self)
            warnings.warn("setHandler is deprecated. Use set_handler,",
                          DeprecationWarning, stacklevel=2)

    def handle_input(self, connection, data):
        try:
            self.gen.send(data)
        except StopIteration:
            # The generator is done with this connection.
            connection.close()

    def handle_close(self, connection, reason):
        try:
            self.gen.throw(GeneratorExit, GeneratorExit(reason))
        except (GeneratorExit, StopIteration):
            pass

    def handle_exception(self, connection, exception):
        self.gen.throw(exception.__class__, exception)
from zc.ngi.interfaces import ConnectionFailed
import sys
import threading
import time
import warnings
import zc.ngi
import zc.ngi.adapters
# The whole module is deprecated; warn importers once.
warnings.warn("The blocking module is deprecated.",
              DeprecationWarning, stacklevel=2)

class Timeout(Exception):
    """An operation timed out.
    """

class ConnectionTimeout(Timeout, ConnectionFailed):
    """An attempt to connect timed out.

    Also a ConnectionFailed, so callers catching connection failures
    see connect timeouts too.
    """
class RequestConnection(zc.ngi.adapters.Base):
    """Adapter that funnels handler errors and closes to a connector.

    Wraps the real connection during a blocking request() so that
    exceptions raised by the application handler, and the eventual
    close, wake up the thread waiting on the connector's event.
    """

    def __init__(self, connection, connector):
        self.connection = connection
        self.connector = connector

    def close(self):
        self.connector.closed = True
        self.connection.close()
        self.connector.event.set()

    def handle_input(self, connection, data):
        try:
            self.handler.handle_input(self, data)
        except:
            # Save the error for the waiting thread and wake it.
            self.connector.exception = sys.exc_info()
            self.connector.event.set()
            raise

    def handle_close(self, connection, reason):
        handle_close = getattr(self.handler, 'handle_close', None)
        if handle_close is not None:
            try:
                handle_close(self, reason)
            except:
                self.connector.exception = sys.exc_info()
                self.connector.event.set()
                raise
        self.connector.closed = True
        self.connector.result = reason
        self.connector.event.set()

    @property
    def handle_exception(self):
        # Wrap the handler's handle_exception so failures there also
        # wake the waiting thread.
        handle = self.handler.handle_exception
        def handle_exception(connection, exception):
            try:
                handle(self, exception)
            except:
                self.connector.exception = sys.exc_info()
                self.connector.event.set()
                raise
        return handle_exception
class RequestConnector:
    """Client connect handler used by request().

    Accepts a handler in any of three forms: an object with a
    ``connected`` method, a plain callable, or an object with
    ``handle_input`` (installed directly as the connection handler).
    """

    exception = closed = connection = result = None

    def __init__(self, handler, event):
        try:
            connected = handler.connected
        except AttributeError:
            if callable(handler):
                connected = handler
            elif getattr(handler, 'handle_input', None) is None:
                # Not usable as a handler in any form: re-raise.
                raise
            else:
                connected = lambda connection: connection.set_handler(handler)
        self._connected = connected
        self.event = event

    def connected(self, connection):
        self.connection = connection
        try:
            self._connected(RequestConnection(connection, self))
        except:
            self.exception = sys.exc_info()
            self.event.set()
            raise

    def failed_connect(self, reason):
        self.exception = ConnectionFailed(reason)
        self.event.set()
def request(connect, address, connection_handler, timeout=None):
    """Connect to address and run connection_handler, blocking until done.

    Returns the close reason; raises whatever the handler raised, or
    ConnectionTimeout/Timeout on timeouts.
    """
    event = threading.Event()
    connector = RequestConnector(connection_handler, event)

    connect(address, connector)

    event.wait(timeout)
    if connector.exception:
        exception = connector.exception
        del connector.exception
        if isinstance(exception, tuple):
            # A saved sys.exc_info triple: re-raise with its traceback.
            raise exception[0], exception[1], exception[2]
        else:
            raise exception
    if connector.closed:
        return connector.result
    if connector.connection is None:
        # Never even connected within the timeout.
        raise ConnectionTimeout
    raise Timeout
def connect(address, connect=None, timeout=None):
    """Connect to address, blocking until connected or timed out.

    connect defaults to the installed implementation's connect; the
    established connection is returned.
    """
    if connect is None:
        connect = zc.ngi.implementation.connect
    return _connector().connect(address, connect, timeout)
class _connector:
failed = connection = None
def connect(self, address, connect, timeout):
event = self.event = threading.Event()
connect(address, self)
event.wait(timeout)
if self.failed is not None:
raise ConnectionFailed(self.failed)
if self.connection is not None:
return self.connection
raise ConnectionTimeout()
def connected(self, connection):
self.connection = connection
self.event.set()
def failed_connect(self, reason):
self.failed = reason
self.event.set()
def open(connection_or_address, connector=None, timeout=None):
    """Return (output, input) file-like objects over a connection.

    The first argument may be an existing connection (detected by the
    presence of a set_handler/setHandler method) or an address to
    connect to with the given connect function.
    """
    if connector is None and (hasattr(connection_or_address, 'set_handler')
                              or hasattr(connection_or_address, 'setHandler')
                              ):
        # connection_or_address is a connection
        connection = connection_or_address
    else:
        connection = connect(connection_or_address, connector, timeout)
    outputfile = OutputFile(connection)
    return outputfile, InputFile(connection, outputfile)
class _BaseFile:
def __init__(self, connection):
self._connection = connection
self._position = 0
def seek(self, offset, whence=0):
position = self._position
if whence == 0:
position = offset
elif whence == 1:
position += offset
elif whence == 2:
position -= offset
else:
raise IOError("Invalid whence argument", whence)
if position < 0:
raise IOError("Invalid offset", offset)
self._position = position
def tell(self):
return self._position
_closed = False
def _check_open(self):
if self._closed:
raise IOError("I/O operation on closed file")
class OutputFile(_BaseFile):
    """Write-only file-like wrapper over a connection."""

    def invalid_method(*args, **kw):
        raise IOError("Invalid operation on output file")

    # Reading is not supported on the output side.
    read = readline = readlines = invalid_method

    def flush(self):
        # There is no buffering; just surface any pending error.
        self._check_exception()

    def close(self):
        if not self._closed:
            self._connection.close()
            self._closed = True

    def write(self, data):
        self._check_exception()
        self._check_open()
        assert isinstance(data, str)
        self._position += len(data)
        self._connection.write(data)

    def writelines(self, data, timeout=None, nonblocking=False):
        self._check_exception()
        self._check_open()
        if nonblocking:
            self._connection.writelines(iter(data))
            return
        event = threading.Event()
        self._connection.writelines(
            _writelines_iterator(data, self, event.set))
        # wait for iteration to finish
        event.wait(timeout)
        if not event.isSet():
            raise Timeout()

    # Exception reported by the connection; re-raised on next use.
    _exception = None

    def _check_exception(self):
        if self._exception is not None:
            exception = self._exception
            self._exception = None
            raise exception
class _writelines_iterator:
    """Wrap a writelines iterable to signal when it is exhausted."""

    def __init__(self, base, file, notify):
        self._base = iter(base)
        self._file = file
        self._notify = notify

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        try:
            data = self._base.next()
            # Position counts items written via writelines, not bytes.
            self._file._position += 1
            return data
        except StopIteration:
            # Tell the blocked writelines call that we are done.
            self._notify()
            raise
class InputFile(_BaseFile):
    """Blocking, file-like wrapper for the input side of a connection.

    Registers itself as the connection's handler; incoming data is
    buffered under a condition variable and handed out by the blocking
    read methods, which wait for enough data, for the connection to
    close, or for a reported exception (optionally bounded by a
    timeout).
    """

    def __init__(self, connection, outputfile):
        _BaseFile.__init__(self, connection)
        self._condition = threading.Condition()
        self._data = ''
        self._outputfile = outputfile
        self._outputfile._exception = None
        connection.set_handler(self)

    def invalid_method(*args, **kw):
        # Shared stub bound to every write operation below: an input
        # file cannot be written to.  (Fixed: the message used to say
        # "output file", copied from OutputFile.)
        raise IOError("Invalid operation on input file")
    flush = write = writelines = invalid_method

    def handle_input(self, connection, data):
        # Connection callback: buffer incoming data and wake readers.
        # try/finally added for consistency with the other handlers so
        # the lock is always released.
        condition = self._condition
        condition.acquire()
        try:
            self._data += data
            condition.notifyAll()
        finally:
            condition.release()

    def handle_close(self, connection, reason):
        # Connection callback: mark both files closed and wake readers.
        condition = self._condition
        condition.acquire()
        try:
            self._closed = self._outputfile._closed = True
            condition.notifyAll()
        finally:
            condition.release()

    def handle_exception(self, connection, exception):
        # Connection callback: stash the exception for re-raising from
        # the next file operation and wake readers.
        condition = self._condition
        condition.acquire()
        try:
            self._outputfile._exception = exception
            condition.notifyAll()
        finally:
            condition.release()

    def close(self):
        """Close the connection and wake any blocked readers."""
        condition = self._condition
        condition.acquire()
        try:
            self._closed = self._outputfile._closed = True
            self._connection.close()
            condition.notifyAll()
        finally:
            condition.release()

    def __iter__(self):
        return self

    def next(self):
        s = self.readline()
        if s:
            return s
        raise StopIteration

    __next__ = next  # Python 3 iterator-protocol compatibility

    def read(self, size=None, timeout=None):
        """Read *size* bytes, or everything until close when size is None.

        Blocks until enough data has arrived or the connection closes;
        raises Timeout if *timeout* seconds elapse first.
        """
        deadline = None
        condition = self._condition
        condition.acquire()
        try:
            self._outputfile._check_exception()
            while 1:
                data = self._data
                if size is not None and size <= len(data):
                    data, self._data = data[:size], data[size:]
                    break
                elif self._closed:
                    # Closed: return whatever is buffered (possibly '').
                    if data:
                        self._data = ''
                    break
                timeout, deadline = self._wait(timeout, deadline)
            self._position += len(data)
            return data
        finally:
            condition.release()

    def readline(self, size=None, timeout=None):
        """Read one line (including the newline).

        Returns early after *size* bytes when given, or whatever is
        buffered at connection close; raises Timeout if *timeout*
        seconds elapse first.
        """
        deadline = None
        condition = self._condition
        condition.acquire()
        try:
            self._outputfile._check_exception()
            while 1:
                data = self._data
                l = data.find('\n')
                if l >= 0:
                    l += 1
                    if size is not None and size < l:
                        l = size
                    data, self._data = data[:l], data[l:]
                    break
                elif size is not None and size <= len(data):
                    data, self._data = data[:size], data[size:]
                    break
                elif self._closed:
                    if data:
                        self._data = ''
                    break
                timeout, deadline = self._wait(timeout, deadline)
            self._position += len(data)
            return data
        finally:
            condition.release()

    def readlines(self, sizehint=None, timeout=None):
        """Read a list of lines, blocking until *sizehint* bytes ending
        in a newline are available (or until close when sizehint is
        None); raises Timeout if *timeout* seconds elapse first.
        """
        deadline = None
        condition = self._condition
        condition.acquire()
        try:
            self._outputfile._check_exception()
            while 1:
                data = self._data
                if sizehint is not None and sizehint <= len(data):
                    l = data.rfind('\n')
                    if l >= 0:
                        l += 1
                        data, self._data = data[:l], data[l:]
                        return data.splitlines(True)
                elif self._closed:
                    if data:
                        self._data = ''
                    # NOTE(review): this path uses splitlines() without
                    # keepends, unlike the sizehint path above -- confirm
                    # whether dropping the newlines here is intended.
                    return data.splitlines()
                timeout, deadline = self._wait(timeout, deadline)
        finally:
            condition.release()

    def _wait(self, timeout, deadline):
        # Wait on the condition while bookkeeping the remaining timeout.
        # Returns the updated (timeout, deadline) pair; raises Timeout
        # once the deadline has passed.  Re-checks for asynchronously
        # reported exceptions after waking.
        if timeout is not None:
            if deadline is None:
                if timeout <= 0:
                    raise Timeout()
                deadline = time.time() + timeout
            else:
                timeout = deadline - time.time()
                if timeout <= 0:
                    raise Timeout()
            self._condition.wait(timeout)
        else:
            self._condition.wait()
        self._outputfile._check_exception()
        return timeout, deadline
from zope import interface, schema
import zope.interface.common.sequence
import zope.interface.common.mapping
import zope.component.interfaces
from i18n import _
class ILog(zope.interface.common.sequence.IFiniteSequence):
    """A finite sequence of log entries recording changes to __parent__."""

    def __call__(summary=None, details=None, defer=False, if_changed=False):
        """add an ILogEntry to logged with optional details, summary, and data.
        details should be a schema.Text field; summary should be a
        schema.TextLine field. The details and summary fields will move to
        rich text fields when they are available.
        Adapts self.__parent__ to self.record_schema, checks the adapted value
        to see if it validates with the schema, compares the current
        values with the last logged values, and creates a log entry with the
        change set, the current record_schema, the summary, the details, and
        the data.
        If defer is True, will defer making the log entry until the end of the
        transaction (a non-guaranteed point within the transaction's
        beforeCommitHooks, but before other subsequent deferred log calls).
        If if_changed is True, a log will only be made if a change has been
        made since the last log entry.
        If both defer and if_changed are True, any other log entries that are
        deferred but not if_changed will come first, effectively eliminating
        all deferred, if_changed entries. Similarly, if there are no deferred,
        non-if_changed entries, only the first requested if_changed log will
        be made.
        """

    # Schema used to snapshot __parent__ when entries are made; once
    # entries exist it may only be replaced by an extending schema.
    record_schema = schema.InterfaceField(
        title=_("Record Schema"),
        description=_("""The schema used for creating log entry records.
        May be altered with a schema that extends the last-used schema.
        Non-schema specifications (e.g., interface.Attribute and methods) are
        ignored.
        """),
        required=True)
class ILogging(interface.Interface):
    "An object which provides an ILog as a 'log' attribute"

    # readonly: consumers may not rebind the attribute; the log itself
    # is still appendable via ILog.__call__.
    log = schema.Object(ILog, title=_('Log'), description=_(
        "A zc.objectlog.ILog"), readonly=True, required=True)
# Marker interface: a record's actual fields are supplied dynamically by
# the record_schema of its log entry.
class IRecord(interface.Interface):
    """Data about the logged object when the log entry was made.
    Records always implement an additional interface: the record_schema of the
    corresponding log entry."""
class ILogEntry(interface.Interface):
    """A log entry.
    Log entries have three broad use cases:
    - Record transition change messages from system users
    - Record basic status values so approximate change timestamps can
      be calculated
    - Allow for simple extensibility.
    """
    timestamp = schema.Datetime(
        title=_("Creation Date"),
        description=_("The date and time at which this log entry was made"),
        required=True, readonly=True)
    principal_ids = schema.Tuple(
        value_type=schema.TextLine(),
        title=_("Principals"),
        description=_(
            """The ids of the principals who triggered this log entry"""),
        required=True, readonly=True)
    summary = schema.TextLine( # XXX Make rich text line later
        title=_("Summary"),
        description=_("Log summary"),
        required=False, readonly=True)
    details = schema.Text( # XXX Make rich text later
        title=_("Details"),
        description=_("Log details"),
        required=False, readonly=True)
    record_schema = schema.InterfaceField(
        title=_("Record Schema"),
        description=_("""The schema used for creating log entry records.
        Non-schema specifications (e.g., interface.Attribute and methods) are
        ignored."""),
        required=True, readonly=True)
    # Mapping of field name -> new value for fields changed since the
    # previous entry.
    record_changes = schema.Object(
        zope.interface.common.mapping.IExtendedReadMapping,
        title=_("Changes"),
        description=_("Changes to the object since the last log entry"),
        required=True, readonly=True)
    # Full IRecord snapshot as of this entry (see IRecord above).
    record = schema.Object(
        IRecord,
        title=_("Full Status"),
        description=_("The status of the object at this log entry"),
        required=True, readonly=True)
    next = interface.Attribute("The next log entry, or None if last")
    previous = interface.Attribute("The previous log entry, or None if first")
class IAggregatedLog(interface.Interface):
    """An iterable of the logs aggregated for a given object."""
class ILogEntryEvent(zope.component.interfaces.IObjectEvent):
    """Event fired when a log entry is created.

    The event's object is the log's context (__parent__)."""
    entry = interface.Attribute('the log entry created')
class LogEntryEvent(zope.component.interfaces.ObjectEvent):
    """Default ILogEntryEvent implementation."""
    interface.implements(ILogEntryEvent)

    def __init__(self, obj, entry):
        super(LogEntryEvent, self).__init__(obj)
        self.entry = entry
from zope import interface, schema
import zope.interface.common.mapping
from zope.schema.interfaces import RequiredMissing
def validate(obj, i): # XXX put this in zope.schema?
    """Assert that *obj* satisfies the schema interface *i*.

    Checks the interface invariants, then every schema field in order:
    a missing value is an error only when the field is required;
    present values are validated by the field bound to *obj*.
    """
    i.validateInvariants(obj)
    for name, field in schema.getFieldsInOrder(i):
        value = field.query(obj, field.missing_value)
        if value != field.missing_value:
            field.bind(obj).validate(value)
        elif field.required:
            raise RequiredMissing(name)
# !!! The ImmutableDict class is from an unreleased ZPL package called aimles,
# included for distribution here with the author's (Gary Poster) permission
class ImmutableDict(dict):
    """A dictionary that cannot be mutated (without resorting to superclass
    tricks, as shown below).
    >>> d = ImmutableDict({'name':'Gary', 'age':33})
    >>> d['name']
    'Gary'
    >>> d['age']
    33
    >>> d.get('foo')
    >>> d.get('name')
    'Gary'
    >>> d['name'] = 'Karyn'
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d.clear()
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d.update({'answer':42})
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> del d['name']
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d.setdefault('sense')
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d.pop('name')
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d.popitem()
    Traceback (most recent call last):
    ...
    RuntimeError: Immutable dictionary
    >>> d2 = ImmutableDict.fromkeys((1,2,3))
    >>> type(d2.copy()) # copy is standard mutable dict
    <type 'dict'>
    >>> import pprint
    >>> pprint.pprint(d2.copy()) # pprint gets confused by subtypes
    {1: None, 2: None, 3: None}
    >>> pprint.pprint(ImmutableDict.fromkeys((1,2,3),'foo'))
    {1: 'foo', 2: 'foo', 3: 'foo'}
    Here's an example of actually mutating the dictionary anyway.
    >>> dict.__setitem__(d, 'age', 33*12 + 7)
    >>> d['age']
    403
    pickling and unpickling is supported.
    >>> import pickle
    >>> copy = pickle.loads(pickle.dumps(d))
    >>> copy is d
    False
    >>> copy == d
    True
    >>> import cPickle
    >>> copy = cPickle.loads(cPickle.dumps(d))
    >>> copy is d
    False
    >>> copy == d
    True
    """
    interface.implements(
        zope.interface.common.mapping.IExtendedReadMapping,
        zope.interface.common.mapping.IClonableMapping)

    def _refuse_mutation(self, *args, **kw):
        # Single choke point for every mutating dict operation below.
        raise RuntimeError('Immutable dictionary')

    __setitem__ = _refuse_mutation
    __delitem__ = _refuse_mutation
    clear = _refuse_mutation
    update = _refuse_mutation
    setdefault = _refuse_mutation
    pop = _refuse_mutation
    popitem = _refuse_mutation

    @classmethod
    def fromkeys(cls, iterable, value=None):
        # Build via a plain dict first, then freeze the result into cls.
        return cls(dict.fromkeys(iterable, value))
import pytz, datetime, persistent, transaction
from BTrees import IOBTree
import zope.security.management
import zope.security.interfaces
import zope.security.checker
import zope.security.proxy
import zope.publisher.interfaces  # used by LogEntry.__init__ (IRequest check)
from zope import interface, schema, event
import zope.interface.interfaces
import zope.location
import zope.app.keyreference.interfaces
from zc.objectlog import interfaces, utils
from zc.objectlog.i18n import _
def performDeferredLogs(deferred, seen, transaction):
    """beforeCommit hook that writes the log entries deferred by Log.__call__.

    Scans the hooks registered after the most recent registration of
    this function; if any unseen hook is found, re-registers itself at
    the end of the queue (so log calls made by those hooks are included)
    and does nothing else this time.  Otherwise flushes the deferred
    entries: unconditional entries are all written and subsume the
    conditional ones; when only if_changed entries exist, a single
    conditional call is made (see ILog.__call__).
    """
    # deferred is dict of
    # (parent key reference, log name): (not if_changed list, if_changed list)
    # where not if_changed and if_changed are both lists of
    # [(log, summary, details)]
    problem = False
    for hook, args, kwargs in reversed(
        tuple(transaction.getBeforeCommitHooks())):
        if hook is performDeferredLogs:
            break
        else:
            if (hook, args, kwargs) not in seen:
                seen.append((hook, args, kwargs))
                problem = True
    if problem:
        transaction.addBeforeCommitHook(
            performDeferredLogs, (deferred, seen, transaction))
    else:
        for always, if_changed in deferred.values():
            if always:
                for log, summary, details in always:
                    log._call(summary, details)
            else:
                log, summary, details = if_changed[0]
                log._call(summary, details, if_changed=True)
def getTransactionFromPersistentObject(obj):
    # this should maybe go in ZODB; extracted from some of Jim's code
    """Return the transaction of *obj*'s connection, or False when the
    object has no connection (``_p_jar`` is None)."""
    jar = obj._p_jar
    if jar is None:
        return False
    try:
        manager = jar._txn_mgr  # older ZODB connections
    except AttributeError:
        manager = jar.transaction_manager
    return manager.get()
class Log(persistent.Persistent, zope.location.Location):
    """Persistent ILog implementation.

    Entries are stored in an IOBTree keyed by a 0-based sequence number;
    the logged object is this log's __parent__.
    """
    interface.implements(interfaces.ILog)

    def __init__(self, record_schema):
        self.entries = IOBTree.IOBTree()
        self._record_schema = record_schema

    def record_schema(self, value):
        # Setter half of the property below: once entries exist, the
        # schema may only be replaced by itself or an extending schema.
        if self.entries:
            last_schema = self[-1].record_schema
            if value is not last_schema and not value.extends(last_schema):
                raise ValueError(
                    _("Once entries have been made, may only change schema to "
                    "one that extends the last-used schema"))
        self._record_schema = value
    # Rebind the name from the setter function to a property.
    record_schema = property(lambda self: self._record_schema, record_schema)

    def __call__(self,
                 summary=None, details=None, defer=False, if_changed=False):
        """See ILog.__call__: make an entry now, or defer to commit time."""
        if defer:
            # Deferred entries are grouped per (object, log name) in the
            # mapping carried by the performDeferredLogs commit hook.
            o = self.__parent__
            key = (zope.app.keyreference.interfaces.IKeyReference(o),
                   self.__name__)
            t = getTransactionFromPersistentObject(self) or transaction.get()
            # the following approach behaves badly in the presence of
            # savepoints. TODO: convert to use persistent data structure that
            # will be rolled back when a savepoint is rolled back.
            for hook, args, kwargs in t.getBeforeCommitHooks():
                if hook is performDeferredLogs:
                    deferred = args[0]
                    ds = deferred.get(key)
                    if ds is None:
                        ds = deferred[key] = ([], [])
                    break
            else:
                # First deferred log call in this transaction: register
                # the hook with a fresh mapping.
                ds = ([], [])
                deferred = {key: ds}
                t.addBeforeCommitHook(performDeferredLogs, (deferred, [], t))
            # ds[0] holds unconditional calls, ds[1] the if_changed ones.
            ds[bool(if_changed)].append((self, summary, details))
        else:
            return self._call(summary, details, if_changed=if_changed)

    def _call(self, summary, details, if_changed=False):
        # Snapshot __parent__ through the record schema, compute the
        # change set relative to the previous entry (or the full field
        # set for the first entry), and store a new LogEntry unless
        # if_changed is set and nothing changed.
        s = self.record_schema
        new_record = s(self.__parent__)
        utils.validate(new_record, s)
        entries_len = len(self)
        changes = {}
        if entries_len:
            old_record = self[-1].record
            for name, field in schema.getFieldsInOrder(s):
                old_val = field.query(old_record, field.missing_value)
                new_val = field.query(new_record, field.missing_value)
                if new_val != old_val:
                    changes[name] = new_val
        else:
            # First entry: record every field value.
            for name, field in schema.getFieldsInOrder(s):
                changes[name] = field.query(new_record, field.missing_value)
        if not if_changed or changes:
            new = LogEntry(
                entries_len, changes, self.record_schema, summary, details)
            zope.location.locate(new, self, unicode(entries_len))
            utils.validate(new, interfaces.ILogEntry)
            self.entries[entries_len] = new
            event.notify(interfaces.LogEntryEvent(self.__parent__, new))
            return new
        # else return None

    def __getitem__(self, ix):
        # Supports slices, string indices (for traversal), and negative
        # indices, mimicking list semantics over the IOBTree.
        if isinstance(ix, slice):
            indices = ix.indices(len(self))
            return [self.entries[i] for i in range(*indices)]
        # XXX put this in traversal adapter (I think)
        if isinstance(ix, basestring):
            ix = int(ix)
        if ix < 0:
            ix = len(self) + ix
        try:
            return self.entries[ix]
        except KeyError:
            raise IndexError, 'list index out of range'

    def __len__(self):
        # Keys are dense 0..n-1, so the largest key + 1 is the length.
        if self.entries:
            return self.entries.maxKey() + 1
        else:
            return 0

    def __iter__(self):
        for l in self.entries.values():
            yield l
class LogEntry(persistent.Persistent, zope.location.Location):
    """Persistent ILogEntry: an immutable snapshot created by Log._call."""
    interface.implements(interfaces.ILogEntry)

    def __init__(self, ix, record_changes, record_schema,
                 summary, details):
        self.index = ix
        # Freeze the change set so entries cannot be mutated after the fact.
        self.record_changes = utils.ImmutableDict(record_changes)
        self.record_schema = record_schema
        self.summary = summary
        self.details = details
        self.timestamp = datetime.datetime.now(pytz.utc)
        try:
            interaction = zope.security.management.getInteraction()
        except zope.security.interfaces.NoInteraction:
            self.principal_ids = ()
        else:
            # Only request participations contribute principal ids;
            # zope.publisher.interfaces must be importable here (see the
            # module import block).
            self.principal_ids = tuple(
                [unicode(p.principal.id) for p in interaction.participations
                 if zope.publisher.interfaces.IRequest.providedBy(p)])

    # Lazily built full snapshot; Record resolves fields through this
    # entry and its predecessors.
    record = property(lambda self: Record(self))

    def next(self):
        try:
            return self.__parent__[self.index+1]
        except IndexError:
            return None
    next = property(next)

    def previous(self):
        ix = self.index
        if ix:
            return self.__parent__[ix-1]
        else: # it's 0
            return None
    previous = property(previous)
class RecordChecker(object):
    """Security checker for Record objects.

    Grants read-only access to zope's always-available names plus the
    names defined by the entry's record_schema; forbids everything else,
    including all attribute setting.
    """
    interface.implements(zope.security.interfaces.IChecker)

    def check_setattr(self, obj, name):
        # Records are immutable snapshots: no attribute may ever be set.
        raise zope.security.interfaces.ForbiddenAttribute, (name, obj)

    def check(self, obj, name):
        if name not in zope.security.checker._available_by_default:
            entry = zope.security.proxy.removeSecurityProxy(obj.__parent__)
            schema = entry.record_schema
            if name not in schema:
                raise zope.security.interfaces.ForbiddenAttribute, (name, obj)
    check_getattr = __setitem__ = check

    def proxy(self, value):
        'See IChecker'
        # Wrap returned values in their own security proxies when a
        # checker can be found for them.
        checker = getattr(value, '__Security_checker__', None)
        if checker is None:
            checker = zope.security.checker.selectChecker(value)
            if checker is None:
                return value
        return zope.security.checker.Proxy(value, checker)
class Record(zope.location.Location): # not intended to be persistent
    """IRecord implementation: lazy, memoizing view of an entry's state.

    Field values are resolved on first access by walking backwards
    through the log entries' change sets, then cached on the instance so
    __getattr__ is not consulted again for that name.
    """
    interface.implements(interfaces.IRecord)
    __name__ = u"record"
    __Security_checker__ = RecordChecker()

    def __init__(self, entry):
        self.__parent__ = entry
        interface.directlyProvides(self, entry.record_schema)

    def __getattr__(self, name):
        entry = self.__parent__
        schema = entry.record_schema
        try:
            field = schema[name]
        except KeyError:
            raise AttributeError, name
        else:
            # Walk back through entries until one recorded this field.
            while entry is not None:
                if name in entry.record_changes:
                    v = value = entry.record_changes[name]
                    break
                entry = entry.previous
            else: # we currently can never get here
                v = value = getattr(schema[name], 'missing_value', None)
            # Schema methods are exposed as callables returning the value.
            if zope.interface.interfaces.IMethod.providedBy(field):
                v = lambda : value
            # Memoize so later accesses bypass __getattr__.
            setattr(self, name, v)
            return v
from zope import interface, component, i18n, proxy
from zope.app import zapi
from zope.interface.common.idatetime import ITZInfo
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile
from zope.formlib import namedtemplate, form
import zope.publisher.browser
import zc.table.column
import zc.table.interfaces
from zc.objectlog import interfaces
import zc.objectlog.browser.interfaces
from zc.objectlog.i18n import _
class SortableColumn(zc.table.column.GetterColumn):
    """GetterColumn variant marked as sortable for zc.table formatters."""
    interface.implements(zc.table.interfaces.ISortableColumn)
def dateFormatter(value, context, formatter):
    """Render *value* in the request's timezone, medium date+time format."""
    localized = value.astimezone(ITZInfo(formatter.request))
    locale_format = formatter.request.locale.dates.getFormatter(
        'dateTime', length='medium')
    return locale_format.format(localized)
def principalsGetter(context, formatter):
    """Resolve the entry's principal ids to principal objects."""
    authentication = zapi.principals()
    return [authentication.getPrincipal(pid)
            for pid in context.principal_ids]
def principalsFormatter(value, context, formatter):
    """Render the principals' titles as a comma-separated string."""
    return ', '.join(principal.title for principal in value)
def logFormatter(value, context, formatter):
    """Render a (summary, details) pair as translated HTML divs.

    Falls back to a translated placeholder when both parts are empty.
    """
    summary, details = value
    parts = []
    if summary:
        translated = i18n.translate(
            summary, context=formatter.request, default=summary)
        parts.append('<div class="logSummary">%s</div>' % (translated,))
    if details:
        translated = i18n.translate(
            details, context=formatter.request, default=details)
        parts.append('<div class="details">%s</div>' % (translated,))
    if parts:
        return '\n'.join(parts)
    return i18n.translate(
        _('no_summary_or_details_available-log_view',
          '[no information available]'), context=formatter.request)
def changesGetter(item, formatter):
    """Wrap the entry's change set as an object providing its record_schema."""
    data = form.FormData(item.record_schema, item.record_changes)
    interface.directlyProvides(data, item.record_schema)
    return data
def recordGetter(item, formatter):
    """Wrap the entry's full record as an object providing its record_schema."""
    data = form.FormData(item.record_schema, item.record)
    interface.directlyProvides(data, item.record_schema)
    return data
def recordFormatter(value, item, formatter):
    """Render a record/changes object via the registered 'logrecordview'."""
    record_view = component.getMultiAdapter(
        (value, item, formatter.request), name='logrecordview')
    record_view.update()
    return record_view.render()
# Register default.pt as the 'default' named template for ILoggingView
# (used by the view classes' ``template`` attributes below).
default_template = namedtemplate.NamedTemplateImplementation(
    ViewPageTemplateFile('default.pt'),
    zc.objectlog.browser.interfaces.ILoggingView)
class LogView(zope.publisher.browser.BrowserPage):
    """Browser page rendering an object's log as a zc.table listing."""
    interface.implements(
        zc.objectlog.browser.interfaces.ILoggingView)
    component.adapts( # could move to IAdaptableToLogging ;-)
        interfaces.ILogging, IBrowserRequest)
    template = namedtemplate.NamedTemplate('default')
    # Column definitions: date, responsible principals, summary/details
    # text, and the rendered change set.
    columns = (
        SortableColumn(_('log_column-date', 'Date'),
                       lambda c, f: c.timestamp, dateFormatter),
        SortableColumn(_('log_column-principals', 'Principals'),
                       principalsGetter, principalsFormatter),
        zc.table.column.GetterColumn(
            _('log_column-log', 'Log'), lambda c, f: (c.summary, c.details),
            logFormatter),
        zc.table.column.GetterColumn(
            _('log_column-details', 'Changes'), changesGetter, recordFormatter),
#        zc.table.column.GetterColumn(
#            _('log_column-details', 'Full Status'), recordGetter, recordFormatter)
        )

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def update(self):
        # Build the table formatter over the context's log entries.
        formatter_factory = component.getUtility(
            zc.table.interfaces.IFormatterFactory)
        self.formatter = formatter_factory(
            self, self.request, interfaces.ILogging(self.context).log,
            columns=self.columns)

    def render(self):
        return self.template()

    def __call__(self):
        self.update()
        return self.render()
def objectGetter(item, formatter):
    """Return the object an entry belongs to (entry -> log -> context)."""
    log = item.__parent__
    return log.__parent__
def objectFormatter(value, item, formatter):
    """Render the log's source object via the registered 'log source' view.

    NOTE(review): the third adapted component here is the table
    *formatter*, while recordFormatter adapts on formatter.request --
    confirm the asymmetry is intended.
    """
    source_view = component.getMultiAdapter(
        (value, item, formatter), name='log source')
    source_view.update()
    return source_view.render()
class AggregatedLogView(LogView):
    """LogView variant listing entries aggregated from several logs.

    Adds a 'Source' column identifying each entry's originating object
    and iterates IAggregatedLog(context) instead of a single log;
    render/__call__ are inherited from LogView.
    """
    interface.implements(
        zc.objectlog.browser.interfaces.IAggregatedLoggingView)
    component.adapts( # could move to IAdaptableToLogging ;-)
        interfaces.ILogging, IBrowserRequest)
    template = namedtemplate.NamedTemplate('default')
    columns = (
        SortableColumn(_('log_column-task', 'Source'),
                       objectGetter, objectFormatter),
        SortableColumn(_('log_column-date', 'Date'),
                       lambda c, f: c.timestamp, dateFormatter),
        SortableColumn(_('log_column-principals', 'Principals'),
                       principalsGetter, principalsFormatter),
        zc.table.column.GetterColumn(
            _('log_column-log', 'Log'), lambda c, f: (c.summary, c.details),
            logFormatter),
        zc.table.column.GetterColumn(
            _('log_column-details', 'Changes'),
            changesGetter, recordFormatter),
        )

    def update(self):
        formatter_factory = component.getUtility(
            zc.table.interfaces.IFormatterFactory)
        self.formatter = formatter_factory(
            self, self.request, interfaces.IAggregatedLog(self.context),
            columns=self.columns)
# Vendored zc.buildout bootstrap script (Python 2 only): re-executes
# itself without site-packages when possible, installs setuptools or
# distribute plus a matching zc.buildout into an egg directory
# (temporary unless --eggs is given), then runs buildout's 'bootstrap'.
import os, shutil, sys, tempfile, textwrap, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c # work around spawn lamosity on windows
        else:
            return c
else:
    quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
    [sys.executable, '-Sc',
     'try:\n'
     '    import ConfigParser\n'
     'except ImportError:\n'
     '    print 1\n'
     'else:\n'
     '    print 0\n'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded.  This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient.  However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
    # We will restart with python -S.
    args = sys.argv[:]
    args[0:0] = [sys.executable, '-S']
    args = map(quote, args)
    os.execv(sys.executable, args)
# Now we are running with -S.  We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in sys.modules.items():
    if k in ('setuptools', 'pkg_resources') or (
        hasattr(v, '__path__') and
        len(v.__path__)==1 and
        not os.path.exists(os.path.join(v.__path__[0],'__init__.py'))):
        # This is a namespace package.  Remove it.
        sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
    if value:
        if '://' not in value: # It doesn't smell like a URL.
            value = 'file://%s' % (
                urllib.pathname2url(
                    os.path.abspath(os.path.expanduser(value))),)
        if opt_str == '--download-base' and not value.endswith('/'):
            # Download base needs a trailing slash to make the world happy.
            value += '/'
    else:
        value = None
    name = opt_str[2:].replace('-', '_')
    setattr(parser.values, name, value)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
                          help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
                   action="store_true", dest="use_distribute", default=False,
                   help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or file location for the setup file. "
                        "If you use Setuptools, this will default to " +
                        setuptools_source + "; if you use Distribute, this "
                        "will default to " + distribute_source +"."))
parser.add_option("--download-base", action="callback", dest="download_base",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or directory for downloading "
                        "zc.buildout and either Setuptools or Distribute. "
                        "Defaults to PyPI."))
parser.add_option("--eggs",
                  help=("Specify a directory for storing eggs.  Defaults to "
                        "a temporary directory that is deleted when the "
                        "bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you.  If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
                   help=("Specify the path to the buildout configuration "
                         "file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args += ['-c', options.config_file]
if options.eggs:
    eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
    eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
    if options.use_distribute:
        options.setup_source = distribute_source
    else:
        options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
    args.append('buildout:accept-buildout-test-releases=true')
args.append('bootstrap')
try:
    import pkg_resources
    import setuptools # A flag.  Sometimes pkg_resources is installed alone.
    if not hasattr(pkg_resources, '_distribute'):
        raise ImportError
except ImportError:
    ez_code = urllib2.urlopen(
        options.setup_source).read().replace('\r\n', '\n')
    ez = {}
    exec ez_code in ez
    setup_args = dict(to_dir=eggs_dir, download_delay=0)
    if options.download_base:
        setup_args['download_base'] = options.download_base
    if options.use_distribute:
        setup_args['no_fake'] = True
    ez['use_setuptools'](**setup_args)
    if 'pkg_resources' in sys.modules:
        reload(sys.modules['pkg_resources'])
    import pkg_resources
    # This does not (always?) update the default working set.  We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(eggs_dir)]
if not has_broken_dash_S:
    cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
    find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
    cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
    setup_requirement = 'distribute'
else:
    setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
    pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
    os.environ,
    PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setup_requirement_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
    sys.stdout.flush()
    sys.stderr.flush()
    print ("An error occurred when trying to install zc.buildout. "
           "Look above this message for any errors that "
           "were output by easy_install.")
    sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
    shutil.rmtree(eggs_dir)
import os, shutil, sys, tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install distribute
to_reload = False
try:
import pkg_resources, setuptools
if not hasattr(pkg_resources, '_distribute'):
to_reload = True
raise ImportError
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
exec(urlopen('http://python-distribute.org/distribute_setup.py').read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0, no_fake=True)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install is run in a subprocess so it installs zc.buildout into
# the temporary egg directory without touching the current process.
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

# Extra release index: environment override, then the -f option, then
# (for test releases only) the default buildout download site.
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

# Location of the distribute egg ensured above; it is used both as a
# search path below and as PYTHONPATH for the subprocess.
distribute_path = ws.find(
    pkg_resources.Requirement.parse('distribute')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # True when no pre-release marker ('*a', '*b', ...) appears in
        # the parsed-version tuple, other than the '*final' markers.
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True

    index = setuptools.package_index.PackageIndex(
        search_path=[distribute_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan all candidate distributions and keep the highest final
        # version seen.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    # Pin the requirement to the chosen (or user-supplied) version.
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=distribute_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s",
        repr(cmd)[1:-1])
######################################################################
# Import and run buildout

# Make the freshly installed zc.buildout egg importable.
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# If no buildout command was given (only assignments), default to the
# 'bootstrap' command.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
"""Queue Implementations
"""
from persistent import Persistent
from ZODB.ConflictResolution import PersistentReference
from ZODB.POSException import ConflictError
from zope import interface
from zc.queue import interfaces
@interface.implementer(interfaces.IQueue)
class Queue(Persistent):
    """A simple FIFO queue stored as a single persistent tuple.

    Every mutation rebinds ``_data`` wholesale, so the whole queue is
    written to the database on each change; conflicting transactions
    are merged by ``resolveQueueConflict``.
    """

    def __init__(self):
        # Queue contents, oldest item first.  A tuple is used so changes
        # happen by rebinding, which marks the object as changed.
        self._data = ()

    def pull(self, index=0):
        """Remove and return the item at ``index`` (default: the head).

        Negative indexes count from the end.  Raises IndexError for
        out-of-range values, mirroring sequence semantics.
        """
        if index < 0:
            len_self = len(self._data)
            index += len_self
            if index < 0:
                # Report the caller's original (negative) index.
                raise IndexError(index - len_self)
        res = self._data[index]
        self._data = self._data[:index] + self._data[index + 1:]
        return res

    def put(self, item):
        """Append ``item`` to the end of the queue."""
        self._data += (item,)

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, index):
        return self._data[index]  # works with passing a slice too

    def __nonzero__(self):
        return bool(self._data)

    # Python 3 ignores __nonzero__; alias it so truth testing uses the
    # same implementation instead of falling back to __len__.
    __bool__ = __nonzero__

    def _p_resolveConflict(self, oldstate, committedstate, newstate):
        # Delegate ZODB conflict resolution to the shared merge logic.
        return resolveQueueConflict(
            oldstate, committedstate, newstate)
class BucketQueue(Queue):
    """A Queue variant used as a bucket inside a CompositeQueue."""

    def _p_resolveConflict(self, oldstate, committedstate, newstate):
        # Buckets resolve conflicts conservatively: the shared merge
        # logic refuses to resolve when one transaction emptied a
        # previously non-empty bucket.
        return resolveQueueConflict(
            oldstate, committedstate, newstate, bucket=True)


PersistentQueue = BucketQueue  # for legacy instances, be conservative
class PersistentReferenceProxy(object):
    """Hashable wrapper for ``ZODB.ConflictResolution.PersistentReference``.

    ``PersistentReference`` doesn't get handled correctly in the
    resolveQueueConflict function due to lack of the ``__hash__`` method.
    So we make a workaround here to utilize the equality support of
    ``PersistentReference``.

    All proxies share a single hash bucket (``__hash__`` returns a
    constant), so set membership is decided entirely by ``__eq__``.
    """

    def __init__(self, pr):
        assert isinstance(pr, PersistentReference)
        self.pr = pr

    def __hash__(self):
        # Constant hash: equal references must hash equal, and we have
        # no stable hash for a PersistentReference, so force all
        # comparisons through __eq__.
        return 1

    def __eq__(self, other):
        if not isinstance(other, PersistentReferenceProxy):
            # A non-proxy object can land in the same hash bucket
            # (anything whose hash is 1); previously this raised
            # AttributeError on ``other.pr``.  It is never equal.
            return False
        try:
            return self.pr == other.pr
        except ValueError:
            # PersistentReference comparison raises ValueError when the
            # two references cannot be compared; treat as unequal.
            return False

    def __ne__(self, other):
        # Explicit inverse for Python 2, where __ne__ is not derived
        # from __eq__.
        return not self.__eq__(other)

    def __repr__(self):
        return self.pr.__repr__()
def resolveQueueConflict(oldstate, committedstate, newstate, bucket=False):
    """Three-way merge of queue states for ZODB conflict resolution.

    Only the ``_data`` tuple may differ between ``committedstate`` and
    ``newstate``.  Any other difference -- or the same item being added
    or removed by both transactions relative to ``oldstate`` -- raises
    ConflictError.  Returns the merged committed state.
    """
    # We only know how to merge _data; any other differing attribute
    # makes the conflict unresolvable.
    if set(committedstate.keys()) != set(newstate.keys()):
        raise ConflictError  # can't resolve
    for key, value in newstate.items():
        if key != '_data' and value != committedstate[key]:
            raise ConflictError  # can't resolve

    # We are willing to merge anything, unless committedstate and
    # newstate share one or more of the same deletions or additions in
    # comparison to oldstate.  Persistent items arrive here as
    # PersistentReference objects, which must be wrapped to be usable
    # in sets.  See 'queue.txt'.
    def wrap(item):
        if isinstance(item, PersistentReference):
            return PersistentReferenceProxy(item)
        return item

    def unwrap(item):
        if isinstance(item, PersistentReferenceProxy):
            return item.pr
        return item

    old = [wrap(item) for item in oldstate['_data']]
    committed = [wrap(item) for item in committedstate['_data']]
    new = [wrap(item) for item in newstate['_data']]

    old_set = set(old)
    committed_set = set(committed)
    new_set = set(new)

    if bucket and bool(old_set) and (bool(committed_set) ^ bool(new_set)):
        # This is a bucket, part of a CompositePersistentQueue.  The old
        # set of this bucket had items, and one of the two transactions
        # cleaned it out.  There's a reasonable chance that this bucket
        # will be cleaned out by the parent in one of the two new
        # transactions.  We can't know for sure, so we take the
        # conservative route of refusing to be resolvable.
        raise ConflictError

    committed_added = committed_set - old_set
    committed_removed = old_set - committed_set
    new_added = new_set - old_set
    new_removed = old_set - new_set

    if new_removed & committed_removed:
        # Both transactions removed (claimed) the same item.
        raise ConflictError  # can't resolve
    if new_added & committed_added:
        # Both transactions added the same item.
        raise ConflictError  # can't resolve

    # Do the merge: start from the committed state minus the items the
    # new transaction removed, then append the new transaction's
    # additions (which appear, in order, at the tail of its data).
    merged = [unwrap(item) for item in committed if item not in new_removed]
    if new_added:
        ordered_new_added = new[-len(new_added):]
        assert set(ordered_new_added) == new_added
        merged.extend(unwrap(item) for item in ordered_new_added)
    committedstate['_data'] = tuple(merged)
    return committedstate
@interface.implementer(interfaces.IQueue)
class CompositeQueue(Persistent):
    """Appropriate for queues that may become large.

    Using this queue has one advantage and two possible disadvantages.

    The advantage is that adding items to a large queue does not require
    writing the entire queue out to the database, since only one or two parts
    of it actually changes.  This can be a win for time, memory, and database
    size.

    One disadvantage is that multiple concurrent adds may intermix the adds in
    a surprising way: see queue.txt for more details.

    Another possible disadvantage is that this queue does not consistently
    enforce the policy that concurrent adds of the same item are not
    allowed: because one instance may go in two different composite buckets,
    the conflict resolution cannot look in both buckets to see that they were
    both added.

    If either of these are an issue, consider using the simpler
    PersistentQueue instead, foregoing the advantages of the composite
    approach.
    """

    # design note: one rejected strategy to try and enforce the
    # "concurrent adds of the same object are not allowed" policy is
    # to set a persistent flag on a queue when it reaches or exceeds
    # the target size, and to start a new bucket only on the following
    # transaction.  This would work in some scenarios, but breaks down
    # when two transactions happen sequentially *while* a third
    # transaction happens concurrently to both.

    def __init__(self, compositeSize=15, subfactory=BucketQueue):
        # the compositeSize value is a ballpark.  Because of the merging
        # policy, a composite queue might get as big as 2n under unusual
        # circumstances.  A better name for this might be "splitSize"...
        self.subfactory = subfactory
        self._data = ()
        self.compositeSize = compositeSize

    def __nonzero__(self):
        return bool(self._data)

    # Python 3 ignores __nonzero__; alias it so truth testing is O(1)
    # rather than falling back to __len__, which walks every bucket.
    __bool__ = __nonzero__

    def pull(self, index=0):
        """Remove and return the item at ``index`` (default: the head).

        Negative indexes count from the end.  Raises IndexError for
        out-of-range values.
        """
        ct = 0
        if index < 0:
            len_self = len(self)
            rindex = index + len_self  # not efficient, but quick and easy
            if rindex < 0:
                raise IndexError(index)
        else:
            rindex = index
        for cix, q in enumerate(self._data):
            for ix, item in enumerate(q):
                if rindex == ct:
                    q.pull(ix)
                    # take this opportunity to weed out empty
                    # composite queues that may have been introduced
                    # by conflict resolution merges or by this pull.
                    self._data = tuple(q for q in self._data if q)
                    return item
                ct += 1
        raise IndexError(index)

    def put(self, item):
        """Append ``item``, starting a new bucket when the last is full."""
        if not self._data:
            self._data = (self.subfactory(),)
        last = self._data[-1]
        if len(last) >= self.compositeSize:
            last = self.subfactory()
            self._data += (last,)
        last.put(item)

    def __len__(self):
        res = 0
        for q in self._data:
            res += len(q)
        return res

    def __iter__(self):
        for q in self._data:
            for i in q:
                yield i

    def __getitem__(self, index):
        if isinstance(index, slice):
            # Materializing costs nothing extra: slice.indices(len(self))
            # would have walked every bucket anyway.  Slicing the list
            # also handles negative strides correctly -- e.g. q[::-1]
            # now yields the items in reverse, where the previous manual
            # loop returned [] (unlike Queue.__getitem__).
            return list(self)[index]
        else:
            if index < 0:  # not efficient, but quick and easy
                len_self = len(self)
                rindex = index + len_self
                if rindex < 0:
                    raise IndexError(index)
            else:
                rindex = index
            for ix, v in enumerate(self):
                if ix == rindex:
                    return v
            raise IndexError(index)

    def _p_resolveConflict(self, oldstate, committedstate, newstate):
        # Delegate ZODB conflict resolution to the shared merge logic.
        return resolveQueueConflict(oldstate, committedstate, newstate)


CompositePersistentQueue = CompositeQueue  # legacy
=================
Release History
=================
4.0 (2023-07-07)
================
- Drop support for Python 2.7, 3.5, 3.6.
- Add support for Python 3.9, 3.10, 3.11.
3.0.0 (2019-03-30)
==================
- Drop support for Python 3.4.
- Add support for Python 3.7 and 3.8a2.
- Flake8 the code.
2.0.0 (2017-06-21)
==================
- Add support for Python 3.4, 3.5, 3.6 and PyPy.
- Automated testing is enabled on Travis CI.
1.3.6 (2014-04-14)
==================
- Fixed: Strings were incorrectly compared using "is not ''" rather than !=
- Fixed: Spaces weren't allowed in the installation location.
1.3.5 (2011-08-06)
==================
- Fixed: Spaces weren't allowed in environment variables.
- Fixed: Added missing option reference documentation.
1.3.4 (2011-01-18)
==================
- Fixed a bug in location book-keeping that caused shared builds to be deleted
from disk when a part didn't need them anymore. (#695977)
- Made tests pass with both zc.buildout 1.4 and 1.5, lifted the upper version
bound on zc.buildout. (#695732)
1.3.3 (2010-11-10)
==================
- Remove the temporary build directory when cmmi succeeds.
- Specify that the zc.buildout version be <1.5.0b1, as the recipe is
currently not compatible with zc.buildout 1.5.
1.3.2 (2010-08-09)
==================
- Remove the build directory for a shared build when the source cannot be
downloaded.
- Declared a test dependency on zope.testing.
1.3.1 (2009-09-10)
==================
- Declare dependency on zc.buildout 1.4 or later. This dependency was introduced
in version 1.3.
1.3 (2009-09-03)
================
- Use zc.buildout's download API. As this allows MD5 checking, added the
md5sum and patch-md5sum options.
- Added options for changing the name of the configure script and
overriding the ``--prefix`` parameter.
- Moved the core "configure; make; make install" command sequence to a
method that can be overridden in other recipes, to support packages
whose installation process is slightly different.
1.2.1 (2009-08-12)
==================
Bug fix: keep track of reused shared builds.
1.2.0 (2009-05-18)
==================
Enabled using a shared directory for completed builds.
1.1.6 (2009-03-17)
==================
Moved 'zc' package from root of checkout into 'src', to prevent testrunner
from finding eggs installed locally by buildout.
Removed deprecations under Python 2.6.
1.1.5 (2008-11-07)
==================
- Added to the README.txt file a link to the SVN repository, so that Setuptools
can automatically find the development version when asked to install the
"-dev" version of zc.recipe.cmmi.
- Applied fix for bug #261367 i.e. changed open() of file being downloaded to
binary, so that errors like the following no longer occur under Windows.
uncompress = self.decompress.decompress(buf)
error: Error -3 while decompressing: invalid distance too far back
1.1.4 (2008-06-25)
==================
Add support to autogen configure files.
1.1.3 (2008-06-03)
==================
Add support for updating the environment.
1.1.2 (2008-02-28)
==================
- Check if the ``location`` folder exists before creating it.
After 1.1.0
===========
Added support for patches to be downloaded from a url rather than only using
patches on the filesystem
1.1.0
=====
Added support for:
- download-cache: downloaded files are cached in the 'cmmi' subdirectory of
  the cache; cache keys are hashes of the url that the file was downloaded
  from; cache information is recorded in the cache.ini file within each
  directory
- offline mode: cmmi will not go online if the package is not in the cache
- variable location: build files other than in the parts directory if required
- additional logging/output
1.0.2 (2007-06-03)
==================
- Added support for patches.
- Tests fixed (buildout's output changed)
1.0.1 (2006-11-22)
==================
- Added missing zip_safe flag.
1.0 (2006-11-22)
================
Initial release.
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/CHANGES.rst | CHANGES.rst |
==============================================================
Recipe installing a download via configure/make/make install
==============================================================
The configure-make-make-install recipe automates installation of
configure-based source distributions into buildouts.
.. contents::
Options
=======
url
The URL of a source archive to download
configure-command
The name of the configure script.
The option defaults to ``./configure``.
configure-options
Basic configure options.
Defaults to a ``--prefix`` option that points to the part directory.
extra_options
A string of extra options to pass to configure in *addition to* the
base options.
environment
   Optional environment variable settings of the form NAME=VALUE.
Newlines are ignored. Spaces may be included in environment values
as long as they can't be mistaken for environment settings. So::
environment = FOO=bar
baz
Sets the environment variable FOO, but::
environment = FOO=bar xxx=baz
Sets 2 environment values, FOO and xxx.
patch
The name of an optional patch file to apply to the distribution.
patch_options
Options to supply to the patch command (if a patch file is used).
This defaults to ``-p0``
shared
   Share the build across buildouts.
autogen
The name of a script to run to generate a configure script.
source-directory-contains
The name of a file in the distribution's source directory.
This is used by the recipe to determine if it has found the source
   directory.  It defaults to "configure".
.. note::
This recipe is not expected to work in a Microsoft Windows environment.
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/README.rst | README.rst |
We have an archive with a demo foo tar ball and publish it by http in order
to see offline effects:
>>> ls(distros)
- bar.tgz
- baz.tgz
- foo.tgz
>>> distros_url = start_server(distros)
Let's update a sample buildout to installs it:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... """ % distros_url)
We used the url option to specify the location of the archive.
If we run the buildout, the configure script in the archive is run.
It creates a make file which is also run:
>>> print(system('bin/buildout').strip())
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
configuring foo --prefix=/sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
The recipe also creates the parts directory:
>>> import os.path
>>> os.path.isdir(join(sample_buildout, "parts", "foo"))
True
If we run the buildout again, the update method will be called, which
does nothing:
>>> print(system('bin/buildout').strip())
Updating foo.
You can supply extra configure options:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... extra_options = -a -b c
... """ % distros_url)
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
configuring foo --prefix=/sample-buildout/parts/foo -a -b c
echo building foo
building foo
echo installing foo
installing foo
The recipe sets the location option, which can be read by other
recipes, to the location where the part is installed:
>>> cat('.installed.cfg')
[buildout]
installed_develop_eggs =
parts = foo
<BLANKLINE>
[foo]
...
location = /sample_buildout/parts/foo
...
It may be necessary to set some environment variables when running configure
or make. This can be done by adding an environment statement:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... environment =
... CFLAGS=-I/usr/lib/postgresql7.4/include
... """ % distros_url)
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
foo: Updating environment: CFLAGS=-I/usr/lib/postgresql7.4/include
configuring foo --prefix=/sample_buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
Sometimes it's necessary to patch the sources before building a package.
You can specify the name of the patch to apply and (optional) patch options:
First of all let's write a patchfile:
>>> import sys
>>> mkdir('patches')
>>> write('patches/config.patch',
... """--- configure
... +++ /dev/null
... @@ -1,13 +1,13 @@
... #!%s
... import sys
... -print("configuring foo " + ' '.join(sys.argv[1:]))
... +print("configuring foo patched " + ' '.join(sys.argv[1:]))
...
... Makefile_template = '''
... all:
... -\techo building foo
... +\techo building foo patched
...
... install:
... -\techo installing foo
... +\techo installing foo patched
... '''
...
... with open('Makefile', 'w') as f:
... _ = f.write(Makefile_template)
...
... """ % sys.executable)
Now let's create a buildout.cfg file. Note: If no patch option is being
passed, -p0 is appended by default.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... patch = ${buildout:directory}/patches/config.patch
... patch_options = -p0
... """ % distros_url)
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
patching file configure
...
configuring foo patched --prefix=/sample_buildout/parts/foo
echo building foo patched
building foo patched
echo installing foo patched
installing foo patched
It is possible to autogenerate the configure files:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %s/bar.tgz
... autogen = autogen.sh
... """ % distros_url)
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost//bar.tgz
foo: Unpacking and configuring
foo: auto generating configure files
configuring foo --prefix=/sample_buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
It is also possible to support configure commands other than "./configure":
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %s/baz.tgz
... source-directory-contains = configure.py
... configure-command = ./configure.py
... configure-options =
... --bindir=bin
... """ % distros_url)
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost//baz.tgz
foo: Unpacking and configuring
configuring foo --bindir=bin
echo building foo
building foo
echo installing foo
installing foo
When downloading a source archive or a patch, we can optionally make sure of
its authenticity by supplying an MD5 checksum that must be matched. If it
matches, we'll not be bothered with the check by buildout's output:
>>> from hashlib import md5
>>> with open(join(distros, 'foo.tgz'), 'rb') as f:
... foo_md5sum = md5(f.read()).hexdigest()
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... md5sum = %s
... """ % (distros_url, foo_md5sum))
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
configuring foo --prefix=/sample_buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
But if the archive doesn't match the checksum, the recipe refuses to install:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sbar.tgz
... md5sum = %s
... patch = ${buildout:directory}/patches/config.patch
... """ % (distros_url, foo_md5sum))
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Downloading http://localhost:20617/bar.tgz
While:
Installing foo.
Error: MD5 checksum mismatch downloading 'http://localhost/bar.tgz'
Similarly, a checksum mismatch for the patch will cause the buildout run to be
aborted:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... patch = ${buildout:directory}/patches/config.patch
... patch-md5sum = %s
... """ % (distros_url, foo_md5sum))
>>> print(system('bin/buildout').strip())
Installing foo.
foo: Downloading http://localhost:21669/foo.tgz
foo: Unpacking and configuring
While:
Installing foo.
Error: MD5 checksum mismatch for local resource at '/.../sample-buildout/patches/config.patch'.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... patch = ${buildout:directory}/patches/config.patch
... """ % (distros_url))
If the build fails, the temporary directory where the tarball was unpacked
is logged to stdout, and left intact for debugging purposes.
>>> write('patches/config.patch', "dgdgdfgdfg")
>>> res = system('bin/buildout')
>>> print(res)
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
patch unexpectedly ends in middle of line
foo: cmmi failed: /.../...buildout-foo
patch: **** Only garbage was found in the patch input.
While:
Installing foo.
<BLANKLINE>
An internal error occurred due to a bug in either zc.buildout or in a
recipe being used:
...
subprocess.CalledProcessError: Command 'patch -p0 < ...' returned non-zero exit status ...
<BLANKLINE>
>>> import re
>>> import os.path
>>> import shutil
>>> path = re.search('foo: cmmi failed: (.*)', res).group(1)
>>> os.path.exists(path)
True
>>> shutil.rmtree(path)
After a successful build, such temporary directories are removed.
>>> import glob
>>> import tempfile
>>> old_tempdir = tempfile.gettempdir()
>>> tempdir = tempfile.tempdir = tempfile.mkdtemp(suffix='.buildout.build')
>>> dirs = glob.glob(os.path.join(tempdir, '*buildout-foo'))
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... """ % (distros_url,))
>>> print(system("bin/buildout"))
Installing foo.
foo: Downloading http://localhost:21445/foo.tgz
foo: Unpacking and configuring
configuring foo --prefix=/sample_buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
>>> new_dirs = glob.glob(os.path.join(tempdir, '*buildout-foo'))
>>> len(dirs) == len(new_dirs) == 0
True
>>> tempfile.tempdir = old_tempdir
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/src/zc/recipe/cmmi/README.rst | README.rst |
Loading Patces from URLs
========================
Patch files can be loaded from URLs as well as files.
Any downloaded patch files can be cached in a download cache if
available, in exactly the same way as for tarballs. Similarly,
if the build is set to offline operation, then it will not download
from a remote location.
To see how this works, we'll set up a web server with a patch file,
and a cache with our tarball in:
>>> import sys, os
>>> cache = tmpdir('cache')
>>> patch_data = tmpdir('patch_data')
>>> patchfile = os.path.join(patch_data, 'config.patch')
>>> write(patchfile,
... """--- configure
... +++ /dev/null
... @@ -1,13 +1,13 @@
... #!%s
... import sys
... -print("configuring foo " + ' '.join(sys.argv[1:]))
... +print("configuring foo patched " + ' '.join(sys.argv[1:]))
...
... Makefile_template = '''
... all:
... -\techo building foo
... +\techo building foo patched
...
... install:
... -\techo installing foo
... +\techo installing foo patched
... '''
...
... open('Makefile', 'w').write(Makefile_template)
...
... """ % sys.executable)
>>> server_url = start_server(patch_data)
Now let's create a buildout.cfg file.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache=%(cache)s
... log-level = DEBUG
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%(distros)s/foo.tgz
... patch = %(server_url)s/config.patch
... """ % dict(distros=distros,server_url=server_url,cache=cache))
>>> print(system('bin/buildout').strip())
In...
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Cache miss; will cache /distros/foo.tgz as /cache/cmmi/...
foo: Using local resource /distros/foo.tgz
foo: Unpacking and configuring
foo: Searching cache at /cache/cmmi
foo: Cache miss; will cache http://localhost//config.patch as /cache/cmmi/...
foo: Downloading http://localhost//config.patch
patching file configure
...
configuring foo patched /sample-buildout/parts/foo
echo building foo patched
building foo patched
echo installing foo patched
installing foo patched
Any downloaded patch files can be cached in a download cache if available, in
exactly the same way as for tarballs. Similarly, if the build is set to offline
operation, then it will not download from a remote location.
We can see that the patch is now in the cache, as well as the tarball:
>>> import os
>>> cache_path = os.path.join(cache, 'cmmi')
>>> ls(cache_path)
- ...
- ...
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/src/zc/recipe/cmmi/patching.rst | patching.rst |
Using a download cache
======================
Normally, when distributions are installed, if any processing is
needed, they are downloaded from the internet to a temporary directory
and then installed from there. A download cache can be used to avoid
the download step. This can be useful to reduce network access and to
create source distributions of an entire buildout.
The buildout download-cache option can be used to specify a directory
to be used as a download cache.
In this example, we'll create a directory to hold the cache:
>>> cache = tmpdir('cache')
We have an archive with a demo foo tar ball and publish it by http in order
to see offline effects:
>>> ls(distros)
- bar.tgz
- baz.tgz
- foo.tgz
>>> distros = start_server(distros)
Let's update a sample buildout to install it:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
... log-level = DEBUG
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %s/foo.tgz
... """ % (cache, distros))
We used the url option to specify the location of the archive.
It creates a make file which is also run:
>>> print(system('bin/buildout'))
In...
...
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Cache miss; will cache http://localhost//foo.tgz as /cache/cmmi/...
foo: Downloading http://localhost//foo.tgz
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
We'll also get the download cache populated. The buildout doesn't put
files in the cache directly. It creates an intermediate directory,
cmmi:
>>> ls(cache)
d cmmi
d dist
The cmmi directory contains the cache keys - these are hashes of the
download url:
>>> import os
>>> cache_path = os.path.join(cache, 'cmmi')
>>> len(os.listdir(cache_path))
1
If we remove the installed parts and then re-run, we'll see that the
files are not downloaded afresh:
>>> def remove_parts():
... for f in os.listdir("parts"):
... if f != "buildout":
... remove("parts", f)
>>> remove_parts()
>>> print(system(buildout))
In...
...
Uninstalling foo.
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Using cache file /cache/cmmi/...
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
This is because the ones in the download cache are used.
If a file directory is removed from the cache, and the installed parts
are also removed, then it is downloaded afresh:
>>> for f in os.listdir( cache_path ):
... remove (cache_path, f)
>>> remove_parts()
>>> print(system('bin/buildout'))
In...
...
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Cache miss; will cache http://localhost//foo.tgz as /cache/cmmi/...
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
If the cache location is changed, and the installed parts are removed,
the new cache is created and repopulated:
>>> remove_parts()
>>> cache2 = tmpdir('cache2')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
... log-level = DEBUG
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %s/foo.tgz
... """ % (cache2, distros))
>>> print(system('bin/buildout'))
In...
...
Installing foo.
foo: Searching cache at /cache2/cmmi
foo: Cache miss; will cache http://localhost//foo.tgz as /cache2/cmmi/...
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
The old cache is left in place:
>>> ls(cache_path)
- ...
Installing solely from a download cache
---------------------------------------
A download cache can be used as the basis of application source
releases. In an application source release, we want to distribute an
application that can be built without making any network accesses. In
this case, we distribute a buildout with download cache and tell the
buildout to install from the download cache only, without making
network accesses. The buildout install-from-cache option can be used
to signal that packages should be installed only from the download
cache.
If the buildout is run in offline mode, once the installed parts have
been removed, the files from the cache are used:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
... log-level = DEBUG
... install-from-cache = true
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %s/foo.tgz
... """ % (cache, distros))
>>> remove_parts()
>>> print(system(buildout))
In...
...
Uninstalling foo.
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Using cache file /cache/cmmi/...
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
However, in offline mode, if we remove the installed parts and clear
the cache, an error is raised because the file is not in the cache:
>>> for f in os.listdir( cache_path ):
... remove (cache_path, f)
>>> remove_parts()
>>> print(system(buildout))
In...
...
Uninstalling foo.
Installing foo.
foo: Searching cache at /cache/cmmi
foo: Cache miss; will cache http://localhost//foo.tgz as /cache/cmmi/...
While:
Installing foo.
Error: Couldn't download 'http://localhost//foo.tgz' in offline mode.
<BLANKLINE>
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/src/zc/recipe/cmmi/downloadcache.rst | downloadcache.rst |
==============================
Using a shared build directory
==============================
For builds that take a long time, it can be convenient to reuse them across
several buildouts. To do this, use the `shared` option:
>>> cache = tmpdir('cache')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = True
... """ % (cache, distros))
When run the first time, the build is executed as usual:
>>> print(system('bin/buildout'))
Installing foo.
foo: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
But after that, the existing shared build directory is used instead of running
the build again:
>>> remove('.installed.cfg')
>>> print(system('bin/buildout'))
Installing foo.
foo: using existing shared build
<BLANKLINE>
The shared directory
====================
By default, the shared build directory is named with a hash of the recipe's
configuration options (but it can also be configured manually, see below):
>>> ls(cache, 'cmmi', 'build')
d <BUILDID>
For example, if the download url changes, the build is executed again:
>>> import os
>>> import shutil
>>> _ = shutil.copy(os.path.join(distros, 'foo.tgz'),
... os.path.join(distros, 'qux.tgz'))
>>> remove('.installed.cfg')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = qux
... download-cache = %s
...
... [qux]
... recipe = zc.recipe.cmmi
... url = file://%s/qux.tgz
... shared = True
... """ % (cache, distros))
>>> print(system('bin/buildout'))
Installing qux.
qux: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
and another shared directory is created:
>>> ls(cache, 'cmmi', 'build')
d <BUILDID>
d <BUILDID>
(Other recipes can retrieve the shared build directory from our part's
`location` as usual, so the SHA-names shouldn't be a problem.)
Configuring the shared directory
================================
If you set `shared` to an existing directory, that will be used as the build
directory directly (instead of a name computed from the recipe options):
>>> shared = os.path.join(cache, 'existing')
>>> os.mkdir(shared)
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = %s
... """ % (distros, shared))
>>> remove('.installed.cfg')
>>> print(system('bin/buildout'))
Installing foo.
foo: Unpacking and configuring
configuring foo /cache/existing/cmmi
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
If no download-cache is set, and `shared` is not a directory, an error is raised:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = True
... """ % distros)
>>> print(system('bin/buildout').strip())
While:
Installing.
Getting section foo.
Initializing section foo.
...
ValueError: Set the 'shared' option of zc.recipe.cmmi to an existing
directory, or set ${buildout:download-cache}
Build errors
============
If an error occurs during the build (or it is aborted by the user),
the build directory is removed, so there is no risk of accidentally
mistaking some half-baked build directory for a good cached shared build.
Let's simulate a build error. First, we backup a working build.
>>> _ = shutil.copy(os.path.join(distros, 'foo.tgz'),
... os.path.join(distros, 'foo.tgz.bak'))
Then we create a broken tarball:
>>> import tarfile
>>> from zc.recipe.cmmi.tests import BytesIO
>>> import sys
>>> tarpath = os.path.join(distros, 'foo.tgz')
>>> with tarfile.open(tarpath, 'w:gz') as tar:
... configure = 'invalid'
... info = tarfile.TarInfo('configure.off')
... info.size = len(configure)
... info.mode = 0o755
... tar.addfile(info, BytesIO(configure))
Now we reset the cache to force our broken tarball to be used:
>>> shutil.rmtree(cache)
>>> cache = tmpdir('cache')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = True
... """ % (cache, distros))
>>> remove('.installed.cfg')
>>> res = system('bin/buildout')
>>> print(res)
Installing foo.
...
ValueError: Couldn't find configure
The temporary directory where tarball was unpacked was left behind for
debugging purposes.
>>> import re
>>> shutil.rmtree(re.search('foo: cmmi failed: (.*)', res).group(1))
When we now fix the error (by copying back the working version and resetting the
cache), the build will be run again, and we don't use a half-baked shared
directory:
>>> _ = shutil.copy(os.path.join(distros, 'foo.tgz.bak'),
... os.path.join(distros, 'foo.tgz'))
>>> shutil.rmtree(cache)
>>> cache = tmpdir('cache')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = True
... """ % (cache, distros))
>>> print(system('bin/buildout'))
Installing foo.
foo: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
<BLANKLINE>
Interaction with other users of shared builds
=============================================
While shared builds are a way to cache a build between installation runs of a
given buildout part, they are, more importantly, shared between multiple parts
and most probably, multiple buildouts. This implies two general rules of
behaviour: We should never delete shared builds, and we need to be prepared
for shared builds to be deleted by other systems at any time.
In other words: Every install or update run of the recipe that uses a shared
build needs to check whether the build still exists on disk and rebuild it if
it does not. On the other hand, a part using the shared build must not declare
the shared build its own property lest buildout remove it when the shared
build is no longer needed, either because the part no longer uses it or
because the part itself is no longer used.
The last thing we did above was to install a shared build:
>>> ls(cache, 'cmmi', 'build')
d <BUILDID>
If someone deletes this shared build, updating the buildout part that needs it
will cause it to be rebuilt:
>>> rmdir(cache, 'cmmi', 'build')
>>> print(system('bin/buildout').strip())
Updating foo.
foo: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
>>> ls(cache, 'cmmi', 'build')
d <BUILDID>
If we stop using the shared build, it stays in the build cache:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... """ % (cache, distros))
>>> print(system('bin/buildout').strip())
Uninstalling foo.
Installing foo.
foo: Unpacking and configuring
configuring foo /sample-buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
>>> ls(cache, 'cmmi', 'build')
d <BUILDID>
Regression: Keeping track of a reused shared build
==================================================
Let's first remove and rebuild everything to get some measure of isolation
from the story so far:
>>> remove('.installed.cfg')
>>> rmdir(cache, 'cmmi', 'build')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... shared = True
... """ % (cache, distros))
>>> print(system('bin/buildout'))
Installing foo.
foo: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
zc.recipe.cmmi 1.2 had a bug that manifested after reusing a shared build: The
part wouldn't keep track of the shared build and thus wasn't able to restore
it if it got deleted from the cache. This is how it should work:
>>> remove('.installed.cfg')
>>> print(system('bin/buildout'))
Installing foo.
foo: using existing shared build
>>> rmdir(cache, 'cmmi', 'build')
>>> print(system('bin/buildout').strip())
Updating foo.
foo: Unpacking and configuring
configuring foo /cache/cmmi/build/<BUILDID>
echo building foo
building foo
echo installing foo
installing foo
Regression: Don't leave behind a build directory if the download failed
=======================================================================
zc.recipe.cmmi up to version 1.3.1 had a bug that caused an empty build
directory to be left behind if a download failed, causing it to be mistaken
for a good shared build.
We cause the download to fail by specifying a nonsensical MD5 sum:
>>> shutil.rmtree(cache)
>>> cache = tmpdir('cache')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... download-cache = %s
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... md5sum = 1234
... shared = True
... """ % (cache, distros))
>>> remove('.installed.cfg')
>>> print(system('bin/buildout'))
Installing foo.
...
Error: MD5 checksum mismatch for local resource at '/distros/foo.tgz'.
The build directory must not exist anymore:
>>> ls(cache, 'cmmi')
Another buildout run must fail the same way as the first attempt:
>>> print(system('bin/buildout'))
Installing foo.
...
Error: MD5 checksum mismatch for local resource at '/distros/foo.tgz'.
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/src/zc/recipe/cmmi/shared.rst | shared.rst |
import logging
import os
import os.path
import re
import shutil
import subprocess
import tempfile
from hashlib import sha1
import setuptools.archive_util
import zc.buildout
import zc.buildout.download
# Matches tokens that look like the start of an environment setting:
# one or more word characters followed by '='.
almost_environment_setting = re.compile(r'\w+=').match
# Matches tokens whose first character is not a digit; used so that a
# token such as '2=1+1' is treated as a continuation of the previous
# setting's value rather than as a new NAME=value setting.
not_starting_with_digit = re.compile(r'\D').match
def system(c):
    """Run the shell command *c*, raising CalledProcessError on failure."""
    subprocess.run(c, shell=True, check=True)
class Recipe:
    """Configure/make/make-install ("CMMI") buildout recipe.

    Downloads a source archive from the ``url`` option, unpacks it,
    optionally applies a patch and runs an autogen script, and then
    performs the classic ``configure && make && make install``
    sequence with the part's location as the installation prefix.

    When the ``shared`` option is set, the build is placed in (or
    reused from) a directory keyed by a hash of the recipe's
    configuration, so several parts/buildouts can share one build.
    """

    def __init__(self, buildout, name, options):
        self.buildout, self.name, self.options = buildout, name, options
        directory = buildout['buildout']['directory']
        download_cache = buildout['buildout'].get('download-cache')

        self.url = self.options['url']
        extra_options = self.options.get('extra_options', '')
        # Get rid of any newlines that may be in the options so they
        # do not get passed through to the command line.
        self.extra_options = ' '.join(extra_options.split())
        self.autogen = self.options.get('autogen', '')
        self.patch = self.options.get('patch', '')
        self.patch_options = self.options.get('patch_options', '-p0')

        # Parse the 'environment' option into NAME=value settings.  A
        # token starts a new setting only if it looks like NAME=... and
        # does not begin with a digit; any other token is appended to
        # the previous setting's value, which lets values contain
        # spaces (e.g. "CFLAGS=-I/a -I/b").
        environ = []
        for token in self.options.get('environment', '').split():
            if (almost_environment_setting(token) and
                    not_starting_with_digit(token)):
                environ.append(token)
            else:
                if environ:
                    environ[-1] += ' ' + token
                else:
                    raise ValueError('Bad environment setting', token)

        if environ:
            self.environ = dict([x.split('=', 1) for x in environ])
        else:
            self.environ = {}

        self.source_directory_contains = self.options.get(
            'source-directory-contains', 'configure')
        self.configure_cmd = self.options.get(
            'configure-command', './configure')
        self.configure_options = self.options.get('configure-options', None)
        if self.configure_options:
            # Normalize embedded newlines/whitespace to single spaces.
            self.configure_options = ' '.join(self.configure_options.split())

        self.shared = options.get('shared', None)
        if self.shared:
            if os.path.isdir(self.shared):
                # To prevent nasty surprises, don't use the directory
                # directly, since we remove it in case of build errors.
                self.shared = os.path.join(self.shared, 'cmmi')
            else:
                if not download_cache:
                    raise ValueError(
                        "Set the 'shared' option of zc.recipe.cmmi"
                        " to an existing"
                        " directory, or set ${buildout:download-cache}")
                self.shared = os.path.join(
                    directory, download_cache, 'cmmi', 'build')
                # Key the shared build directory on the configuration
                # state so different configurations don't collide.
                self.shared = os.path.join(self.shared, self._state_hash())
            location = self.shared
        else:
            location = os.path.join(options.get(
                'location', buildout['buildout']['parts-directory']), name)
        options['location'] = location

    def _state_hash(self):
        """Return a hex digest of our configuration state.

        Used to name shared build directories, so that e.g. different
        ./configure options get a different build directory.  The
        environment items are sorted to keep a consistent order, since
        dictionary iteration order is never guaranteed.
        """
        env = ''.join(['{}{}'.format(key, value) for key, value
                       in sorted(self.environ.items())])
        state = [self.url, self.extra_options, self.autogen,
                 self.patch, self.patch_options, env]
        data = ''.join(state)
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        return sha1(data).hexdigest()

    def install(self):
        """Build the part and return the path buildout should own.

        A shared build directory is deliberately *not* reported as
        installed (empty string returned), so buildout never removes
        it: other parts or buildouts may depend on it.
        """
        self.build()
        if self.shared:
            return ''
        return self.options['location']

    def update(self):
        """Rebuild if the (possibly shared) build directory vanished."""
        if not os.path.isdir(self.options['location']):
            self.build()

    def build(self):
        """Download, unpack, optionally patch/autogen, then cmmi."""
        logger = logging.getLogger(self.name)
        download = zc.buildout.download.Download(
            self.buildout['buildout'], namespace='cmmi', hash_name=True,
            logger=logger)

        if self.shared:
            if os.path.isdir(self.shared):
                logger.info('using existing shared build')
                return self.shared

        fname, is_temp = download(self.url, md5sum=self.options.get('md5sum'))

        # Now unpack and work as normal.
        tmp = tempfile.mkdtemp('buildout-' + self.name)
        logger.info('Unpacking and configuring')
        try:
            setuptools.archive_util.unpack_archive(fname, tmp)
        finally:
            if is_temp:
                os.remove(fname)

        for key, value in sorted(self.environ.items()):
            logger.info('Updating environment: %s=%s', key, value)
        os.environ.update(self.environ)

        # XXX This is probably more complicated than it needs to be. I
        # retained the distinction between makedirs and mkdir when I moved
        # creation of the build dir after downloading the source since I
        # didn't understand the reason for the distinction. (tlotze)
        if self.shared and not os.path.isdir(self.shared):
            os.makedirs(self.shared)
        dest = self.options['location']
        if not os.path.exists(dest):
            os.mkdir(dest)

        try:
            here = os.getcwd()
            os.chdir(tmp)
            try:
                # If the archive unpacked into a single wrapper
                # directory, descend into it before looking for the
                # configure (or autogen) script.
                if not (os.path.exists(self.source_directory_contains) or
                        (self.autogen and os.path.exists(self.autogen))):
                    entries = os.listdir(tmp)
                    if len(entries) == 1 and os.path.isdir(entries[0]):
                        os.chdir(entries[0])

                if self.patch != '':
                    # The patch may be a filesystem path or a URL; URL
                    # patches can go through the download cache.
                    # (A redundant duplicate of this check was removed.)
                    try:
                        self.patch, is_temp = download(
                            self.patch,
                            md5sum=self.options.get('patch-md5sum'))
                    except BaseException:
                        # If download/checksum of the patch fails, leaving
                        # the tmp dir won't be helpful.
                        shutil.rmtree(tmp)
                        raise
                    try:
                        system("patch %s < %s"
                               % (self.patch_options, self.patch))
                    finally:
                        if is_temp:
                            os.remove(self.patch)

                if self.autogen != '':
                    logger.info('auto generating configure files')
                    system("./%s" % self.autogen)

                if not os.path.exists(self.source_directory_contains):
                    entries = os.listdir(tmp)
                    if len(entries) == 1 and os.path.isdir(entries[0]):
                        os.chdir(entries[0])
                    else:
                        raise ValueError("Couldn't find configure")

                self.cmmi(dest)
                shutil.rmtree(tmp)
            finally:
                os.chdir(here)
        except BaseException:
            # Never leave a half-baked build behind: remove the
            # (possibly shared) destination.  The unpacked tree, if it
            # still exists, is kept for debugging.
            shutil.rmtree(dest)
            if os.path.exists(tmp):
                logger.error("cmmi failed: %s", tmp)
            raise

    def cmmi(self, dest):
        """Do the 'configure; make; make install' command sequence.

        When this is called, the current working directory is the
        source directory.  The 'dest' parameter specifies the
        installation prefix.

        This can be overridden by subclasses to support packages whose
        command sequence is different.
        """
        options = self.configure_options
        if options is None:
            options = '--prefix="%s"' % dest
        if self.extra_options:
            options += ' %s' % self.extra_options
        system("{} {}".format(self.configure_cmd, options))
        system("make")
        system("make install")
Various tests
=============
This doctest contains misc tests.
Creating the location folder
----------------------------
When the recipe is subclassed, the `location` folder might be created
before `zc.recipe.cmmi` has a chance to create it, so we need to make
sure it checks that the folder does not exists before it is created.
In the test below, the `foo` folder is created before the recipe
is launched::
>>> location = join(sample_buildout, 'parts', 'foo')
>>> mkdir(location)
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
... log-level = DEBUG
...
... [foo]
... recipe = zc.recipe.cmmi
... url = file://%s/foo.tgz
... """ % (distros))
>>> print(system('bin/buildout'))
Installing...
...
installing foo
<BLANKLINE>
>>> import os.path
>>> os.path.isdir(join(sample_buildout, "parts", "foo"))
True
Removing the parts folder
-------------------------
As a result of featuring shared builds, the handling of zc.recipe.cmmi's
associated file-system paths is not entirely trivial. Let's make sure that
when not sharing the build, the recipe gets the book-keeping of its part
directory right.
The part's directory is created when the part is installed:
>>> remove('.installed.cfg')
>>> rmdir('parts', 'foo')
>>> print(system('bin/buildout'))
Installing...
...
installing foo
>>> os.path.isdir(join(sample_buildout, "parts", "foo"))
True
The part's directory is removed when it is no longer needed (e.g. because the
part now uses a shared build or because the part is gone altogether):
>>> write('buildout.cfg',
... """
... [buildout]
... parts =
... """)
>>> print(system('bin/buildout'))
Uninstalling foo.
>>> os.path.isdir(join(sample_buildout, "parts", "foo"))
False
Spaces in environment variables
-------------------------------
Unfortunately, environment option parsing is simplistic and makes it
hard to include spaces.  We allow spaces if the tokens after spaces
aren't of the form NAME=.....
>>> distros_url = start_server(distros)
>>> write('buildout.cfg',
... """
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.cmmi
... url = %sfoo.tgz
... environment =
... CFLAGS=-I/yyy -I/xxx --x=y 2=1+1 a=b
... """ % distros_url)
>>> print(system('bin/buildout'))
Installing foo.
foo: Downloading http://localhost/foo.tgz
foo: Unpacking and configuring
foo: Updating environment: CFLAGS=-I/yyy -I/xxx --x=y 2=1+1
foo: Updating environment: a=b
configuring foo --prefix=/sample_buildout/parts/foo
echo building foo
building foo
echo installing foo
installing foo
| zc.recipe.cmmi | /zc.recipe.cmmi-4.0.tar.gz/zc.recipe.cmmi-4.0/src/zc/recipe/cmmi/misc.rst | misc.rst |
***********************
Unix Deployment Support
***********************
.. contents::
The zc.recipe.deployment recipe provides support for deploying
applications with multiple processes on Unix systems. (Perhaps support
for other systems will be added later.) It creates directories to hold
application instance configuration, log and run-time files. It also
sets or reads options that can be read by other programs to find out
where to place files:
``cache-directory``
The name of the directory where application instances should write
cached copies of replaceable data.  This defaults to /var/cache/NAME,
where NAME is the deployment name.
``crontab-directory``
The name of the directory in which cron jobs should be placed.
This defaults to /etc/cron.d.
``etc-directory``
The name of the directory where configuration files should be
placed. This defaults to /etc/NAME, where NAME is the deployment
name.
``etc-prefix``
The path of the directory where configuration should be stored for
all applications. This defaults to /etc.
``lib-directory``
The name of the directory where application instances should write
valuable data. This defaults to /var/lib/NAME, where NAME is the
deployment name.
``log-directory``
The name of the directory where application instances should write
their log files. This defaults to /var/log/NAME, where NAME is the
deployment name.
``logrotate-directory``
The name of the directory where logrotate configuration files
should be placed, typically, /etc/logrotate.d.
``run-directory``
The name of the directory where application instances should put
their run-time files such as pid files and inter-process
communication socket files. This defaults to /var/run/NAME, where
NAME is the deployment name.
``rc-directory``
The name of the directory where run-control scripts should be
installed. This defaults to /etc/init.d.
``var-prefix``
The path of the directory where data should be stored for all
applications. This defaults to /var.
Directories traditionally placed in the /var hierarchy are created in
such a way that the directories are owned by the user specified in the
``user`` option and are writable by the user and the user's group.
Directories usually found in the /etc hierarchy are created owned by the
user specified by the ``etc-user`` setting (defaulting to 'root') with the
same permissions.
A system-wide configuration file, zc.recipe.deployment.cfg, located in
the ``etc-prefix`` directory, can be used to specify the ``var-prefix``
setting. The file uses the Python-standard ConfigParser syntax::
[deployment]
var-prefix = /mnt/fatdisk
Note that the section name is not related to the name of the deployment
parts being built; this is a system-wide setting not specific to any
deployment. This is useful to identify very large partitions where
control over /var itself is difficult to achieve.
| zc.recipe.deployment | /zc.recipe.deployment-1.3.0.tar.gz/zc.recipe.deployment-1.3.0/README.rst | README.rst |
Using the deployment recipe is pretty simple. Just specify a
deployment name, specified via the part name, and a deployment user.
Let's add a deployment to a sample buildout:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo
...
... [foo]
... prefix = %s
... recipe = zc.recipe.deployment
... user = %s
... etc-user = %s
... ''' % (sample_buildout, user, user))
>>> from six import print_
>>> print_(system(join('bin', 'buildout')), end='')
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Note that we are providing a prefix and an etc-user here. These options
default to '/' and 'root', respectively.
Now we can see that directories named foo in PREFIX/etc, PREFIX/var/log and
PREFIX/var/run have been created:
>>> import os
>>> print_(ls(os.path.join(sample_buildout, 'etc/foo')))
drwxr-xr-x USER GROUP PREFIX/etc/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/cache/foo')))
drwxr-xr-x USER GROUP PREFIX/var/cache/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/lib/foo')))
drwxr-xr-x USER GROUP PREFIX/var/lib/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/log/foo')))
drwxr-xr-x USER GROUP PREFIX/var/log/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/run/foo')))
drwxr-x--- USER GROUP PREFIX/var/run/foo
By looking at .installed.cfg, we can see the options available for use
by other recipes:
>>> cat('.installed.cfg') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[buildout]
...
[foo]
__buildout_installed__ =
...
cache-directory = PREFIX/var/cache/foo
crontab-directory = PREFIX/etc/cron.d
etc-directory = PREFIX/etc/foo
etc-prefix = PREFIX/etc
etc-user = USER
lib-directory = PREFIX/var/lib/foo
log-directory = PREFIX/var/log/foo
logrotate-directory = PREFIX/etc/logrotate.d
name = foo
prefix = PREFIX
rc-directory = PREFIX/etc/init.d
recipe = zc.recipe.deployment
run-directory = PREFIX/var/run/foo
user = USER
var-prefix = PREFIX/var
If we uninstall, then the directories are removed.
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/log/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/run/foo'.
>>> import os
>>> os.path.exists(os.path.join(sample_buildout, 'etc/foo'))
False
>>> os.path.exists(os.path.join(sample_buildout, 'var/cache/foo'))
False
>>> os.path.exists(os.path.join(sample_buildout, 'var/lib/foo'))
False
>>> os.path.exists(os.path.join(sample_buildout, 'var/log/foo'))
False
>>> os.path.exists(os.path.join(sample_buildout, 'var/run/foo'))
False
The cache, lib, log and run directories are only removed if they are empty.
To see that, we'll put a file in each of the directories created:
>>> print_(system(join('bin', 'buildout')), end='')
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
>>> write(os.path.join(sample_buildout, 'etc/foo/x'), '')
>>> write(os.path.join(sample_buildout, 'var/cache/foo/x'), '')
>>> write(os.path.join(sample_buildout, 'var/lib/foo/x'), '')
>>> write(os.path.join(sample_buildout, 'var/log/foo/x'), '')
>>> write(os.path.join(sample_buildout, 'var/run/foo/x'), '')
And then uninstall:
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Can't remove non-empty directory 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Can't remove non-empty directory 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Can't remove non-empty directory 'PREFIX/var/log/foo'.
zc.recipe.deployment: Can't remove non-empty directory 'PREFIX/var/run/foo'.
>>> os.path.exists(os.path.join(sample_buildout, 'etc/foo'))
False
>>> print_(ls(os.path.join(sample_buildout, 'var/cache/foo')))
drwxr-xr-x USER GROUP PREFIX/var/cache/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/lib/foo')))
drwxr-xr-x USER GROUP PREFIX/var/lib/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/log/foo')))
drwxr-xr-x USER GROUP PREFIX/var/log/foo
>>> print_(ls(os.path.join(sample_buildout, 'var/run/foo')))
drwxr-x--- USER GROUP PREFIX/var/run/foo
Here we see that the var and run directories are kept. The etc
directory is discarded because only buildout recipes should write to
it and all of its data are expendible.
If we reinstall, remove the files, and uninstall, then the directories
are removed:
>>> print_(system(join('bin', 'buildout')), end='')
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
>>> os.remove(os.path.join(sample_buildout, 'var/cache/foo/x'))
>>> os.remove(os.path.join(sample_buildout, 'var/lib/foo/x'))
>>> os.remove(os.path.join(sample_buildout, 'var/log/foo/x'))
>>> os.remove(os.path.join(sample_buildout, 'var/run/foo/x'))
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/log/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/run/foo'.
>>> os.path.exists('' + os.path.join(sample_buildout, 'PREFIX/etc/foo'))
False
>>> os.path.exists('' + os.path.join(sample_buildout, 'PREFIX/var/cache/foo'))
False
>>> os.path.exists('' + os.path.join(sample_buildout, 'PREFIX/var/lib/foo'))
False
>>> os.path.exists('' + os.path.join(sample_buildout, 'PREFIX/var/log/foo'))
False
>>> os.path.exists('' + os.path.join(sample_buildout, 'PREFIX/var/run/foo'))
False
Prior to zc.recipe.deployment 0.10.0, some directories (eg., cache-directory,
lib-directory) were not managed by zc.recipe.deployment. So on uninstall, we
can expect any nonexistent directory keys to be silently ignored.
>>> _ = system(join('bin', 'buildout')), # doctest: +NORMALIZE_WHITESPACE
>>> new_installed_contents = ""
>>> with open(
... os.path.join(sample_buildout, ".installed.cfg")) as fi:
... for line in fi.readlines():
... if (not line.startswith("cache-directory = ") and
... not line.startswith("lib-directory = ")):
... new_installed_contents += line
>>> with open(
... os.path.join(sample_buildout, ".installed.cfg"), 'w') as fi:
... _ = fi.write(new_installed_contents)
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/etc/foo'
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/etc/cron.d'.
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/etc/init.d'.
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/etc/logrotate.d'.
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/var/log/foo'.
zc.recipe.deployment: Removing '/tmp/tmpcokpi_buildoutSetUp/_TEST_/sample-buildout/var/run/foo'.
We'll finish the cleanup our modified .installed.cfg missed.
>>> os.removedirs(os.path.join(sample_buildout, 'var/cache/foo'))
>>> os.removedirs(os.path.join(sample_buildout, 'var/lib/foo'))
Deployment Name
===============
The deployment name is used for naming generated files and directories.
The deployment name defaults to the section name, but the deployment
name can be specified explicitly:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... name = bar
... user = %s
... etc-user = %s
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/bar',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
>>> print_(ls(os.path.join(sample_buildout, 'etc/bar')))
drwxr-xr-x USER GROUP PREFIX/etc/bar
>>> print_(ls(os.path.join(sample_buildout, 'var/cache/bar')))
drwxr-xr-x USER GROUP PREFIX/var/cache/bar
>>> print_(ls(os.path.join(sample_buildout, 'var/lib/bar')))
drwxr-xr-x USER GROUP PREFIX/var/lib/bar
>>> print_(ls(os.path.join(sample_buildout, 'var/log/bar')))
drwxr-xr-x USER GROUP PREFIX/var/log/bar
>>> print_(ls(os.path.join(sample_buildout, 'var/run/bar')))
drwxr-x--- USER GROUP PREFIX/var/run/bar
>>> cat('.installed.cfg') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[buildout]
installed_develop_eggs =
parts = foo
<BLANKLINE>
[foo]
__buildout_installed__ =
...
cache-directory = PREFIX/var/cache/bar
crontab-directory = PREFIX/etc/cron.d
etc-directory = PREFIX/etc/bar
etc-prefix = PREFIX/etc
etc-user = USER
lib-directory = PREFIX/var/lib/bar
log-directory = PREFIX/var/log/bar
logrotate-directory = PREFIX/etc/logrotate.d
name = bar
prefix = PREFIX
rc-directory = PREFIX/etc/init.d
recipe = zc.recipe.deployment
run-directory = PREFIX/var/run/bar
user = USER
var-prefix = PREFIX/var
Note (here and earlier) that the options include the name option,
which defaults to the part name. Other parts that use the deployment
name should use the name option rather than the part name.
Configuration files
===================
Normally, configuration files are created by specialized recipes.
Sometimes, it's useful to specify configuration files in a buildout
configuration file. The zc.recipe.deployment:configuration recipe can be
used to do that.
Let's add a configuration file to our buildout:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... text = xxx
... yyy
... zzz
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/bar'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/bar'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/bar'.
zc.recipe.deployment: Removing 'PREFIX/var/log/bar'.
zc.recipe.deployment: Removing 'PREFIX/var/run/bar'.
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Installing x.cfg.
By default, the configuration is installed as a part:
>>> cat('parts', 'x.cfg')
xxx
yyy
zzz
If a deployment is specified, then the file is placed in the
deployment etc directory:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... text = xxx
... yyy
... zzz
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
>>> os.path.exists(join('parts', 'x.cfg'))
False
>>> cat(os.path.join(sample_buildout, 'etc/foo/x.cfg'))
xxx
yyy
zzz
If a directory is specified, then the file is placed in the directory.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... text = xxx
... yyy
... zzz
... directory = etc/foobar
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Creating 'PREFIX/etc/foobar',
mode 755, user 'USER', group 'GROUP'
>>> os.path.exists(join('parts', 'x.cfg'))
False
>>> os.path.exists(join(sample_buildout, 'etc/foo/x.cfg'))
False
>>> cat(os.path.join(sample_buildout, 'etc/foobar/x.cfg'))
xxx
yyy
zzz
A directory option works only with a deployment option.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... text = xxx
... yyy
... zzz
... directory = etc/foobar
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
>>> os.path.exists(join('parts', 'x.cfg'))
True
>>> os.path.exists(join(sample_buildout, 'etc/foobar/x.cfg'))
False
>>> cat('parts', 'x.cfg')
xxx
yyy
zzz
We can read data from a file rather than specifying in the
configuration:
>>> write('x.in', '1\n2\n3\n')
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... file = x.in
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
>>> cat(os.path.join(sample_buildout, 'etc/foo/x.cfg'))
1
2
3
The recipe sets a location option that can be used by other recipes:
>>> cat('.installed.cfg') # doctest: +ELLIPSIS
[buildout]
...
[x.cfg]
...
location = PREFIX/etc/foo/x.cfg
...
By default, the part name is used as the file name. You can specify a
name explicitly using the name option:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... name = y.cfg
... text = this is y
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
>>> cat(os.path.join(sample_buildout, 'etc/foo/y.cfg'))
this is y
If name is given, only the file so named is created:
>>> os.path.exists(os.path.join(sample_buildout, 'etc', 'foo', 'x.cfg'))
False
The name can be a path, or even absolute:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... name = ${buildout:directory}/y.cfg
... text = this is y also
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
>>> cat('y.cfg')
this is y also
If the content of the configuration file is unchanged between builds,
and the path hasn't been changed, the file isn't actually written in
subsequent builds. This is helpful if processes that use the file watch
for changes.
>>> mod_time = os.stat('y.cfg').st_mtime
>>> print_(system(join('bin', 'buildout')), end='')
Updating foo.
Updating x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
>>> os.stat('y.cfg').st_mtime == mod_time
True
Running a command when a configuration file changes
---------------------------------------------------
Often, when working with configuration files, you'll need to restart
processes when configuration files change. You can specify an
``on-change`` option that takes a command to run whenever a
configuration file changes:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... name = ${buildout:directory}/y.cfg
... text = this is y
... deployment = foo
... on-change = echo /etc/init.d/x start
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
/etc/init.d/x start
.. test
If we run this again, so the file doesn't change, then the command
isn't run:
>>> print_(system(join('bin', 'buildout')), end='')
... # doctest: +NORMALIZE_WHITESPACE
Updating foo.
Updating x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
If we screw up the command, buildout will see it:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo x.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [x.cfg]
... recipe = zc.recipe.deployment:configuration
... name = ${buildout:directory}/y.cfg
... text = this is y
... deployment = foo
... on-change = echoxxx /etc/init.d/x start
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Uninstalling x.cfg.
Updating foo.
Installing x.cfg.
zc.recipe.deployment:
Updating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
... echoxxx: not found
While:
Installing x.cfg.
<BLANKLINE>
An internal error occurred due to a bug in either zc.buildout or in a
recipe being used:
Traceback (most recent call last):
...
SystemError: 'echoxxx /etc/init.d/x start' failed
Cron support
============
The crontab recipe provides support for creating crontab files. It
uses a times option to specify times to run the command and a command
option containing the command.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo cron
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [cron]
... recipe = zc.recipe.deployment:crontab
... times = 30 23 * * *
... command = echo hello world!
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Updating foo.
Installing cron.
This example creates ``PREFIX/etc/cron.d/foo-cron``:
>>> open(os.path.join(sample_buildout, 'etc/cron.d/foo-cron')).read()
'30 23 * * *\tUSER\techo hello world!\n'
.. make sure cron recipe honors deployment name option:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo cron
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... name = bar
... user = %s
... etc-user = %s
...
... [cron]
... recipe = zc.recipe.deployment:crontab
... times = 30 23 * * *
... command = echo hello world!
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling cron.
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/log/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/run/foo'.
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/bar',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/bar',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Installing cron.
>>> open(os.path.join(sample_buildout, 'etc/cron.d/bar-cron')).read()
'30 23 * * *\tUSER\techo hello world!\n'
The crontab recipe gets its user from the deployment section by default,
but an explicit ``user`` option on the crontab part overrides it.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo cron
...
... [foo]
... recipe = zc.recipe.deployment
... name = bar
... prefix = %s
... user = %s
... etc-user = %s
...
... [cron]
... recipe = zc.recipe.deployment:crontab
... times = 30 23 * * *
... user = bob
... command = echo hello world!
... deployment = foo
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling cron.
Updating foo.
Installing cron.
>>> open('etc/cron.d/bar-cron').read()
'30 23 * * *\tbob\techo hello world!\n'
.. edge case
uninstall with no stored prefix
>>> installed = [l for l in open('.installed.cfg')
... if not l.startswith('prefix =')]
>>> _ = open('.installed.cfg', 'w').write(''.join(installed))
uninstall with some directories already gone:
>>> rmdir(sample_buildout, 'etc', 'bar')
>>> rmdir(sample_buildout, 'var', 'run')
>>> write('buildout.cfg',
... '''
... [buildout]
... parts =
... ''')
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling cron.
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/var/cache/bar'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/bar'.
zc.recipe.deployment: Removing 'PREFIX/var/log/bar'.
.. cleanup
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
>>> os.path.exists(os.path.join(sample_buildout, 'etc/cron.d/bar-cron'))
False
SharedConfig
============
This recipe can be used to update configuration files that are shared by
multiple applications. The absolute path of the file must be specified.
Also, the configuration files must accept comments that start with "#".
Like the configuration recipe, the content to add in the configuration file can
be provided using the "text" or the "file" option.
First let's create a file that will be used as the shared configuration file.
>>> _ = open('y.cfg', 'w').write(
... '''Some
... existing
... configuration
... ''')
We now create our buildout configuration and use the "sharedconfig" recipe and
run buildout.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo y.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [y.cfg]
... recipe = zc.recipe.deployment:sharedconfig
... path = y.cfg
... deployment = foo
... text = xxx
... yyy
... zzz
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Updating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Installing y.cfg.
>>> print_(open('y.cfg', 'r').read())
Some
existing
configuration
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
xxx
yyy
zzz
#TILL HERE foo_y.cfg]#
<BLANKLINE>
Running buildout again without modifying the configuration leaves the file the
same.
>>> print_(system(join('bin', 'buildout')), end='')
Updating foo.
Updating y.cfg.
>>> print_(open('y.cfg', 'r').read())
Some
existing
configuration
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
xxx
yyy
zzz
#TILL HERE foo_y.cfg]#
<BLANKLINE>
If we add some more lines to the file
>>> _ = open('y.cfg', 'a').write(
... '''Some
... additional
... configuration
... ''')
and run buildout again, but this time after modifying the configuration for
"y.cfg", the sections will be moved to the end of the file.
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo y.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [y.cfg]
... recipe = zc.recipe.deployment:sharedconfig
... path = y.cfg
... deployment = foo
... text = 111
... 222
... 333
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling y.cfg.
Running uninstall recipe.
Updating foo.
Installing y.cfg.
>>> print_(open('y.cfg', 'r').read())
Some
existing
configuration
Some
additional
configuration
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
111
222
333
#TILL HERE foo_y.cfg]#
<BLANKLINE>
The text to append to the shared configuration file can also be provided via a
file.
>>> write('x.cfg', '''
... [foo]
... a = 1
... b = 2
...
... [log]
... c = 1
... ''')
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo y.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [y.cfg]
... recipe = zc.recipe.deployment:sharedconfig
... path = %s/etc/z.cfg
... deployment = foo
... file = x.cfg
... ''' % (sample_buildout, user, user, sample_buildout))
>>> print_(system(join('bin', 'buildout')), end='')
While:
Installing.
Getting section y.cfg.
Initializing section y.cfg.
Error: Path 'PREFIX/etc/z.cfg' does not exist
Oops. The path of the configuration file must exist. Let's create one.
>>> write(join(sample_buildout, 'etc', 'z.cfg'), '')
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling y.cfg.
Running uninstall recipe.
Updating foo.
Installing y.cfg.
>>> print_(open(join(sample_buildout, 'etc', 'z.cfg'), 'r').read())
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
<BLANKLINE>
[foo]
a = 1
b = 2
<BLANKLINE>
[log]
c = 1
<BLANKLINE>
#TILL HERE foo_y.cfg]#
<BLANKLINE>
While uninstalling, only the lines that the recipe installed are removed.
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling y.cfg.
Running uninstall recipe.
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/log/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/run/foo'.
But the files are not deleted.
>>> os.path.exists('y.cfg')
True
>>> print_(open('y.cfg', 'r').read())
Some
existing
configuration
Some
additional
configuration
<BLANKLINE>
>>> os.path.exists(join(sample_buildout, 'etc', 'z.cfg'))
True
>>> print_(open(join(sample_buildout, 'etc', 'z.cfg'), 'r').read())
<BLANKLINE>
Edgecases
---------
The SharedConfig recipe checks whether the current data in the file
ends with a newline.  If it doesn't, the recipe adds one.  This is in
addition to the blank line the recipe adds before the section to enhance
readability.
>>> _ = open('anotherconfig.cfg', 'w').write('one')
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo y.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [y.cfg]
... recipe = zc.recipe.deployment:sharedconfig
... path = anotherconfig.cfg
... deployment = foo
... text = I predict that there will be a blank line above this.
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Installing foo.
zc.recipe.deployment:
Creating 'PREFIX/etc/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/foo',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/foo',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Installing y.cfg.
>>> print_(open('anotherconfig.cfg').read())
one
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
I predict that there will be a blank line above this.
#TILL HERE foo_y.cfg]#
<BLANKLINE>
But the recipe doesn't add a new line if there was one already at the end.
>>> _ = open('anotherconfig.cfg', 'w').write('ends with a new line\n')
>>> print_(open('anotherconfig.cfg').read())
ends with a new line
<BLANKLINE>
We modify the buildout configuration so that "install" is invoked again:
>>> write('buildout.cfg',
... '''
... [buildout]
... parts = foo y.cfg
...
... [foo]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [y.cfg]
... recipe = zc.recipe.deployment:sharedconfig
... path = anotherconfig.cfg
... deployment = foo
... text = there will still be only a single blank line above.
... ''' % (sample_buildout, user, user))
>>> print_(system(join('bin', 'buildout')), end='')
Uninstalling y.cfg.
Running uninstall recipe.
Updating foo.
Installing y.cfg.
>>> print_(open('anotherconfig.cfg').read())
ends with a new line
<BLANKLINE>
#[foo_y.cfg DO NOT MODIFY LINES FROM HERE#
there will still be only a single blank line above.
#TILL HERE foo_y.cfg]#
<BLANKLINE>
If we uninstall the file, the data will be the same as "original_data":
>>> print_(system(join('bin', 'buildout')+' buildout:parts='), end='')
Uninstalling y.cfg.
Running uninstall recipe.
Uninstalling foo.
Running uninstall recipe.
zc.recipe.deployment: Removing 'PREFIX/etc/foo'
zc.recipe.deployment: Removing 'PREFIX/etc/cron.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/init.d'.
zc.recipe.deployment: Removing 'PREFIX/etc/logrotate.d'.
zc.recipe.deployment: Removing 'PREFIX/var/cache/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/lib/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/log/foo'.
zc.recipe.deployment: Removing 'PREFIX/var/run/foo'.
>>> print_(open('anotherconfig.cfg').read())
ends with a new line
<BLANKLINE>
| zc.recipe.deployment | /zc.recipe.deployment-1.3.0.tar.gz/zc.recipe.deployment-1.3.0/src/zc/recipe/deployment/README.txt | README.txt |
from six.moves import configparser as ConfigParser
import errno
import grp
import logging
import os
import pwd
import shutil
import zc.buildout
logger = logging.getLogger('zc.recipe.deployment')


def deprecated(name, instead=None):
    """Log a warning that the option ``name`` is deprecated.

    :param name: name of the deprecated buildout option that was found.
    :param instead: name of the replacement option that was used in its
        place; when given, the message notes which option won.
    """
    if instead:
        msg = ("found deprecated '%s' setting (used '%s' instead)"
               % (name, instead))
    else:
        msg = "using deprecated '%s' setting" % name
    # Logger.warn() is a deprecated alias; warning() is the supported name.
    logger.warning(msg)
class Install:
    """Buildout recipe creating the standard deployment directory layout.

    Computes and publishes the ``*-directory`` options (etc, crontab, rc,
    logrotate, cache, lib, log, run) relative to ``prefix``, then creates
    those directories with the requested ownership and modes.
    """

    def __init__(self, buildout, name, options):
        self.options = options
        # The deployment name defaults to the part name but may be set
        # explicitly via the ``name`` option.
        if not options.get('name'):
            options['name'] = name
        name = options['name']
        prefix = options.get('prefix')
        if not prefix:
            prefix = '/'
        options['prefix'] = prefix
        # ``etc-prefix`` supersedes the deprecated ``etc`` option; ``etc``
        # is honored (with a warning) only when ``etc-prefix`` is absent.
        etc_prefix = options.get('etc-prefix')
        if not etc_prefix:
            etc_prefix = options.get('etc')
            if etc_prefix:
                deprecated('etc')
            else:
                etc_prefix = 'etc'
        elif options.get('etc'):
            deprecated('etc', 'etc-prefix')
        etc = os.path.join(prefix, etc_prefix)
        # A system-wide configuration file may override ``var-prefix``
        # (and only ``var-prefix``) when the buildout doesn't set it.
        cfg = os.path.join(etc, "zc.recipe.deployment.cfg")
        cp = ConfigParser.RawConfigParser()
        cp.optionxform = str
        cp.read(cfg)
        if cp.has_section("deployment"):
            for key in sorted(cp.options("deployment")):
                if key == "var-prefix":
                    value = cp.get("deployment", key)
                    if value and not options.get(key):
                        options[key] = value
                else:
                    raise zc.buildout.UserError(
                        "disallowed option %r in system configuration" % key)
        var = os.path.join(prefix, options.get('var-prefix') or 'var')
        if options.get('var-prefix'):
            # ``var-prefix`` wins over the deprecated ``log``/``run`` options.
            if options.get('log'):
                deprecated('log', 'var-prefix')
            log = os.path.join(var, "log")
            if options.get('run'):
                deprecated('run', 'var-prefix')
            run = os.path.join(var, "run")
        else:
            # Deprecated ``log``/``run`` options still take effect here,
            # unless the newer ``*-directory`` options are also present.
            if options.get('log'):
                if options.get('log-directory'):
                    deprecated('log', 'log-directory')
                else:
                    deprecated('log')
            log = os.path.join(prefix, options.get('log') or 'var/log')
            if options.get('run'):
                if options.get('run-directory'):
                    deprecated('run', 'run-directory')
                else:
                    deprecated('run')
            run = os.path.join(prefix, options.get('run') or 'var/run')

        def directory(key, base, *tail):
            # Publish ``<key>-directory``: an explicit setting is taken
            # relative to ``prefix``; otherwise join base + tail.
            key += '-directory'
            setting = options.get(key)
            if setting:
                path = os.path.join(prefix, setting)
            else:
                path = os.path.join(base, *tail)
            options[key] = path

        options['etc-prefix'] = etc
        options['var-prefix'] = var
        # /etc hierarchy
        directory('crontab', etc, 'cron.d')
        directory('etc', etc, name)
        directory('logrotate', etc, 'logrotate.d')
        directory('rc', etc, 'init.d')
        # /var hierarchy
        directory('cache', var, 'cache', name)
        directory('lib', var, 'lib', name)
        directory('log', log, name)
        directory('run', run, name)

    def install(self):
        """Create all deployment directories; roll back on failure.

        Directories under /var are owned by ``user``; those under /etc by
        ``etc-user`` (default ``root``).  ``var/run`` gets mode 750, the
        rest 755.  Returns () — uninstall is handled by the hook below.
        """
        options = self.options
        run_user = options['user']
        etc_user = options.get('etc-user', 'root')
        run_uid, run_gid = pwd.getpwnam(run_user)[2:4]
        etc_uid, etc_gid = pwd.getpwnam(etc_user)[2:4]
        created = []
        try:
            make_dir(options['etc-directory'], etc_uid, etc_gid, 0o755, created)
            make_dir(options['cache-directory'],
                     run_uid, run_gid, 0o755, created)
            make_dir(options['lib-directory'], run_uid, run_gid, 0o755, created)
            make_dir(options['log-directory'], run_uid, run_gid, 0o755, created)
            make_dir(options['run-directory'], run_uid, run_gid, 0o750, created)
            # Shared /etc subdirectories are only managed for non-root
            # prefixes; with prefix '/', they belong to the system.
            if options['prefix'] != '/':
                make_dir(options['crontab-directory'],
                         etc_uid, etc_gid, 0o755, created)
                make_dir(options['rc-directory'],
                         etc_uid, etc_gid, 0o755, created)
                make_dir(options['logrotate-directory'],
                         etc_uid, etc_gid, 0o755, created)
        except Exception:
            # Best-effort rollback of whatever this run created.
            for d in created:
                try:
                    shutil.rmtree(d)
                except OSError:
                    # parent directory may have already been removed
                    pass
            raise
        return ()

    def update(self):
        pass
def uninstall(name, options):
    """Buildout uninstall hook removing the deployment's directories.

    The etc directory is removed recursively; the remaining directories
    are removed only when empty (a warning is logged otherwise).  The
    shared /etc subdirectories (cron.d, init.d, logrotate.d) are only
    touched for non-root prefixes, matching ``Install.install``.
    """
    path = options['etc-directory']
    if os.path.isdir(path):
        shutil.rmtree(path)
        logger.info("Removing %r", path)
    directories = ()
    if options.get('prefix', '/') != '/':
        directories = ('crontab', 'rc', 'logrotate')
    for d in directories + ('cache', 'lib', 'log', 'run'):
        path = options.get(d + '-directory')
        if not path:
            # Tolerate older .installed.cfg entries missing this option.
            continue
        if os.path.isdir(path):
            if os.listdir(path):
                # Logger.warn() is deprecated; warning() is the
                # supported spelling.
                logger.warning("Can't remove non-empty directory %r.", path)
            else:
                os.rmdir(path)
                logger.info("Removing %r.", path)
def make_dir(name, uid, gid, mode, created):
    """Ensure directory ``name`` exists with the given mode and ownership.

    :param name: directory path to create or update.
    :param uid: numeric owner; resolved to a name only for logging.
    :param gid: numeric group; resolved to a name only for logging.
    :param mode: permission bits to apply (e.g. 0o755).
    :param created: list the newly created path is appended to, so the
        caller can roll back on a later failure.
    """
    uname = pwd.getpwuid(uid)[0]
    gname = grp.getgrgid(gid)[0]
    if not os.path.isdir(name):
        os.makedirs(name, mode)
        # os.makedirs masks ``mode`` with the process umask, so chmod
        # explicitly to guarantee the requested permissions.
        os.chmod(name, mode)
        created.append(name)
        logger.info('\n Creating %r,\n mode %o, user %r, group %r',
                    name, mode, uname, gname)
    else:
        os.chmod(name, mode)
        logger.info('\n Updating %r,\n mode %o, user %r, group %r',
                    name, mode, uname, gname)
    os.chown(name, uid, gid)
class Configuration:
    """Buildout recipe writing a configuration file from text or a file.

    Without a ``deployment`` option the file goes into the buildout
    parts directory; with one, into the deployment's etc directory or a
    prefix-relative ``directory`` if given.  Publishes ``location`` for
    use by other recipes.
    """

    def __init__(self, buildout, name, options):
        self.options = options
        deployment = options.get('deployment')
        if deployment:
            # Inherit ownership and prefix from the deployment part.
            options['etc-user'] = buildout[deployment].get('etc-user', 'root')
            options['prefix'] = buildout[deployment].get('prefix', '/')
            directory = options.get("directory")
            if directory:
                directory = os.path.join(options['prefix'], directory)
            else:
                directory = os.path.join(
                    buildout[deployment]['etc-directory'])
        else:
            # No deployment: a bare ``directory`` option is ignored and
            # the file is written as an ordinary part.
            directory = os.path.join(
                buildout['buildout']['parts-directory'])
        options["directory"] = directory
        # ``name`` may be a relative path or even absolute; an absolute
        # name makes os.path.join ignore ``directory``.
        options["location"] = os.path.join(directory, options.get('name', name))

    def install(self):
        """Write the configuration file, creating its directory if needed.

        The content comes from the ``file`` option (path to read) or the
        ``text`` option — specifying both is an error.  The file is only
        rewritten when its content changed, and in that case the optional
        ``on-change`` shell command is run; a non-zero exit raises
        SystemError so buildout sees the failure.
        """
        options = self.options
        # ``mode`` is an open()-mode suffix (e.g. 'b' for binary data).
        mode = options.get('mode', '')
        if 'file' in options:
            if 'text' in options:
                raise zc.buildout.UserError(
                    "Cannot specify both file and text options")
            with open(options['file'], 'r'+mode) as f:
                text = f.read()
        else:
            text = options['text']
        deployment = options.get('deployment')
        if deployment:
            etc_user = options['etc-user']
            etc_uid, etc_gid = pwd.getpwnam(etc_user)[2:4]
            created = []
            try:
                make_dir(options['directory'], etc_uid, etc_gid, 0o755, created)
            except Exception:
                # Roll back any directory this run created.
                for d in created:
                    try:
                        shutil.rmtree(d)
                    except OSError:
                        # parent directory may have already been removed
                        pass
                raise
        # Read the existing file (if any) to detect whether content
        # changed; a missing file is treated as "changed".
        try:
            with open(options['location'], 'r'+mode) as f:
                original = f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            original = None
        if original != text:
            with open(options['location'], 'w'+mode) as f:
                f.write(text)
            on_change = options.get('on-change')
            if on_change:
                if os.system(on_change):
                    raise SystemError("%r failed" % on_change)
        return options['location']

    update = install
class Crontab:
    """Buildout recipe writing a crontab file into the deployment's
    crontab directory.

    The file is named ``<deployment name>-<part name>`` and contains a
    single cron.d-style entry: ``<times>\\t<user>\\t<command>``.
    """

    def __init__(self, buildout, name, options):
        self.options = options
        deployment = options['deployment']
        # The cron job runs as the deployment's user unless the part
        # supplies its own ``user`` option.
        user = options.get('user', buildout[deployment]['user'])
        deployment_name = buildout[deployment]['name']
        options['location'] = os.path.join(
            buildout[deployment]['crontab-directory'],
            deployment_name + '-' + name)
        options['entry'] = '%s\t%s\t%s\n' % (
            options['times'], user, options['command'])

    def install(self):
        """Write the crontab entry and return its path for uninstall."""
        options = self.options
        # Use a context manager so the file handle is closed promptly
        # (the previous code relied on garbage collection).
        with open(options['location'], 'w') as f:
            f.write(options['entry'])
        return options['location']

    update = install
# Comment markers bracketing the section a SharedConfig part owns inside
# a shared file; %s is the part's entry name.
begin_marker = '#[%s DO NOT MODIFY LINES FROM HERE#'
end_marker = '#TILL HERE %s]#'


class SharedConfig:
    """Buildout recipe appending a marked section to a shared config file.

    The target file (``path``) must already exist and must tolerate
    ``#`` comments.  Content comes from the ``text`` option or a ``file``
    to read; the section is wrapped in begin/end markers so that the
    uninstall hook can remove exactly what was added.
    """

    def __init__(self, buildout, name, options):
        self.options = options
        deployment = options.get('deployment')
        # Unique marker label: <deployment name>_<part name>.
        options['entry_name'] = '%s_%s' % (buildout[deployment]['name'], name)
        if not os.path.exists(options['path']):
            raise zc.buildout.UserError(
                "Path '%s' does not exist" % options['path'])
        options['location'] = options['path']

    def install(self):
        """Append the marked section to the shared file.

        Ensures the existing data ends with a newline before appending.
        Returns () so buildout does not delete the shared file on
        uninstall; cleanup is done by ``uninstall_shared_config``.
        """
        options = self.options
        if 'file' in options:
            if 'text' in options:
                raise zc.buildout.UserError(
                    "Cannot specify both file and text options")
            # Context manager closes the source file promptly (the
            # previous code leaked the handle).
            with open(options['file'], 'r') as source:
                text = source.read()
        else:
            text = options['text']
        with open(options['location'], 'r+') as config_file:
            current_data = config_file.read()
            new_data = ''
            if current_data and current_data[-1] != '\n':
                new_data += '\n'
            new_data += self._wrap_with_comments(options['entry_name'], text)
            # 'r+' + full read leaves the position at EOF, so this appends.
            config_file.write(new_data)
        return ()

    def _wrap_with_comments(self, entry_name, text):
        # Leading '\n' gives a blank separator line before the section.
        return '\n%s\n%s\n%s\n' % (
            begin_marker % entry_name, text, end_marker % entry_name)

    def update(self):
        pass
def uninstall_shared_config(name, options):
    """Buildout uninstall hook removing a SharedConfig part's section.

    Rewrites ``options['location']`` with only this part's marked block
    (and the blank separator line added before it) removed; all other
    content is preserved and the file itself is never deleted.
    """
    with open(options['location'], 'r') as f:
        old_config = f.readlines()
    new_config = []
    in_block = False
    for line in old_config:
        if line.startswith('#[%s' % options['entry_name']):
            # Drop the blank separator line install added — guarding
            # against the block sitting at the very top of the file,
            # where new_config is still empty (the old code raised
            # IndexError here).
            if new_config and new_config[-1] == '\n':
                new_config.pop()
            in_block = True
        elif line.strip().endswith('%s]#' % options['entry_name']):
            in_block = False
        elif not in_block:
            new_config.append(line)
    with open(options['location'], 'w') as f:
        f.write(''.join(new_config))
Change History
**************
2.0.7 (2018-07-02)
==================
- For the 2.0.6 change, we require zc.buildout 2.12.0. The `install_requires`
in `setup.py` now also says that.
2.0.6 (2018-07-02)
==================
- Added extra keyword argument ``allow_unknown_extras`` to support zc.buildout
2.12.0.
2.0.5 (2017-12-04)
==================
- Fixed #429: added sorting of working set by priority of different
type of paths (develop-eggs-directory, eggs-directory, other paths).
2.0.4 (2017-08-17)
==================
- Fixed #153: buildout should cache working set environments
[rafaelbco]
2.0.3 (2015-10-02)
==================
- Releasing zc.recipe.egg as a wheel in addition to only an sdist. No
functional changes.
[reinout]
2.0.2 (2015-07-01)
==================
- Fixed: In ``zc.recipe.egg#custom`` recipe's ``rpath`` support, don't
assume path elements are buildout-relative if they start with one of the
"special" tokens (e.g., ``$ORIGIN``). See:
https://github.com/buildout/buildout/issues/225.
[tseaver]
2.0.1 (2013-09-05)
==================
- Accommodated ``zc.buildout`` switch to post-merge ``setuptools``.
2.0.0 (2013-04-02)
==================
- Enabled 'prefer-final' option by default.
2.0.0a3 (2012-11-19)
====================
- Added support for Python 3.2 / 3.3.
- Added 'MANIFEST.in'.
- Support non-entry-point-based scripts.
- Honor exit codes from scripts (https://bugs.launchpad.net/bugs/697913).
2.0.0a2 (2012-05-03)
====================
- Always unzip installed eggs.
- Switched from using 'setuptools' to 'distribute'.
- Removed multi-python support.
1.3.2 (2010-08-23)
==================
- Bugfix for the change introduced in 1.3.1.
1.3.1 (2010-08-23)
==================
- Support recipes that are using zc.recipe.egg by passing in a dict, rather
than a zc.buildout.buildout.Options object as was expected/tested.
1.3.0 (2010-08-23)
==================
- Small further refactorings past 1.2.3b1 to be compatible with
zc.buildout 1.5.0.
1.2.3b1 (2010-04-29)
====================
- Refactored to be used with z3c.recipe.scripts and zc.buildout 1.5.0.
No new user-visible features.
1.2.2 (2009-03-18)
==================
- Fixed dependency information: zc.buildout >1.2.0 is required.
1.2.1 (2009-03-18)
==================
- Refactored generation of relative egg paths to generate simpler code.
1.2.0 (2009-03-17)
==================
- Added the `dependent-scripts` option. When set to `true`, scripts will
be generated for all required eggs in addition to the eggs named
specifically. This idea came from two forks of this recipe,
`repoze.recipe.egg` and `pylons_sandbox`, but the option name is
spelled with a dash instead of underscore and it defaults to `false`.
- Added a relative-paths option. When true, egg paths in scripts are generated
relative to the script names.
1.1.0 (2008-07-19)
==================
- Refactored to honor the new buildout-level unzip option.
1.1.0b1 (2008-06-27)
====================
- Added `environment` option to custom extension building options.
1.0.0 (2007-11-03)
==================
- No code changes from last beta, just some small package meta-data
improvements.
1.0.0b5 (2007-02-08)
====================
Feature Changes
---------------
- Added support for the buildout newest option.
1.0.0b4 (2007-01-17)
====================
Feature Changes
---------------
- Added initialization and arguments options to the scripts recipe.
- Added an eggs recipe that *just* installs eggs.
- Advertised the scripts recipe for creating scripts.
1.0.0b3 (2006-12-04)
====================
Feature Changes
---------------
- Added a develop recipe for creating develop eggs.
This is useful to:
- Specify custom extension building options,
- Specify a version of Python to use, and to
- Cause develop eggs to be created after other parts.
- The develop and build recipes now return the paths created, so that
created eggs or egg links are removed when a part is removed (or
changed).
1.0.0b2 (2006-10-16)
====================
Updated to work with (not get a warning from) zc.buildout 1.0.0b10.
1.0.0b1
=======
Updated to work with zc.buildout 1.0.0b3.
1.0.0a3
=======
- Extra path elements to be included in generated scripts can now be
set via the extra-paths option.
- No longer implicitly generate "py\_" scripts for each egg. There is
now an interpreter option to generate a script that, when run
without arguments, launches the Python interactive interpreter with
the path set based on a parts eggs and extra paths. If this script
is run with the name of a Python script and arguments, then the
given script is run with the path set.
- You can now specify explicit entry points. This is useful for use
with packages that don't declare their own entry points.
- Added Windows support.
- No longer implicitly generate "py\_" scripts for each egg. You can
now generate a script for launching a Python interpreter or for
running scripts based on the eggs defined for an egg part.
- You can now specify custom entry points for packages that don't
declare their entry points.
- You can now specify extra-paths to be included in generated scripts.
1.0.0a2
=======
Added a custom recipe for building custom eggs using custom distutils
build_ext arguments.
1.0.0a1
=======
Initial public version
| zc.recipe.egg | /zc.recipe.egg-2.0.7.tar.gz/zc.recipe.egg-2.0.7/CHANGES.rst | CHANGES.rst |
Installation of distributions as eggs
=====================================
The zc.recipe.egg:eggs recipe can be used to install various types of
distutils distributions as eggs. It takes a number of options:
eggs
A list of eggs to install given as one or more setuptools
requirement strings. Each string must be given on a separate
line.
find-links
A list of URLs, files, or directories to search for distributions.
index
The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index,
http://cheeseshop.python.org/pypi, is used. You can specify an
alternate index with this option. If you use the links option and
if the links point to the needed distributions, then the index can
be anything and will be largely ignored. In the examples, here,
we'll just point to an empty directory on our link server. This
will make our examples run a little bit faster.
We have a link server that has a number of distributions:
>>> print_(get(link_server), end='')
<html><body>
<a href="bigdemo-0.1-py2.3.egg">bigdemo-0.1-py2.3.egg</a><br>
<a href="demo-0.1-py2.3.egg">demo-0.1-py2.3.egg</a><br>
<a href="demo-0.2-py2.3.egg">demo-0.2-py2.3.egg</a><br>
<a href="demo-0.3-py2.3.egg">demo-0.3-py2.3.egg</a><br>
<a href="demo-0.4rc1-py2.3.egg">demo-0.4rc1-py2.3.egg</a><br>
<a href="demoneeded-1.0.zip">demoneeded-1.0.zip</a><br>
<a href="demoneeded-1.1.zip">demoneeded-1.1.zip</a><br>
<a href="demoneeded-1.2rc1.zip">demoneeded-1.2rc1.zip</a><br>
<a href="du_zipped-1.0-pyN.N.egg">du_zipped-1.0-pyN.N.egg</a><br>
<a href="extdemo-1.4.zip">extdemo-1.4.zip</a><br>
<a href="index/">index/</a><br>
<a href="mixedcase-0.5.zip">mixedcase-0.5.zip</a><br>
<a href="other-1.0-py2.3.egg">other-1.0-py2.3.egg</a><br>
</body></html>
We have a sample buildout. Let's update its configuration file to
install the demo package.
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg:eggs
... eggs = demo<0.3
... find-links = %(server)s
... index = %(server)s/index
... """ % dict(server=link_server))
In this example, we limited ourselves to revisions before 0.3. We also
specified where to find distributions using the find-links option.
Let's run the buildout:
>>> import os
>>> print_(system(buildout), end='')
Installing demo.
Getting distribution for 'demo<0.3'.
Got demo 0.2.
Getting distribution for 'demoneeded'.
Got demoneeded 1.1.
Now, if we look at the buildout eggs directory:
>>> ls(sample_buildout, 'eggs')
d demo-0.2-py2.3.egg
d demoneeded-1.1-py2.3.egg
- setuptools-0.7-py2.3.egg
d zc.buildout-1.0-py2.3.egg
We see that we got an egg for demo that met the requirement, as well
as the egg for demoneeded, which demo requires. (We also see an egg
link for the recipe in the develop-eggs directory. This egg link was
actually created as part of the sample buildout setup. Normally, when
using the recipe, you'll get a regular egg installation.)
Script generation
-----------------
The demo egg defined a script, but we didn't get one installed:
>>> ls(sample_buildout, 'bin')
- buildout
If we want scripts provided by eggs to be installed, we should use the
scripts recipe:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg:scripts
... eggs = demo<0.3
... find-links = %(server)s
... index = %(server)s/index
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/demo'.
Now we also see the script defined by the demo script:
>>> ls(sample_buildout, 'bin')
- buildout
- demo
The scripts recipe defines some additional options:
entry-points
A list of entry-point identifiers of the form:
name=module:attrs
where name is a script name, module is a dotted name resolving to a
module name, and attrs is a dotted name resolving to a callable
object within a module.
This option is useful when working with distributions that don't
declare entry points, such as distributions not written to work
with setuptools.
Examples can be seen in the section "Specifying entry points" below.
scripts
Control which scripts are generated. The value should be a list of
zero or more tokens. Each token is either a name, or a name
followed by an '=' and a new name. Only the named scripts are
generated. If no tokens are given, then script generation is
disabled. If the option isn't given at all, then all scripts
defined by the named eggs will be generated.
dependent-scripts
If set to the string "true", scripts will be generated for all
required eggs in addition to the eggs specifically named.
interpreter
The name of a script to generate that allows access to a Python
interpreter that has the path set based on the eggs installed.
extra-paths
Extra paths to include in a generated script.
initialization
Specify some Python initialization code. This is very limited. In
particular, be aware that leading whitespace is stripped from the
code given.
arguments
Specify some arguments to be passed to entry points as Python source.
relative-paths
If set to true, then egg paths will be generated relative to the
script path. This allows a buildout to be moved without breaking
egg paths. This option can be set in either the script section or
in the buildout section.
Let's add an interpreter option:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... eggs = demo<0.3
... find-links = %(server)s
... index = %(server)s/index
... interpreter = py-demo
... """ % dict(server=link_server))
Note that we omitted the entry point name from the recipe
specification. We were able to do this because the scripts recipe is
the default entry point for the zc.recipe.egg egg.
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/demo'.
Generated interpreter '/sample-buildout/bin/py-demo'.
Now we also get a py-demo script for giving us a Python prompt with
the path for demo and any eggs it depends on included in sys.path.
This is useful for debugging and testing.
>>> ls(sample_buildout, 'bin')
- buildout
- demo
- py-demo
If we run the demo script, it prints out some minimal data:
>>> print_(system(join(sample_buildout, 'bin', 'demo')), end='')
2 1
The value it prints out happens to be some values defined in the
modules installed.
We can also run the py-demo script. Here we'll just print out
the bits of the path added to reflect the eggs:
>>> print_(system(join(sample_buildout, 'bin', 'py-demo'),
... """import os, sys
... for p in sys.path:
... if 'demo' in p:
... _ = sys.stdout.write(os.path.basename(p)+'\\n')
...
... """).replace('>>> ', '').replace('... ', ''), end='')
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
demo-0.2-py2.4.egg
demoneeded-1.1-py2.4.egg...
Egg updating
------------
The recipe normally gets the most recent distribution that satisfies the
specification. It won't do this if the buildout is either in
non-newest mode or in offline mode. To see how this works, we'll
remove the restriction on demo:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... """ % dict(server=link_server))
and run the buildout in non-newest mode:
>>> print_(system(buildout+' -N'), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/demo'.
Note that we removed the eggs option, and the eggs defaulted to the
part name. Because we removed the eggs option, the demo was
reinstalled.
We'll also run the buildout in off-line mode:
>>> print_(system(buildout+' -o'), end='')
Updating demo.
We didn't get an update for demo:
>>> ls(sample_buildout, 'eggs')
d demo-0.2-py2.3.egg
d demoneeded-1.1-py2.3.egg
- setuptools-0.7-py2.3.egg
d zc.buildout-1.0-py2.3.egg
If we run the buildout on the default online and newest modes,
we'll get an update for demo:
>>> print_(system(buildout), end='')
Updating demo.
Getting distribution for 'demo'.
Got demo 0.3.
Generated script '/sample-buildout/bin/demo'.
Then we'll get a new demo egg:
>>> ls(sample_buildout, 'eggs')
d demo-0.2-py2.3.egg
d demo-0.3-py2.3.egg
d demoneeded-1.1-py2.3.egg
- setuptools-0.7-py2.4.egg
d zc.buildout-1.0-py2.4.egg
The script is updated too:
>>> print_(system(join(sample_buildout, 'bin', 'demo')), end='')
3 1
Controlling script generation
-----------------------------
You can control which scripts get generated using the scripts option.
For example, to suppress scripts, use the scripts option without any
arguments:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts =
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
>>> ls(sample_buildout, 'bin')
- buildout
You can also control the name used for scripts:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts = demo=foo
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
>>> ls(sample_buildout, 'bin')
- buildout
- foo
Specifying extra script paths
-----------------------------
If we need to include extra paths in a script, we can use the
extra-paths option:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts = demo=foo
... extra-paths =
... /foo/bar
... ${buildout:directory}/spam
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
Let's look at the script that was generated:
>>> cat(sample_buildout, 'bin', 'foo') # doctest: +NORMALIZE_WHITESPACE
#!/usr/local/bin/python2.7
<BLANKLINE>
import sys
sys.path[0:0] = [
'/sample-buildout/eggs/demo-0.3-py2.4.egg',
'/sample-buildout/eggs/demoneeded-1.1-py2.4.egg',
'/foo/bar',
'/sample-buildout/spam',
]
<BLANKLINE>
import eggrecipedemo
<BLANKLINE>
if __name__ == '__main__':
sys.exit(eggrecipedemo.main())
Relative egg paths
------------------
If the relative-paths option is specified with a true value, then
paths will be generated relative to the script. This is useful when
you want to be able to move a buildout directory around without
breaking scripts.
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts = demo=foo
... relative-paths = true
... extra-paths =
... /foo/bar
... ${buildout:directory}/spam
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
Let's look at the script that was generated:
>>> cat(sample_buildout, 'bin', 'foo') # doctest: +NORMALIZE_WHITESPACE
#!/usr/local/bin/python2.7
<BLANKLINE>
import os
<BLANKLINE>
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
base = os.path.dirname(base)
<BLANKLINE>
import sys
sys.path[0:0] = [
join(base, 'eggs/demo-0.3-pyN.N.egg'),
join(base, 'eggs/demoneeded-1.1-pyN.N.egg'),
'/foo/bar',
join(base, 'spam'),
]
<BLANKLINE>
import eggrecipedemo
<BLANKLINE>
if __name__ == '__main__':
sys.exit(eggrecipedemo.main())
You can specify relative paths in the buildout section, rather than in
each individual script section:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
... relative-paths = true
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts = demo=foo
... extra-paths =
... /foo/bar
... ${buildout:directory}/spam
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
>>> cat(sample_buildout, 'bin', 'foo') # doctest: +NORMALIZE_WHITESPACE
#!/usr/local/bin/python2.7
<BLANKLINE>
import os
<BLANKLINE>
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
base = os.path.dirname(base)
<BLANKLINE>
import sys
sys.path[0:0] = [
join(base, 'eggs/demo-0.3-pyN.N.egg'),
join(base, 'eggs/demoneeded-1.1-pyN.N.egg'),
'/foo/bar',
join(base, 'spam'),
]
<BLANKLINE>
import eggrecipedemo
<BLANKLINE>
if __name__ == '__main__':
sys.exit(eggrecipedemo.main())
Specifying initialization code and arguments
-----------------------------------------------
Sometimes, we need to do more than just calling entry points. We can
use the initialization and arguments options to specify extra code
to be included in generated scripts:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... scripts = demo=foo
... extra-paths =
... /foo/bar
... ${buildout:directory}/spam
... initialization = a = (1, 2
... 3, 4)
... interpreter = py
... arguments = a, 2
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
Generated interpreter '/sample-buildout/bin/py'.
>>> cat(sample_buildout, 'bin', 'foo') # doctest: +NORMALIZE_WHITESPACE
#!/usr/local/bin/python2.7
<BLANKLINE>
import sys
sys.path[0:0] = [
'/sample-buildout/eggs/demo-0.3-py2.4.egg',
'/sample-buildout/eggs/demoneeded-1.1-py2.4.egg',
'/foo/bar',
'/sample-buildout/spam',
]
<BLANKLINE>
a = (1, 2
3, 4)
<BLANKLINE>
import eggrecipedemo
<BLANKLINE>
if __name__ == '__main__':
sys.exit(eggrecipedemo.main(a, 2))
Here we see that the initialization code we specified was added after
setting the path. Note, as mentioned above, that leading whitespace
has been stripped. Similarly, the argument code we specified was
added in the entry point call (to main).
Our interpreter also has the initialization code:
>>> cat(sample_buildout, 'bin', 'py')
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
#!/usr/local/bin/python2.7
<BLANKLINE>
import sys
<BLANKLINE>
sys.path[0:0] = [
'/sample-buildout/eggs/demo-0.3-py3.3.egg',
'/sample-buildout/eggs/demoneeded-1.1-py3.3.egg',
'/foo/bar',
'/sample-buildout/spam',
]
<BLANKLINE>
a = (1, 2
3, 4)
<BLANKLINE>
<BLANKLINE>
_interactive = True
...
Specifying entry points
-----------------------
Scripts can be generated for entry points declared explicitly. We can
declare entry points using the entry-points option:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
...
... [demo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... extra-paths =
... /foo/bar
... ${buildout:directory}/spam
... entry-points = alt=eggrecipedemo:alt other=foo.bar:a.b.c
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling demo.
Installing demo.
Generated script '/sample-buildout/bin/demo'.
Generated script '/sample-buildout/bin/alt'.
Generated script '/sample-buildout/bin/other'.
>>> ls(sample_buildout, 'bin')
- alt
- buildout
- demo
- other
>>> cat(sample_buildout, 'bin', 'other')
#!/usr/local/bin/python2.7
<BLANKLINE>
import sys
sys.path[0:0] = [
'/sample-buildout/eggs/demo-0.3-py2.4.egg',
'/sample-buildout/eggs/demoneeded-1.1-py2.4.egg',
'/foo/bar',
'/sample-buildout/spam',
]
<BLANKLINE>
import foo.bar
<BLANKLINE>
if __name__ == '__main__':
sys.exit(foo.bar.a.b.c())
Generating all scripts
----------------------
The `bigdemo` package doesn't have any scripts, but it requires the `demo`
package, which does have a script. Specify `dependent-scripts = true` to
generate all scripts in required packages:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = bigdemo
...
... [bigdemo]
... recipe = zc.recipe.egg
... find-links = %(server)s
... index = %(server)s/index
... dependent-scripts = true
... """ % dict(server=link_server))
>>> print_(system(buildout+' -N'), end='')
Uninstalling demo.
Installing bigdemo.
Getting distribution for 'bigdemo'.
Got bigdemo 0.1.
Generated script '/sample-buildout/bin/demo'.
Offline mode
------------
If the buildout offline option is set to "true", then no attempt will
be made to contact an index server:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = demo
... offline = true
...
... [demo]
... recipe = zc.recipe.egg
... index = eek!
... scripts = demo=foo
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Uninstalling bigdemo.
Installing demo.
Generated script '/sample-buildout/bin/foo'.
| zc.recipe.egg | /zc.recipe.egg-2.0.7.tar.gz/zc.recipe.egg-2.0.7/src/zc/recipe/egg/README.rst | README.rst |
import copy
import glob
import logging
import os
import pkg_resources
import re
import sys
import zc.buildout.easy_install
from zc.buildout.buildout import bool_option
class Eggs(object):
    """Buildout recipe that installs a set of eggs (no script generation).

    Supports the ``eggs``, ``find-links`` and ``index`` options and
    exposes :meth:`working_set` for reuse by similar recipes.
    """

    # Name of the attribute set on the Buildout object that holds the
    # shared working-set cache (see _get_cache_storage).
    _WORKING_SET_CACHE_ATTR_NAME = '_zc_recipe_egg_working_set_cache'

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        b_options = buildout['buildout']
        # Part-level find-links overrides the buildout-wide value.
        # Normalized (one per line) back into options so it participates
        # in the part's install signature.
        links = options.get('find-links', b_options['find-links'])
        if links:
            links = links.split()
            options['find-links'] = '\n'.join(links)
        else:
            links = ()
        self.links = links

        index = options.get('index', b_options.get('index'))
        if index is not None:
            # Record the effective index in the part options.
            options['index'] = index
        self.index = index

        allow_hosts = b_options['allow-hosts']
        allow_hosts = tuple([host.strip() for host in allow_hosts.split('\n')
                             if host.strip() != ''])
        self.allow_hosts = allow_hosts

        # Copy the egg directories into the part options so a change in
        # either invalidates (reinstalls) the part.
        options['eggs-directory'] = b_options['eggs-directory']
        options['_e'] = options['eggs-directory'] # backward compat.
        options['develop-eggs-directory'] = b_options['develop-eggs-directory']
        options['_d'] = options['develop-eggs-directory'] # backward compat.

    def working_set(self, extra=()):
        """Separate method to just get the working set.

        This is intended for reuse by similar recipes.  *extra* is a
        sequence of additional requirement strings.  Returns a tuple
        ``(requirement_strings, pkg_resources.WorkingSet)``.
        """
        options = self.options
        buildout_section = self.buildout['buildout']

        # Backward compat. :(
        options['executable'] = sys.executable

        # Requirements come from the ``eggs`` option (one per line),
        # defaulting to the part name; blank lines are ignored.
        orig_distributions = [
            r.strip()
            for r in options.get('eggs', self.name).split('\n')
            if r.strip()
            ]

        ws = self._working_set(
            distributions=orig_distributions + list(extra),
            develop_eggs_dir=options['develop-eggs-directory'],
            eggs_dir=options['eggs-directory'],
            offline=(buildout_section.get('offline') == 'true'),
            newest=(buildout_section.get('newest') == 'true'),
            links=self.links,
            index=self.index,
            allow_hosts=self.allow_hosts,
            allow_unknown_extras=bool_option(buildout_section, 'allow-unknown-extras')
        )

        return orig_distributions, ws

    def install(self):
        # Resolving the working set is the whole job; eggs land in the
        # shared eggs directory, so no part-local files are created and
        # nothing needs to be returned for uninstall tracking.
        reqs, ws = self.working_set()
        return ()

    update = install

    def _sort_working_set(self, ws):
        """Return *ws* re-ordered by path priority.

        Develop eggs come first, then eggs from the eggs directory, then
        any other paths.
        """
        develop_paths = set()
        pattern = os.path.join(self.options['develop-eggs-directory'], '*.egg-link')
        for egg_link in glob.glob(pattern):
            with open(egg_link, 'rt') as f:
                # The first line of an .egg-link file is the project path.
                path = f.readline().strip()
                if path:
                    develop_paths.add(path)

        # Trailing separator so the commonprefix test below cannot match
        # a sibling directory whose name merely starts the same way.
        egg_directory = os.path.join(self.options['eggs-directory'], '')

        sorted_paths = []
        egg_paths = []
        other_paths = []
        for dist in ws:
            path = dist.location
            if path in develop_paths:
                sorted_paths.append(path)
            elif os.path.commonprefix([path, egg_directory]) == egg_directory:
                egg_paths.append(path)
            else:
                other_paths.append(path)
        sorted_paths.extend(egg_paths)
        sorted_paths.extend(other_paths)
        return pkg_resources.WorkingSet(sorted_paths)

    def _working_set(
        self,
        distributions,
        eggs_dir,
        develop_eggs_dir,
        offline=False,
        newest=True,
        links=(),
        index=None,
        allow_hosts=('*',),
        allow_unknown_extras=False,
    ):
        """Helper function to build a working set.

        Return an instance of `pkg_resources.WorkingSet`.

        Results are cached. The cache key is composed by all the arguments
        passed to the function. See also `self._get_cache_storage()`.
        """
        cache_storage = self._get_cache_storage()
        # Mutable arguments are converted to tuples so the key is hashable.
        cache_key = (
            tuple(distributions),
            eggs_dir,
            develop_eggs_dir,
            offline,
            newest,
            tuple(links),
            index,
            tuple(allow_hosts),
            allow_unknown_extras,
        )

        if cache_key not in cache_storage:
            if offline:
                # Offline: resolve only against already-installed eggs.
                ws = zc.buildout.easy_install.working_set(
                    distributions,
                    [develop_eggs_dir, eggs_dir]
                )
            else:
                ws = zc.buildout.easy_install.install(
                    distributions, eggs_dir,
                    links=links,
                    index=index,
                    path=[develop_eggs_dir],
                    newest=newest,
                    allow_hosts=allow_hosts,
                    allow_unknown_extras=allow_unknown_extras)
            ws = self._sort_working_set(ws)
            cache_storage[cache_key] = ws

        # `pkg_resources.WorkingSet` instances are mutable, so we need to return
        # a copy.
        return copy.deepcopy(cache_storage[cache_key])

    def _get_cache_storage(self):
        """Return a mapping where to store generated working sets.

        The cache storage is stored in an attribute of `self.buildout` with
        name given by `self._WORKING_SET_CACHE_ATTR_NAME`.
        """
        cache_storage = getattr(
            self.buildout,
            self._WORKING_SET_CACHE_ATTR_NAME,
            None)
        if cache_storage is None:
            # First use for this buildout run: create and attach the cache.
            cache_storage = {}
            setattr(
                self.buildout,
                self._WORKING_SET_CACHE_ATTR_NAME,
                cache_storage)
        return cache_storage
class Scripts(Eggs):
    """Recipe that installs eggs and generates their console scripts.

    Extends :class:`Eggs` with the ``scripts``, ``entry-points``,
    ``dependent-scripts``, ``interpreter``, ``extra-paths``,
    ``initialization``, ``arguments`` and ``relative-paths`` options.
    """

    def __init__(self, buildout, name, options):
        super(Scripts, self).__init__(buildout, name, options)

        options['bin-directory'] = buildout['buildout']['bin-directory']
        options['_b'] = options['bin-directory'] # backward compat.

        # extra-paths entries are resolved relative to the buildout
        # directory; blank lines are ignored.
        self.extra_paths = [
            os.path.join(buildout['buildout']['directory'], p.strip())
            for p in options.get('extra-paths', '').split('\n')
            if p.strip()
            ]
        if self.extra_paths:
            options['extra-paths'] = '\n'.join(self.extra_paths)

        # relative-paths can be set on the part or buildout-wide.
        relative_paths = options.get(
            'relative-paths',
            buildout['buildout'].get('relative-paths', 'false')
            )
        if relative_paths == 'true':
            options['buildout-directory'] = buildout['buildout']['directory']
            self._relative_paths = options['buildout-directory']
        else:
            self._relative_paths = ''
            # Anything other than 'true'/'false' is a configuration error.
            assert relative_paths == 'false'

    # Matches "name=dotted.module:dotted.attrs" entry-point specs.
    # Raw string: the previous non-raw literal relied on '\w' not being
    # a recognized escape, which raises DeprecationWarning (eventually a
    # SyntaxError) on modern Pythons.
    parse_entry_point = re.compile(
        r'([^=]+)=(\w+(?:[.]\w+)*):(\w+(?:[.]\w+)*)$'
        ).match

    def install(self):
        """Resolve the working set and generate the requested scripts.

        Returns the paths created so buildout can track and remove them.
        An explicitly empty ``scripts`` option disables generation.
        """
        reqs, ws = self.working_set()
        options = self.options

        scripts = options.get('scripts')
        if scripts or scripts is None:
            # scripts is None -> generate all scripts defined by the eggs;
            # a non-empty value -> only the named ones, honoring
            # "old=new" renames.
            if scripts is not None:
                scripts = scripts.split()
                scripts = dict([
                    ('=' in s) and s.split('=', 1) or (s, s)
                    for s in scripts
                    ])

            for s in options.get('entry-points', '').split():
                parsed = self.parse_entry_point(s)
                if not parsed:
                    logging.getLogger(self.name).error(
                        "Cannot parse the entry point %s.", s)
                    raise zc.buildout.UserError("Invalid entry point")
                reqs.append(parsed.groups())

            if get_bool(options, 'dependent-scripts'):
                # Generate scripts for all packages in the working set,
                # except setuptools.
                reqs = list(reqs)
                for dist in ws:
                    name = dist.project_name
                    if name != 'setuptools' and name not in reqs:
                        reqs.append(name)

            return zc.buildout.easy_install.scripts(
                reqs, ws, sys.executable, options['bin-directory'],
                scripts=scripts,
                extra_paths=self.extra_paths,
                interpreter=options.get('interpreter'),
                initialization=options.get('initialization', ''),
                arguments=options.get('arguments', ''),
                relative_paths=self._relative_paths,
                )

        return ()

    update = install
def get_bool(options, name, default=False):
    """Read option *name* as a boolean.

    A missing or empty value yields *default*; otherwise the value must
    be the string 'true' or 'false'.
    """
    raw = options.get(name)
    if not raw:
        return default
    mapping = {'true': True, 'false': False}
    if raw not in mapping:
        raise zc.buildout.UserError(
            "Invalid value for %s option: %s" % (name, raw))
    return mapping[raw]
# Backward-compatibility alias: the recipe's default entry point.
Egg = Scripts
"""Install packages as eggs
"""
import logging
import os
import sys
import zc.buildout.easy_install
logger = logging.getLogger(__name__)
class Base:
    """Shared plumbing for the custom/develop egg-building recipes.

    Captures the part name and options, records the develop-eggs
    directory, and precomputes the distutils build_ext settings.
    """

    def __init__(self, buildout, name, options):
        self.name = name
        self.options = options
        options['_d'] = buildout['buildout']['develop-eggs-directory']
        self.build_ext = build_ext(buildout, options)

    def update(self):
        # Updating is the same as (re)installing for these recipes.
        return self.install()
class Custom(Base):
    """Recipe that builds a custom egg with distutils build_ext options."""

    def __init__(self, buildout, name, options):
        Base.__init__(self, buildout, name, options)
        links = options.get('find-links',
                            buildout['buildout'].get('find-links'))
        if links:
            links = links.split()
            options['find-links'] = '\n'.join(links)
        else:
            links = ()
        self.links = links

        index = options.get('index', buildout['buildout'].get('index'))
        if index is not None:
            options['index'] = index
        self.index = index

        environment_section = options.get('environment')
        if environment_section:
            self.environment = buildout[environment_section]
        else:
            self.environment = {}
        # Record the (sorted) environment in the options so a change to
        # it invalidates the part.
        environment_data = list(self.environment.items())
        environment_data.sort()
        options['_environment-data'] = repr(environment_data)

        options['_e'] = buildout['buildout']['eggs-directory']

        if buildout['buildout'].get('offline') == 'true':
            # In offline mode, never attempt a (re)build.
            self.install = lambda: ()

        self.newest = buildout['buildout'].get('newest') == 'true'

    def install(self):
        """Build the egg with the configured environment in effect.

        Returns the paths created so buildout can track/remove them.
        """
        options = self.options
        # The distribution spec comes from ``egg``, falling back to the
        # deprecated ``eggs`` option, then to the part name.  (Previously
        # this value was computed twice; once is enough.)
        distribution = options.get('egg')
        if distribution is None:
            distribution = options.get('eggs')
            if distribution is None:
                distribution = self.name
            else:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    "The eggs option is deprecated. Use egg instead")
        distribution = distribution.strip()
        self._set_environment()
        try:
            return zc.buildout.easy_install.build(
                distribution, options['_d'], self.build_ext,
                self.links, self.index, sys.executable,
                [options['_e']], newest=self.newest,
                )
        finally:
            # Always undo environment changes, even if the build fails.
            self._restore_environment()

    def _set_environment(self):
        """Apply self.environment to os.environ, saving prior values."""
        self._saved_environment = {}
        for key, value in list(self.environment.items()):
            if key in os.environ:
                self._saved_environment[key] = os.environ[key]
            # Interpolate value with variables from environment. Maybe there
            # should be a general way of doing this in buildout with something
            # like ${environ:foo}:
            os.environ[key] = value % os.environ

    def _restore_environment(self):
        """Undo _set_environment, removing keys we introduced."""
        for key in self.environment:
            if key in self._saved_environment:
                os.environ[key] = self._saved_environment[key]
            else:
                try:
                    del os.environ[key]
                except KeyError:
                    pass
class Develop(Base):
    """Recipe that creates a develop egg from a setup script."""

    def __init__(self, buildout, name, options):
        Base.__init__(self, buildout, name, options)
        # Resolve the setup script relative to the buildout directory.
        buildout_dir = buildout['buildout']['directory']
        options['setup'] = os.path.join(buildout_dir, options['setup'])

    def install(self):
        opts = self.options
        # Create the develop egg link in the develop-eggs directory.
        return zc.buildout.easy_install.develop(
            opts['setup'], opts['_d'], self.build_ext)
def build_ext(buildout, options):
    """Extract distutils ``build_ext`` settings from recipe *options*.

    Directory-valued options (``include-dirs``, ``library-dirs``,
    ``rpath``, ``swig``) are resolved relative to the buildout directory;
    the normalized values are written back into *options* (so they take
    part in the install signature) and collected in the returned dict,
    which is suitable for zc.buildout.easy_install.build/develop.
    """
    result = {}
    base_dir = buildout['buildout']['directory']

    for be_option in ('include-dirs', 'library-dirs'):
        value = options.get(be_option)
        if value is None:
            continue
        value = [
            os.path.join(base_dir, v.strip())
            for v in value.strip().split('\n')
            if v.strip()
        ]
        # Join once; previously this was computed twice.
        joined = os.pathsep.join(value)
        result[be_option] = joined
        options[be_option] = joined

    # rpath has special symbolic dirnames which must not be prefixed
    # with the buildout dir. See:
    # http://man7.org/linux/man-pages/man8/ld.so.8.html
    RPATH_SPECIAL = [
        '$ORIGIN', '$LIB', '$PLATFORM', '${ORIGIN}', '${LIB}', '${PLATFORM}']

    def _prefix_non_special(x):
        # Leave $ORIGIN-style tokens alone; anchor everything else at the
        # buildout directory.
        x = x.strip()
        for special in RPATH_SPECIAL:
            if x.startswith(special):
                return x
        return os.path.join(base_dir, x)

    value = options.get('rpath')
    if value is not None:
        values = [_prefix_non_special(v)
                  for v in value.strip().split('\n') if v.strip()]
        joined = os.pathsep.join(values)
        result['rpath'] = joined
        options['rpath'] = joined

    swig = options.get('swig')
    if swig:
        options['swig'] = result['swig'] = os.path.join(base_dir, swig)

    # Pass-through options that need no path normalization.
    for be_option in ('define', 'undef', 'libraries', 'link-objects',
                      'debug', 'force', 'compiler', 'swig-cpp', 'swig-opts',
                      ):
        value = options.get(be_option)
        if value is None:
            continue
        result[be_option] = value

    return result
Egg Recipe API for other Recipes
================================
It is common for recipes to accept a collection of egg specifications
and generate scripts based on the resulting working sets. The egg
recipe provides an API that other recipes can use.
A recipe can reuse the egg recipe, supporting the eggs, find-links,
index, and extra-paths options. This is done by creating an
egg recipe instance in a recipe's constructor.  In the recipe's
install script, the egg-recipe instance's working_set method is used
to collect the requested eggs and working set.
To illustrate, we create a sample recipe that is a very thin layer
around the egg recipe:
>>> mkdir(sample_buildout, 'sample')
>>> write(sample_buildout, 'sample', 'sample.py',
... """
... import logging, os, sys
... import zc.recipe.egg
...
... def print_(*args):
... sys.stdout.write(' '.join(map(str, args)) + '\\n')
...
... class Sample:
...
... def __init__(self, buildout, name, options):
... self.egg = zc.recipe.egg.Scripts(buildout, name, options)
... self.name = name
... self.options = options
...
... def install(self):
... extras = self.options['extras'].split()
... requirements, ws = self.egg.working_set(extras)
... print_('Part:', self.name)
... print_('Egg requirements:')
... for r in requirements:
... print_(r)
... print_('Working set:')
... for d in ws:
... print_(d)
... print_('extra paths:', self.egg.extra_paths)
... return ()
...
... update = install
... """)
Here we instantiated the egg recipe in the constructor, saving it in
an attribute. This also initialized the options dictionary.
In our install method, we called the working_set method on the
instance we saved. The working_set method takes an optional sequence
of extra requirements to be included in the working set.
>>> write(sample_buildout, 'sample', 'setup.py',
... """
... from setuptools import setup
...
... setup(
... name = "sample",
... entry_points = {'zc.buildout': ['default = sample:Sample']},
... install_requires = 'zc.recipe.egg',
... )
... """)
>>> write(sample_buildout, 'sample', 'README.txt', " ")
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = sample
... parts = sample-part
...
... [sample-part]
... recipe = sample
... eggs = demo<0.3
... find-links = %(server)s
... index = %(server)sindex
... extras = other
... """ % dict(server=link_server))
>>> import os
>>> os.chdir(sample_buildout)
>>> buildout = os.path.join(sample_buildout, 'bin', 'buildout')
>>> print_(system(buildout + ' -q'), end='')
Part: sample-part
Egg requirements:
demo<0.3
Working set:
demoneeded 1.1
other 1.0
demo 0.2
extra paths: []
We can see that the options were augmented with additional data
computed by the egg recipe by looking at .installed.cfg:
>>> cat(sample_buildout, '.installed.cfg')
[buildout]
installed_develop_eggs = /sample-buildout/develop-eggs/sample.egg-link
parts = sample-part
<BLANKLINE>
[sample-part]
__buildout_installed__ =
__buildout_signature__ = ...
_b = /sample-buildout/bin
_d = /sample-buildout/develop-eggs
_e = /sample-buildout/eggs
bin-directory = /sample-buildout/bin
develop-eggs-directory = /sample-buildout/develop-eggs
eggs = demo<0.3
eggs-directory = /sample-buildout/eggs
extras = other
find-links = http://localhost:27071/
index = http://localhost:27071/index
recipe = sample
If we use the extra-paths option:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = sample
... parts = sample-part
...
... [sample-part]
... recipe = sample
... eggs = demo<0.3
... find-links = %(server)s
... index = %(server)sindex
... extras = other
... extra-paths = /foo/bar
... /spam/eggs
... """ % dict(server=link_server))
Then we'll see that reflected in the extra_paths attribute in the egg
recipe instance:
>>> print_(system(buildout + ' -q'), end='')
Part: sample-part
Egg requirements:
demo<0.3
Working set:
demo 0.2
other 1.0
demoneeded 1.1
extra paths: ['/foo/bar', '/spam/eggs']
| zc.recipe.egg | /zc.recipe.egg-2.0.7.tar.gz/zc.recipe.egg-2.0.7/src/zc/recipe/egg/api.rst | api.rst |
Creating eggs with extensions needing custom build settings
=============================================================
Sometimes it's necessary to provide extra control over how an egg is
created. This is commonly true for eggs with extension modules that
need to access libraries or include files.
The zc.recipe.egg:custom recipe can be used to define an egg with
custom build parameters. The currently defined parameters are:
include-dirs
A new-line separated list of directories to search for include
files.
library-dirs
A new-line separated list of directories to search for libraries
to link with.
rpath
A new-line separated list of directories to search for dynamic libraries
at run time.
define
A comma-separated list of names of C preprocessor variables to
define.
undef
A comma-separated list of names of C preprocessor variables to
undefine.
libraries
The name of an additional library to link with. Due to limitations
in distutils and despite the option name, only a single library
can be specified.
link-objects
   The name of a link object to link against.  Due to limitations
in distutils and despite the option name, only a single link object
can be specified.
debug
Compile/link with debugging information
force
Forcibly build everything (ignore file timestamps)
compiler
Specify the compiler type
swig
The path to the swig executable
swig-cpp
Make SWIG create C++ files (default is C)
swig-opts
List of SWIG command line options
In addition, the following options can be used to specify the egg:
egg
   A specification for the egg to be created, given as a setuptools
   requirement string.  This defaults to the part name.
find-links
A list of URLs, files, or directories to search for distributions.
index
The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index,
http://cheeseshop.python.org/pypi, is used. You can specify an
alternate index with this option. If you use the links option and
if the links point to the needed distributions, then the index can
be anything and will be largely ignored. In the examples, here,
we'll just point to an empty directory on our link server. This
will make our examples run a little bit faster.
environment
The name of a section with additional environment variables. The
environment variables are set before the egg is built.
To illustrate this, we'll define a buildout that builds an egg for a
package that has a simple extension module::
#include <Python.h>
#include <extdemo.h>
static PyMethodDef methods[] = {};
PyMODINIT_FUNC
initextdemo(void)
{
PyObject *m;
m = Py_InitModule3("extdemo", methods, "");
#ifdef TWO
PyModule_AddObject(m, "val", PyInt_FromLong(2));
#else
PyModule_AddObject(m, "val", PyInt_FromLong(EXTDEMO));
#endif
}
The extension depends on a system-dependent include file, extdemo.h,
that defines a constant, EXTDEMO, that is exposed by the extension.
The extension module is available as a source distribution,
extdemo-1.4.tar.gz, on a distribution server.
We have a sample buildout that we'll add an include directory to with
the necessary include file:
>>> mkdir('include')
>>> write('include', 'extdemo.h',
... """
... #define EXTDEMO 42
... """)
We'll also update the buildout configuration file to define a part for
the egg:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... parts = extdemo
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
...
... """ % dict(server=link_server))
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Installing extdemo...
We got the zip_safe warning because the source distribution we used
wasn't setuptools based and thus didn't set the option.
The egg is created in the develop-eggs directory *not* the eggs
directory because it depends on buildout-specific parameters and the
eggs directory can be shared across multiple buildouts.
>>> ls(sample_buildout, 'develop-eggs')
d extdemo-1.4-py2.4-unix-i686.egg
- zc.recipe.egg.egg-link
Note that no scripts or dependencies are installed. To install
dependencies or scripts for a custom egg, define another part and use
the zc.recipe.egg recipe, listing the custom egg as one of the eggs to
be installed. The zc.recipe.egg recipe will use the installed egg.
Let's define a script that uses our extdemo:
>>> mkdir('demo')
>>> write('demo', 'demo.py',
... """
... import extdemo, sys
... def print_(*args):
... sys.stdout.write(' '.join(map(str, args)) + '\\n')
... def main():
... print_(extdemo.val)
... """)
>>> write('demo', 'setup.py',
... """
... from setuptools import setup
... setup(name='demo')
... """)
>>> write('buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = extdemo demo
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
...
... [demo]
... recipe = zc.recipe.egg
... eggs = demo
... extdemo
... entry-points = demo=demo:main
... """ % dict(server=link_server))
>>> print_(system(buildout), end='')
Develop: '/sample-buildout/demo'
Updating extdemo.
Installing demo.
Generated script '/sample-buildout/bin/demo'...
When we run the script, we'll see 42 printed:
>>> print_(system(join('bin', 'demo')), end='')
42
Updating
--------
The custom recipe will normally check for new source distributions
that meet the given specification. This can be suppressed using the
buildout non-newest and offline modes. We'll generate a new source
distribution for extdemo:
>>> update_extdemo()
If we run the buildout in non-newest or offline modes:
>>> print_(system(buildout+' -N'), end='')
Develop: '/sample-buildout/demo'
Updating extdemo.
Updating demo.
>>> print_(system(buildout+' -o'), end='')
Develop: '/sample-buildout/demo'
Updating extdemo.
Updating demo.
We won't get an update.
>>> ls(sample_buildout, 'develop-eggs')
- demo.egg-link
d extdemo-1.4-py2.4-unix-i686.egg
- zc.recipe.egg.egg-link
But if we run the buildout in the default on-line and newest modes, we
will. This time we also get the test-variable message again, because the new
version is imported:
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/demo'
Updating extdemo.
zip_safe flag not set; analyzing archive contents...
Updating demo.
...
>>> ls(sample_buildout, 'develop-eggs')
- demo.egg-link
d extdemo-1.4-py2.4-linux-i686.egg
d extdemo-1.5-py2.4-linux-i686.egg
- zc.recipe.egg.egg-link
Controlling the version used
----------------------------
We can specify a specific version using the egg option:
>>> write('buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = extdemo demo
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... egg = extdemo ==1.4
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
...
... [demo]
... recipe = zc.recipe.egg
... eggs = demo
... extdemo ==1.4
... entry-points = demo=demo:main
... """ % dict(server=link_server))
>>> print_(system(buildout+' -D'), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/demo'
...
>>> ls(sample_buildout, 'develop-eggs')
- demo.egg-link
d extdemo-1.4-py2.4-linux-i686.egg
- zc.recipe.egg.egg-link
Controlling environment variables
+++++++++++++++++++++++++++++++++
To set additional environment variables, the `environment` option is used.
Let's create a recipe which prints out environment variables. We need this to
make sure the set environment variables are removed after the egg:custom
recipe was run.
>>> mkdir(sample_buildout, 'recipes')
>>> write(sample_buildout, 'recipes', 'environ.py',
... """
... import logging, os, zc.buildout
...
... class Environ:
...
... def __init__(self, buildout, name, options):
... self.name = name
...
... def install(self):
... logging.getLogger(self.name).info(
... 'test-variable left over: %s' % (
... 'test-variable' in os.environ))
... return []
...
... def update(self):
... self.install()
... """)
>>> write(sample_buildout, 'recipes', 'setup.py',
... """
... from setuptools import setup
...
... setup(
... name = "recipes",
... entry_points = {'zc.buildout': ['environ = environ:Environ']},
... )
... """)
Create our buildout:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = recipes
... parts = extdemo checkenv
...
... [extdemo-env]
... test-variable = foo
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
... environment = extdemo-env
...
... [checkenv]
... recipe = recipes:environ
...
... """ % dict(server=link_server))
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/recipes'
Uninstalling demo.
Uninstalling extdemo.
Installing extdemo.
Have environment test-variable: foo
zip_safe flag not set; analyzing archive contents...
Installing checkenv.
...
The setup.py also printed out that we have set the environment `test-variable`
to foo. After the buildout the variable is reset to its original value (i.e.
removed).
When an environment variable has a value before zc.recipe.egg:custom is run,
the original value will be restored:
>>> import os
>>> os.environ['test-variable'] = 'bar'
>>> print_(system(buildout), end='')
Develop: '/sample-buildout/recipes'
Updating extdemo.
Updating checkenv.
checkenv: test-variable left over: True
>>> os.environ['test-variable']
'bar'
Sometimes it is required to prepend or append to an existing environment
variable, for instance for adding something to the PATH. Therefore all variables
are interpolated with os.environ before they're set:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = recipes
... parts = extdemo checkenv
...
... [extdemo-env]
... test-variable = foo:%%(test-variable)s
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
... environment = extdemo-env
...
... [checkenv]
... recipe = recipes:environ
...
... """ % dict(server=link_server))
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/recipes'
Uninstalling extdemo.
Installing extdemo.
Have environment test-variable: foo:bar
zip_safe flag not set; analyzing archive contents...
Updating checkenv.
...
>>> os.environ['test-variable']
'bar'
>>> del os.environ['test-variable']
Create a clean buildout.cfg w/o the checkenv recipe, and delete the recipe:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = recipes
... parts = extdemo
...
... [extdemo]
... recipe = zc.recipe.egg:custom
... find-links = %(server)s
... index = %(server)s/index
... include-dirs = include
...
... """ % dict(server=link_server))
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/recipes'
Uninstalling checkenv.
Uninstalling extdemo.
Installing extdemo...
>>> rmdir(sample_buildout, 'recipes')
Controlling develop-egg generation
==================================
If you want to provide custom build options for a develop egg, you can
use the develop recipe. The recipe has the following options:
setup
The path to a setup script or directory containing a startup
script. This is required.
include-dirs
A new-line separated list of directories to search for include
files.
library-dirs
A new-line separated list of directories to search for libraries
to link with.
rpath
A new-line separated list of directories to search for dynamic libraries
at run time.
define
A comma-separated list of names of C preprocessor variables to
define.
undef
A comma-separated list of names of C preprocessor variables to
undefine.
libraries
The name of an additional library to link with. Due to limitations
in distutils and despite the option name, only a single library
can be specified.
link-objects
   The name of a link object to link against.  Due to limitations
in distutils and despite the option name, only a single link object
can be specified.
debug
Compile/link with debugging information
force
Forcibly build everything (ignore file timestamps)
compiler
Specify the compiler type
swig
The path to the swig executable
swig-cpp
Make SWIG create C++ files (default is C)
swig-opts
List of SWIG command line options
To illustrate this, we'll use a directory containing the extdemo
example from the earlier section:
>>> ls(extdemo)
- MANIFEST
- MANIFEST.in
- README
- extdemo.c
- setup.py
>>> write('buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = extdemo demo
...
... [extdemo]
... setup = %(extdemo)s
... recipe = zc.recipe.egg:develop
... include-dirs = include
... define = TWO
...
... [demo]
... recipe = zc.recipe.egg
... eggs = demo
... extdemo
... entry-points = demo=demo:main
... """ % dict(extdemo=extdemo))
Note that we added a define option to cause the preprocessor variable
TWO to be defined. This will cause the module-variable, 'val', to be
set with a value of 2.
>>> print_(system(buildout), end='') # doctest: +ELLIPSIS
Develop: '/sample-buildout/demo'
Uninstalling extdemo.
Installing extdemo.
Installing demo.
...
Our develop-eggs now includes an egg link for extdemo:
>>> ls('develop-eggs')
- demo.egg-link
- extdemo.egg-link
- zc.recipe.egg.egg-link
and the extdemo now has a built extension:
>>> contents = os.listdir(extdemo)
>>> bool([f for f in contents if f.endswith('.so') or f.endswith('.pyd')])
True
Because develop eggs take precedence over non-develop eggs, the demo
script will use the new develop egg:
>>> print_(system(join('bin', 'demo')), end='')
2
| zc.recipe.egg | /zc.recipe.egg-2.0.7.tar.gz/zc.recipe.egg-2.0.7/src/zc/recipe/egg/custom.rst | custom.rst |
Working set caching
===================
Working sets are cached, to improve speed on buildouts with multiple similar
parts based on ``zc.recipe.egg``.
The egg-recipe instance's ``_working_set`` helper method is used to make
the caching easier. It does the same job as ``working_set()`` but with some
differences:
- The signature is different: all information needed to build the working set
is passed as parameters.
- The return value is simpler: only an instance of ``pkg_resources.WorkingSet``
is returned.
Here's an example:
>>> from zc.buildout import testing
>>> from zc.recipe.egg.egg import Eggs
>>> import os
>>> import pkg_resources
>>> recipe = Eggs(buildout=testing.Buildout(), name='fake-part', options={})
>>> eggs_dir = os.path.join(sample_buildout, 'eggs')
>>> develop_eggs_dir = os.path.join(sample_buildout, 'develop-eggs')
>>> testing.install_develop('zc.recipe.egg', develop_eggs_dir)
>>> ws = recipe._working_set(
... distributions=['zc.recipe.egg', 'demo<0.3'],
... eggs_dir=eggs_dir,
... develop_eggs_dir=develop_eggs_dir,
... index=link_server,
... )
Getting...
>>> isinstance(ws, pkg_resources.WorkingSet)
True
>>> sorted(dist.project_name for dist in ws)
['demo', 'demoneeded', 'setuptools', 'zc.buildout', 'zc.recipe.egg']
We'll monkey patch a method in the ``easy_install`` module in order to verify if
the cache is working:
>>> import zc.buildout.easy_install
>>> old_install = zc.buildout.easy_install.Installer.install
>>> def new_install(*args, **kwargs):
... print('Building working set.')
... return old_install(*args, **kwargs)
>>> zc.buildout.easy_install.Installer.install = new_install
Now we check if the caching is working by verifying if the same working set is
built only once.
>>> ws_args_1 = dict(
... distributions=['demo>=0.1'],
... eggs_dir=eggs_dir,
... develop_eggs_dir=develop_eggs_dir,
... offline=True,
... )
>>> ws_args_2 = dict(ws_args_1)
>>> ws_args_2['distributions'] = ['demoneeded']
>>> recipe._working_set(**ws_args_1)
Building working set.
<pkg_resources.WorkingSet object at ...>
>>> recipe._working_set(**ws_args_1)
<pkg_resources.WorkingSet object at ...>
>>> recipe._working_set(**ws_args_2)
Building working set.
<pkg_resources.WorkingSet object at ...>
>>> recipe._working_set(**ws_args_1)
<pkg_resources.WorkingSet object at ...>
>>> recipe._working_set(**ws_args_2)
<pkg_resources.WorkingSet object at ...>
Undo monkey patch:
>>> zc.buildout.easy_install.Installer.install = old_install
Since ``pkg_resources.WorkingSet`` instances are mutable, we must ensure that
``working_set()`` always returns a pristine copy. Otherwise callers would be
able to modify instances inside the cache.
Let's create a working set:
>>> ws = recipe._working_set(**ws_args_1)
>>> sorted(dist.project_name for dist in ws)
['demo', 'demoneeded']
Now we add a distribution to it:
>>> dist = pkg_resources.get_distribution('zc.recipe.egg')
>>> ws.add(dist)
>>> sorted(dist.project_name for dist in ws)
['demo', 'demoneeded', 'zc.recipe.egg']
Let's call the working_set function again and see if the result remains valid:
>>> ws = recipe._working_set(**ws_args_1)
>>> sorted(dist.project_name for dist in ws)
['demo', 'demoneeded']
| zc.recipe.egg | /zc.recipe.egg-2.0.7.tar.gz/zc.recipe.egg-2.0.7/src/zc/recipe/egg/working_set_caching.rst | working_set_caching.rst |
=======
CHANGES
=======
2.0 (2023-02-10)
----------------
- Drop support for Python 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6.
- Add support for Python 3.7, 3.8, 3.9, 3.10, 3.11, PyPy3.
1.1.2 (2014-02-21)
------------------
- Fixed: packaging bug that caused 'pip install zc.recipe.filestorage' to fail
with an error about missing README.txt
1.1.1 (2014-02-16)
------------------
- Fixed: packaging bug that caused a test failure in
a test runner that didn't use buildout to run setup.py.
1.1.0 (2014-02-14)
------------------
- Python 3 compatibility
- Using Python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
- Removed 'shared-blob-dir' from blobstorage section.
1.0.0 (2007-11-03)
------------------
- Initial release.
| zc.recipe.filestorage | /zc.recipe.filestorage-2.0.tar.gz/zc.recipe.filestorage-2.0/CHANGES.rst | CHANGES.rst |
===================================
Recipe for setting up a filestorage
===================================
This recipe can be used to define a file-storage. It creates a ZConfig
file-storage database specification that can be used by other recipes to
generate ZConfig configuration files.
This recipe takes an optional path option. If none is given, it creates and
uses a subdirectory of the buildout parts directory with the same name as the
part.
The recipe records a zconfig option for use by other recipes.
We'll show a couple of examples, using a dictionary as a simulated buildout
object:
>>> import zc.recipe.filestorage
>>> buildout = dict(
... buildout = {
... 'directory': '/buildout',
... },
... db = {
... 'path': 'foo/Main.fs',
... },
... )
>>> recipe = zc.recipe.filestorage.Recipe(
... buildout, 'db', buildout['db'])
>>> print(buildout['db']['path'])
/buildout/foo/Main.fs
>>> print(buildout['db']['zconfig'], end='')
<zodb>
<filestorage>
path /buildout/foo/Main.fs
</filestorage>
</zodb>
>>> recipe.install()
()
>>> import tempfile
>>> d = tempfile.mkdtemp()
>>> buildout = dict(
... buildout = {
... 'parts-directory': d,
... },
... db = {},
... )
>>> recipe = zc.recipe.filestorage.Recipe(
... buildout, 'db', buildout['db'])
>>> print(buildout['db']['path'])
/tmp/tmpQo0DTB/db/Data.fs
>>> print(buildout['db']['zconfig'], end='')
<zodb>
<filestorage>
path /tmp/tmpQo0DTB/db/Data.fs
</filestorage>
</zodb>
>>> recipe.install()
()
>>> import os
>>> os.listdir(d)
['db']
The update method doesn't do much, as the database part's directory
already exists, but it is present, so buildout doesn't complain and doesn't
accidentally run install() again:
>>> recipe.update()
If the storage's directory is removed, it is re-added by the update method:
>>> os.rmdir(os.path.join(d, 'db'))
>>> os.listdir(d)
[]
>>> recipe.update()
>>> os.listdir(d)
['db']
This is useful in development when the directory containing the database is
removed in order to start the database from scratch.
| zc.recipe.filestorage | /zc.recipe.filestorage-2.0.tar.gz/zc.recipe.filestorage-2.0/README.rst | README.rst |
===================================
Recipe for setting up a filestorage
===================================
This recipe can be used to define a file-storage. It creates a ZConfig
file-storage database specification that can be used by other recipes to
generate ZConfig configuration files.
This recipe takes an optional path option. If none is given, it creates and
uses a subdirectory of the buildout parts directory with the same name as the
part.
The recipe records a zconfig option for use by other recipes.
We'll show a couple of examples, using a dictionary as a simulated buildout
object:
>>> import zc.recipe.filestorage
>>> buildout = dict(
... buildout = {
... 'directory': '/buildout',
... },
... db = {
... 'path': 'foo/Main.fs',
... },
... )
>>> recipe = zc.recipe.filestorage.Recipe(
... buildout, 'db', buildout['db'])
>>> print(buildout['db']['path'])
/buildout/foo/Main.fs
>>> print(buildout['db']['zconfig'], end='')
<zodb>
<filestorage>
path /buildout/foo/Main.fs
</filestorage>
</zodb>
>>> recipe.install()
()
>>> import tempfile
>>> d = tempfile.mkdtemp()
>>> buildout = dict(
... buildout = {
... 'parts-directory': d,
... },
... db = {},
... )
>>> recipe = zc.recipe.filestorage.Recipe(
... buildout, 'db', buildout['db'])
>>> print(buildout['db']['path'])
/tmp/tmpQo0DTB/db/Data.fs
>>> print(buildout['db']['zconfig'], end='')
<zodb>
<filestorage>
path /tmp/tmpQo0DTB/db/Data.fs
</filestorage>
</zodb>
>>> recipe.install()
()
>>> import os
>>> os.listdir(d)
['db']
The update method doesn't do much, as the database part's directory
already exists, but it is present, so buildout doesn't complain and doesn't
accidentally run install() again:
>>> recipe.update()
If the storage's directory is removed, it is re-added by the update method:
>>> os.rmdir(os.path.join(d, 'db'))
>>> os.listdir(d)
[]
>>> recipe.update()
>>> os.listdir(d)
['db']
This is useful in development when the directory containing the database is
removed in order to start the database from scratch.
| zc.recipe.filestorage | /zc.recipe.filestorage-2.0.tar.gz/zc.recipe.filestorage-2.0/src/zc/recipe/filestorage/README.rst | README.rst |
import os, sys, shutil, tempfile, urllib2
import setuptools.archive_util
class Recipe:
    """Buildout recipe that downloads and installs IBM ICU.

    On Unix-like platforms the ICU source tarball is fetched, configured
    with ``runConfigureICU`` and built with ``make install``; on Windows
    a prebuilt binary zip is unpacked.  The install location (under the
    buildout parts directory) is recorded in the part's ``location``
    option for use by other parts.
    """

    def __init__(self, buildout, name, options):
        self.name = name
        self.options = options
        # Install into <buildout>/parts/<part-name>.
        self.location = os.path.join(
            buildout['buildout']['parts-directory'],
            self.name)
        options['location'] = self.location

        # Map sys.platform onto the platform names understood by ICU's
        # runConfigureICU script (or select the win32 binary download).
        if sys.platform.startswith('linux'):
            platform = 'LinuxRedHat'
        elif sys.platform.startswith('darwin'):
            platform = 'MacOSX'
        elif sys.platform.startswith('win32'):
            platform = 'win32'
        else:
            raise SystemError("Can't guess an ICU platform")
        options['platform'] = platform

    def install(self):
        options = self.options
        dest = options['location']
        if os.path.exists(dest):
            # Already built; nothing to do.
            return dest
        if options['platform'] == 'win32':
            return self.install_win32(options, dest)

        here = os.getcwd()
        tmp = tempfile.mkdtemp()
        try:
            f = urllib2.urlopen(
                'ftp://ftp.software.ibm.com/software/globalization/icu/'
                '%(version)s/icu-%(version)s.tgz'
                % dict(version=options['version'])
                )
            arch = os.path.join(tmp, 'arch')
            # The tarball is binary data: write it in binary mode
            # (text mode would corrupt it on Windows) and close the
            # handles explicitly.
            archive = open(arch, 'wb')
            archive.write(f.read())
            archive.close()
            f.close()
            setuptools.archive_util.unpack_archive(arch, tmp)

            source = os.path.join(tmp, 'icu', 'source')
            os.chdir(source)
            configure = os.path.join(source, 'runConfigureICU')
            assert os.spawnl(
                os.P_WAIT, configure, configure,
                options['platform'],
                '--prefix=' + dest,
                ) == 0
            assert os.spawnlp(os.P_WAIT, 'make', 'make', 'install') == 0
        finally:
            os.chdir(here)
            shutil.rmtree(tmp)
        return dest

    def update(self):
        # Nothing to update once ICU has been installed.
        pass

    def install_win32(self, options, dest):
        # mkstemp returns (fd, path); the original code used the whole
        # tuple as a file name, which broke the open() below.
        fd, tmp = tempfile.mkstemp()
        try:
            f = urllib2.urlopen(
                'ftp://ftp.software.ibm.com/software/globalization/icu/'
                '%(version)s/icu-%(version)s-Win32-msvc7.1.zip'
                % dict(version=options['version'])
                )
            # Write the zip in binary mode through the descriptor that
            # mkstemp handed us.
            archive = os.fdopen(fd, 'wb')
            archive.write(f.read())
            archive.close()
            f.close()
            setuptools.archive_util.unpack_archive(tmp, dest)
        finally:
            # shutil has no rmfile(); os.remove is the correct call.
            os.remove(tmp)
        return dest
==========================
Invoking recipes as macros
==========================
This recipe provides a macro system for buildout parts; the intent is to avoid
repetition in buildout configurations. For buildouts creating several Zope 3
instances, we've commonly observed that the zope.conf option gets fairly large
and is repeated for each instance to allow a few substitutions in the middle
(port numbers and logfile paths are the main culprits). Pushing the bulk of
the zope.conf setting to a macro to avoid repeating the syntax, and allowing
that to refer to settings that are actually specific to each instance would
significantly improve both readability and maintainability of the instance
configurations.
The macro recipe allows storing the common portions of a part in a section
that's referred to as a "macro section"; it defines everything common to parts
that use it, except the recipe option.
Macros are used by parts called "macro invocations". The invocation uses the
macro recipe, and identifies the "macro section" using the "macro" option:
Buildout::
[buildout]
parts = instance0 instance1
[instance-macro]
application = application
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/$${:__name__}-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port $${:monitor-port}
</product-config>
[instance0]
recipe = zc.recipe.macro
result-recipe = zc.recipe.macro:test1
macro = instance-macro
address = 8080
monitor-port = 8089
[instance1]
recipe = zc.recipe.macro
result-recipe = zc.recipe.macro:test1
macro = instance-macro
address = 9080
monitor-port = 9089
- The ``[buildout]`` section specified two parts, ``instance0`` and
``instance1``.
- These parts in turn specified that they would be using the
macro system: ``recipe = zc.recipe.macro``.
- The output of the macro should be used with the ``zc.recipe.macro:test1``
recipe, as specified by the ``result-recipe`` option.
- This resulting recipe will receive the ``address`` option from the two
instance sections.
- It will also receive the (unprocessed) ``application`` option from the
``instance-macro``.
- The recipe will also receive the fully processed result of the
``instance-macro`` ``zope.conf`` option.
The zope.conf has two substitutions. They both use the prefix ``$${:``
and the suffix ``}``. This syntax is driven in part by the needs of the
ConfigParser library on which zc.buildout relies.
* The ``monitor-port`` is replaced with the ``monitor-port`` values from
the ``instance0`` and ``instance1`` sections, respectively.
* The ``__name__`` is a special token that is replaced with the name of
the section--in this case the strings "instance0" and "instance1"
respectively.
Result::
[instance0]
application = application
result-sections = instance0
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance0-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 8089
</product-config>
[instance1]
application = application
result-sections = instance1
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance1-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 9089
</product-config>
Note that the options from the invocation are used both to perform
substitutions and as additional options in the expansion. The result-recipe
option is used to determine the recipe used on the resulting part. The
result-sections holds a list of the section modified or created by the
invocation.
Macro invocation without a result-recipe
----------------------------------------
Sometimes it is good to have a macro that does not result in a part.
Buildout::
[buildout]
parts = instance0 instance1
[instance-macro]
application = application
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/$${:__name__}-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port $${:monitor-port}
</product-config>
[instance0]
recipe = zc.recipe.macro
macro = instance-macro
address = 8080
monitor-port = 8089
[instance1]
recipe = zc.recipe.macro
macro = instance-macro
address = 9080
monitor-port = 9089
Result::
[instance0]
application = application
recipe = zc.recipe.macro:empty
result-sections = instance0
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance0-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 8089
</product-config>
[instance1]
application = application
recipe = zc.recipe.macro:empty
result-sections = instance1
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance1-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 9089
</product-config>
In this case, the zc.recipe.macro recipe is used, with its Empty entry point.
This entry point doesn't do anything, but we have to have a recipe to use,
since the macro recipe has declared this to be a part. The same sort of output
will come from an empty result-recipe option.
Buildout::
[buildout]
parts = instance0 instance1
[instance-macro]
application = application
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/$${:__name__}-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port $${:monitor-port}
</product-config>
[instance0]
recipe = zc.recipe.macro
result-recipe =
macro = instance-macro
address = 8080
monitor-port = 8089
[instance1]
recipe = zc.recipe.macro
result-recipe =
macro = instance-macro
address = 9080
monitor-port = 9089
Result::
[instance0]
application = application
recipe = zc.recipe.macro:empty
result-sections = instance0
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance0-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 8089
</product-config>
[instance1]
application = application
recipe = zc.recipe.macro:empty
result-sections = instance1
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance1-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 9089
</product-config>
And of course they are the same as explicitly declaring and empty result.
Targets for Macro Invocation
----------------------------
Macros don't provide for a separate scope, by default. This is often perfectly
fine, but if we want to make it possible to invoke the same recipe twice, there
must be a way to target the macro invocation. The way targeting works is that
it iterates over the names in the targets value, creating a section by that
name if necessary, and putting the results of the invocation in the new
section. New
sections are just like any other section, so other sections can refer to their
options, and they can be used as parts.
Buildout::
[buildout]
parts = invoker
[macro]
output = I was invoked on $${:__name__}
[invoker]
recipe = zc.recipe.macro
macro = macro
targets =
zero
one
Result::
[one]
output = I was invoked on one
[zero]
output = I was invoked on zero
It is possible, and much more useful, to provide parameters by specifying other
sections.
Buildout::
[buildout]
parts = invoker
[macro]
output = $${:subject} was invoked on $${:__name__}
[one-parameters]
subject = Fred
[zero-parameters]
subject = Benji
[invoker]
recipe = zc.recipe.macro
macro = macro
targets =
zero:zero-parameters
one:one-parameters
Result::
[one]
output = Fred was invoked on one
[zero]
output = Benji was invoked on zero
Default values in macros
------------------------
It is possible to make default values in macros.
Buildout::
[buildout]
parts = instance0
[instance-macro]
application = application
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/$${:__name__}-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port $${:monitor-port}
</product-config>
address = 8080
monitor-port = 8089
[instance0]
recipe = zc.recipe.macro
result-recipe = zc.recipe.macro:test1
macro = instance-macro
Result::
[instance0]
address = 8080
application = application
monitor-port = 8089
recipe = zc.recipe.macro:test1
result-sections = instance0
zope.conf =
<eventlog>
<logfile>
path /var/log/myapp/instance0-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 8089
</product-config>
These can be overridden by parameter sections.
Buildout::
[buildout]
parts = invoker
[macro]
output = $${:subject} $${:verb} on $${:__name__}
subject = I
verb = was invoked
[zero-parameters]
verb = drive
[invoker]
recipe = zc.recipe.macro
macro = macro
targets = zero:zero-parameters
Result::
[zero]
output = I drive on zero
verb = drive
subject = I
Edge Case Tests
---------------
It used to cause errors when default macro variables referred to one another
and the invoker targeted itself. This test will prevent regression. The bug
is dependent on the iteration order of a dictionary-like object, and so a
subclass will be created that returns its keys in a particular order.
>>> import zc.recipe.macro.recipe
>>> class OrderedOptions(zc.buildout.buildout.Options):
... def keys(self):
... return list(
... reversed(sorted(zc.buildout.buildout.Options.keys(self))))
>>> zc.recipe.macro.recipe.Options = OrderedOptions
>>> buildout = setupBuildout(sample_buildout, "buildout.cfg",
... """
... [buildout]
... parts = instance0
...
... [instance-macro]
... address = 8080
... application = application
... monitor-port = 8089
... zope.conf =
... <eventlog>
... <logfile>
... path /var/log/myapp/$${:__name__}-z3.log
... </logfile>
... </eventlog>
... <product-config zc.z3monitor>
... port $${:monitor-port}
... </product-config>
...
... [instance0]
... recipe = zc.recipe.macro
... result-recipe = zc.recipe.macro:test1
... macro = instance-macro
... """)
>>> buildout.install([])
>>> buildout_pprint(buildout)
{'buildout': {...},
'instance-macro': {'address': '8080',
'application': 'application',
'monitor-port': '8089',
'zope.conf': '
<eventlog>
<logfile>
path /var/log/myapp/$${:__name__}-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port $${:monitor-port}
</product-config>'},
'instance0': {'address': '8080',
'application': 'application',
'monitor-port': '8089',
'recipe': 'zc.recipe.macro:test1',
'result-sections': 'instance0',
'zope.conf': '
<eventlog>
<logfile>
path /var/log/myapp/instance0-z3.log
</logfile>
</eventlog>
<product-config zc.z3monitor>
port 8089
</product-config>'}}
>>> zc.recipe.macro.recipe.Options = zc.buildout.buildout.Options
| zc.recipe.macro | /zc.recipe.macro-1.3.0.tar.gz/zc.recipe.macro-1.3.0/src/zc/recipe/macro/README.txt | README.txt |
import re
import os.path
import zc.buildout.buildout
import pprint
from zc.buildout.buildout import Options
def evaluate_macro(buildout, name, macro, input, recipe):
    """Expand the options of a macro section for one target section.

    Returns a new options dict in which every ``$${:key}`` reference in
    *macro*'s values is rewritten to a normal buildout substitution:
    it points at the parameter section *input* when that section defines
    the key, and at the generated section *name* otherwise.  The special
    token ``$${:__name__}`` is replaced with the target section's name.
    If *recipe* is non-empty it becomes the expanded section's recipe.
    """
    def replace_match(match):
        # Called by re.sub for each $${:key} token found in a value.
        key = match.groups()[0]
        if key in buildout[input]:
            # The parameter section supplies this key; reference it there.
            ret_val = '${%s:%s}' % (input, key)
            if key in new_macro:
                # A macro default exists for this key: point it at the
                # parameter section's value so the override wins.
                new_macro[key] = ret_val
        else:
            # No parameter value; resolve within the generated section.
            ret_val = '${%s:%s}' % (name, key)
        return ret_val
    # $${:key} -- doubled '$' so ConfigParser leaves it alone on input.
    c_re = re.compile(r'\$\$\{:([^}]*)\}')
    new_macro = dict(macro)
    # Iterate over a snapshot of the macro because replace_match may
    # rewrite entries of new_macro while we substitute.
    for key, value in dict(macro).iteritems():
        # Only substitute entries replace_match has not already rewritten.
        if new_macro.get(key, '') == value:
            new_macro[key] = c_re.sub(
                replace_match, value.replace('$${:__name__}', name))
    if recipe:
        new_macro['recipe'] = recipe
    return new_macro
def parse_target(invoker, target):
    """Split a target spec into (output section, parameter section).

    A spec of the form ``output:params`` names its own parameter
    section; a bare spec uses the invoking section for parameters.
    """
    if ':' not in target:
        return target, invoker
    output, parameters = target.split(':')
    return output, parameters
def get_recipe(invocation_section, macro_section, parameter_section, default=None):
    """Choose the recipe for a generated part.

    Precedence: the parameter section's ``recipe``, then its
    ``result-recipe``, then the invocation's ``result-recipe``, then
    the macro's ``recipe``; *default* when none of those is present.
    """
    lookup_order = (
        (parameter_section, 'recipe'),
        (parameter_section, 'result-recipe'),
        (invocation_section, 'result-recipe'),
        (macro_section, 'recipe'),
        )
    for section, key in lookup_order:
        if key in section:
            return section[key]
    return default
def Macro(buildout, name, options):
    """Recipe factory: expand a macro into one or more sections.

    Pops the ``macro`` and ``targets`` options, expands the macro
    section once per target via evaluate_macro, writes the results into
    the target sections (or into the invoking section itself), records
    the modified sections in ``result-sections``, and finally loads and
    returns an instance of the resulting recipe for this part.
    """
    del options['recipe']
    macro = options.pop('macro').strip()
    # Default target is the invoking section itself.
    targets = options.pop('targets', name).strip().split()
    macro_summation = {}
    macro_summation.update(dict(buildout[macro]))
    new_sections = []
    for output, input in (parse_target(name, target) for target in targets):
        recipe = get_recipe(options, macro_summation, buildout[input])
        new_sections.append(output)
        opt = Options(
            buildout,
            output,
            evaluate_macro(
                buildout, output, macro_summation, input, recipe))
        # result-recipe is consumed by get_recipe; it must not leak into
        # the expanded section.
        opt.pop('result-recipe', '')
        if output == name:
            # Targeting the invoker: merge the expansion into this part's
            # own raw options, defaulting to the do-nothing recipe.
            options._raw.update(opt._raw)
            options['recipe'] = options.get('recipe', 'zc.recipe.macro:empty')
        else:
            # Targeting another section: install the expansion as that
            # section's raw data.
            buildout._raw[output] = opt._raw
    options.pop('result-recipe', '')
    # Record which sections were created or modified by this invocation.
    if new_sections:
        options['result-sections'] = ' '.join(new_sections)
    # Make sure we have a recipe for this part, even if it is only the
    # empty one.
    if not options.get('recipe', None):
        options['recipe'] = 'zc.recipe.macro:empty'
    # Load and instantiate the resulting recipe for this part.
    reqs, entry = zc.buildout.buildout._recipe(options._data)
    recipe_class = zc.buildout.buildout._install_and_load(
        reqs, 'zc.buildout', entry, buildout)
    __doing__ = 'Initializing part %s.', name
    part = recipe_class(buildout, name, options)
    return part
class Empty(object):
    """A recipe that deliberately does nothing.

    Used as a placeholder when a macro invocation declares a part that
    needs no real recipe of its own.
    """

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options

    def install(self):
        # Nothing to create; report no installed files.
        return []

    update = install

    def uninstall(self):
        pass
class Test(object):
    """Minimal do-nothing recipe used by the test suite as a result-recipe."""

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options

    def install(self):
        # No build step; return an empty list of created paths.
        return []

    update = install

    def uninstall(self):
        pass
*****************************************
ZC Buildout recipe for Redhat RC scripts
*****************************************
This package provides a zc.buildout recipe for creating Red-Hat Linux
compatible run-control scripts.
.. contents::
Changes
*******
1.4.2 (2012-12-20)
==================
Fixed: Errors were raised if stopping a run script failed during
uninstall. This could cause a buildout to be wedged, because
you couldn't uninstall a broken/missing run script.
1.4.1 (2012-08-31)
==================
Fixed: Processes weren't started on update.
In a perfect world, this wouldn't be necessary, as in the
update case, the process would already be running, however,
it's helpful to make sure the process is running by trying to
start it.
1.4.0 (2012-05-18)
==================
- Added optional process-management support. If requested, then run
scripts are run as part of install and uninstall.
- Fixed: missing **test** dependency on ``zope.testing``
1.3.0 (2010/05/26)
==================
New Features
------------
- A new independent-processes option causes multiple processes to be
restarted independently, rather than stopping all of them and then
starting all of them.
Bugs Fixed
----------
- Generated run scripts had trailing whitespace.
1.2.0 (2009/04/06)
==================
displays the name of the script being run
for each script when it is started, stopped, or restarted
1.1.0 (2008/02/01)
==================
Use the deployment name option (as provided by zc.recipe.deployment
0.6.0 and later) if present when generating script names.
Use the deployment rc-directory as the destination when a deployment
is used.
Use /sbin/chkconfig rather than chkconfig, as I'm told it is always in
that location and rarely in anyone's path. :)
1.0.0 (2008/01/15)
==================
Initial public release
| zc.recipe.rhrc | /zc.recipe.rhrc-1.4.2.tar.gz/zc.recipe.rhrc-1.4.2/README.txt | README.txt |
Create Red-Hat Linux (chkconfig) rc scripts
===========================================
The zc.recipes.rhrc recipe creates Red-Hat Linux (chkconfig) rc
scripts. It can create individual rc scripts, as well as combined rc
scripts that start multiple applications.
The recipe has a parts option that takes the names of sections that
define run scripts. They should either:
- Define a run-script option that contains a one-line shell script, or
- The file /etc/init.d/PART should exist, where PART is the part name.
A simple example will, hopefully make this clearer.
>>> demo = tmpdir('demo')
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
...
... [zope]
... run-script = /opt/zope/bin/zopectl -C /etc/zope.conf
... """ % dict(dest=demo))
Normally the recipe writes scripts to /etc/init.d. We can override
the destination, which we've done here, using a demonstration
directory. We specified a that it should get run-script source from
the zope section. Here the zope section is simply a configuration
section with a run-script option set directly, but it could have been
a part with a run-script option computed from the recipe.
If we run the buildout:
>>> print system('bin/buildout'),
Installing zoperc.
We'll get a zoperc script in our demo directory:
>>> ls(demo)
- zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# This script is for adminstrator convenience. It should
# NOT be installed as a system startup script!
<BLANKLINE>
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $*
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $*
<BLANKLINE>
;;
esac
<BLANKLINE>
There are a couple of things to note about the generated script:
- It uses $* to pass arguments, so arguments can't be quoted. This is
OK because the arguments will be simple verbs like start and stop.
- It includes a comment saying that the script shouldn't be used as a
system startup script.
For the script to be used for system startup, we need to specify
run-level information. We can to that using the chkconfig option:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
...
... [zope]
... run-script = /opt/zope/bin/zopectl -C /etc/zope.conf
... """ % dict(dest=demo))
Here we included a chkconfig option saying that Zope should be started
at run levels 3, 4, and 5 and that it's start and stop ordered should
be 90 and 10.
For demonstration purposes, we don't *really* want to run chkconfig,
so we use the chkconfigcommand option to tell the recipe to run echo
instead.
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
Installing zoperc.
--add zoperc
Now the script contains a chkconfig comment:
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $* \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $* \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
We can specify a user that the script should be run as:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
...
... [zope]
... run-script = /opt/zope/bin/zopectl -C /etc/zope.conf
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
Note the --del output. If we hadn't set the chkconfigcommand to echo,
then chkconfig --del would have been run on the zoperc script.
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
su zope -c \
"/opt/zope/bin/zopectl -C /etc/zope.conf $*" \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
su zope -c \
"/opt/zope/bin/zopectl -C /etc/zope.conf $*" \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
Note that now the su command is used to run the script. Because the
script is included in double quotes, it can't contain double
quotes. (The recipe makes no attempt to escape double quotes.)
Also note that now the script must be run as root, so the generated
script checks that root is running it.
If we say the user is root:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = root
...
... [zope]
... run-script = /opt/zope/bin/zopectl -C /etc/zope.conf
... """ % dict(dest=demo))
Then the generated script won't su, but it will still check that root
is running it:
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $* \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
/opt/zope/bin/zopectl -C /etc/zope.conf $* \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
A part that defines a run script can also define environment-variable
settings to be used by the rc script by supplying an env option:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
...
... [zope]
... run-script = /opt/zope/bin/zopectl -C /etc/zope.conf
... env = LD_LIBRARY_PATH=/opt/foolib
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/zope.conf $*" \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/zope.conf $*" \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
Working with existing control scripts
-------------------------------------
In the example above, we generated a script based on a command line.
If we have a part that creates a control script on its own, then it
can omit the run-script option and its already-created run script
will be used. Let's create a run script ourselves:
>>> write(demo, 'zope', '/opt/zope/bin/zopectl -C /etc/zope.conf $*')
Now we can remove the run-script option from the Zope section:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
...
... [zope]
... env = LD_LIBRARY_PATH=/opt/foolib
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
echo zope:
/demo/zope "$@" \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
echo zope:
/demo/zope "$@" \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
Here we just invoke the existing script. Note that the generated rc
script does not reflect the env or user options. When an existing
script is used, it is assumed to be complete.
>>> import os
>>> os.remove(join(demo, 'zope'))
Multiple processes
------------------
Sometimes, you need to start multiple processes. You can specify
multiple parts. For example, suppose we wanted to start 2 Zope
instances:
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = instance1 instance2
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
...
... [instance1]
... run-script = /opt/zope/bin/zopectl -C /etc/instance1.conf
... env = LD_LIBRARY_PATH=/opt/foolib
...
... [instance2]
... """ % dict(dest=demo))
>>> write(demo, 'instance2', '')
Note that for instance 2, we are arranging for the script to pre-exist.
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
echo instance2:
/demo/instance2 "$@" \
</dev/null
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*" \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*" \
</dev/null
<BLANKLINE>
echo instance2:
/demo/instance2 "$@" \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
Now the rc script starts both instances. Note that it stops them in
reverse order. This isn't so important in a case like this, but
would be more important if a later script depended on an earlier one.
In addition to the zoperc script, we got scripts for the instance with
the run-script option:
>>> ls(demo)
- instance2
- zoperc
- zoperc-instance1
>>> cat(demo, 'zoperc-instance1')
#!/bin/sh
<BLANKLINE>
# This script is for adminstrator convenience. It should
# NOT be installed as a system startup script!
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*"
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*"
<BLANKLINE>
;;
esac
<BLANKLINE>
The individual scripts don't have chkconfig information.
Independent processes
---------------------
Normally, processes are assumed to be dependent and are started in
order, stopped in reverse order, and, on restart, are all stopped and
then all started.
If the independent-processes option is used, then the generated master
run script will treat the processes as independent and restart
processes individually. With lots of independent processes, this can
reduce the amount of time individual processes are down.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = instance1 instance2
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
... independent-processes = true
...
... [instance1]
... run-script = /opt/zope/bin/zopectl -C /etc/instance1.conf
... env = LD_LIBRARY_PATH=/opt/foolib
...
... [instance2]
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add zoperc
>>> cat(demo, 'zoperc')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su zope -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*" \
</dev/null
<BLANKLINE>
echo instance2:
/demo/instance2 "$@" \
</dev/null
.. check validation
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = instance1 instance2
... dest = %(dest)s
... chkconfig = 345 90 10
... chkconfigcommand = echo
... user = zope
... independent-processes = yes
...
... [instance1]
... run-script = /opt/zope/bin/zopectl -C /etc/instance1.conf
... env = LD_LIBRARY_PATH=/opt/foolib
...
... [instance2]
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
zc.recipe.rhrc: Invalid value for independent-processes. Use 'true' or 'false'
While:
Installing.
Getting section zoperc.
Initializing part zoperc.
Error: Invalid value for independent-processes: yes
Deployments
-----------
The zc.recipe.rhrc recipe is designed to work with the
zc.recipe.deployment recipe. You can specify the name of a deployment
section. If a deployment section is specified then:
- the deployment name will be used for the rc scripts
- the user from the deployment section will be used if a user isn't
specified in the rc script's own section.
- the rc-directory option from the deployment will be used if
destination isn't specified.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [deployment]
... name = acme
... user = acme
... rc-directory = %(dest)s
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = instance1 instance2
... chkconfig = 345 90 10
... chkconfigcommand = echo
... deployment = deployment
...
... [instance1]
... run-script = /opt/zope/bin/zopectl -C /etc/instance1.conf
... env = LD_LIBRARY_PATH=/opt/foolib
...
... [instance2]
... """ % dict(dest=demo))
If a deployment is used, then any existing scripts must be
prefixed with the deployment name. We'll rename the instance2 script
to reflect that:
>>> os.rename(join(demo, 'instance2'), join(demo, 'acme-instance2'))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del zoperc
Installing zoperc.
--add acme
>>> ls(demo)
- acme
- acme-instance1
- acme-instance2
>>> cat(demo, 'acme')
#!/bin/sh
<BLANKLINE>
# the next line is for chkconfig
# chkconfig: 345 90 10
# description: please, please work
<BLANKLINE>
<BLANKLINE>
if [ $(whoami) != "root" ]; then
echo "You must be root."
exit 1
fi
<BLANKLINE>
case $1 in
stop)
<BLANKLINE>
echo acme-instance2:
/demo/acme-instance2 "$@" \
</dev/null
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su acme -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*" \
</dev/null
<BLANKLINE>
;;
restart)
<BLANKLINE>
${0} stop
sleep 1
${0} start
<BLANKLINE>
;;
*)
<BLANKLINE>
LD_LIBRARY_PATH=/opt/foolib \
su acme -c \
"/opt/zope/bin/zopectl -C /etc/instance1.conf $*" \
</dev/null
<BLANKLINE>
echo acme-instance2:
/demo/acme-instance2 "$@" \
</dev/null
<BLANKLINE>
;;
esac
<BLANKLINE>
Edge case, when we remove the part, we uninstall acme:
>>> write('buildout.cfg',
... """
... [buildout]
... parts =
... """)
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
--del acme
Process Management
==================
Normally, the recipe doesn't start and stop processes. If we want it
to, we can use the process-management option with a 'true' value.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... process-management = true
...
... [zope]
... run-script = echo zope
... """ % dict(dest=demo))
When the part is installed, the process is started:
>>> print system('bin/buildout'),
Installing zoperc.
zope start
It also gets started when the part updates. This is just to make sure
it is running.
>>> print system('bin/buildout'),
Updating zoperc.
zope start
If we update the part, then when the part is uninstalled and
reinstalled, the process will be stopped and started. We'll often
force this adding a digest option that exists solely to force a
reinstall, typically because something else in the buildout has
changed.
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
... process-management = true
... digest = 1
...
... [zope]
... run-script = echo zope
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Uninstalling zoperc.
Running uninstall recipe.
zope stop
Installing zoperc.
zope start
>>> print system('bin/buildout buildout:parts='),
Uninstalling zoperc.
Running uninstall recipe.
zope stop
.. make sure it works with multiple parts
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope zeo
... dest = %(dest)s
... process-management = true
...
... [zeo]
... run-script = echo zeo
...
... [zope]
... run-script = echo zope
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Installing zoperc.
zope start
zeo start
>>> print system('bin/buildout buildout:parts='),
Uninstalling zoperc.
Running uninstall recipe.
zeo stop
zope stop
.. make sure it works even if run script is missing
>>> write(demo, 'zeo', '#!/bin/sh\necho zeo\n')
>>> os.chmod(join(demo, 'zeo'), 0755)
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zeo
... dest = %(dest)s
... process-management = true
... [zeo]
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Installing zoperc.
zeo:
zeo
>>> remove(demo, 'zeo')
>>> print system('bin/buildout buildout:parts=') # doctest: +ELLIPSIS
Uninstalling zoperc.
Running uninstall recipe.
zeo:
...
Regression Tests
================
Exception formatting bug
------------------------
If we do not provide a runscript, we get an exception (bug was: improperly
formatted exception string, contained literal '%s'):
>>> write('buildout.cfg',
... """
... [buildout]
... parts = zoperc
...
... [zoperc]
... recipe = zc.recipe.rhrc
... parts = zope
... dest = %(dest)s
...
... [zope]
... """ % dict(dest=demo))
>>> print system('bin/buildout'),
Installing zoperc.
zc.recipe.rhrc: Part zope doesn't define run-script and /demo/zope doesn't exist.
While:
Installing zoperc.
Error: No script for zope
| zc.recipe.rhrc | /zc.recipe.rhrc-1.4.2.tar.gz/zc.recipe.rhrc-1.4.2/src/zc/recipe/rhrc/README.txt | README.txt |
import logging
import os
import shutil
import subprocess
import stat

import zc.buildout

# NOTE(review): shutil is imported but not used anywhere in this module --
# TODO confirm before removing (the file may be read by other tooling).

# Module-level logger used for recipe error reporting.
logger = logging.getLogger('zc.recipe.rhrc')
class Recipe:
    """Buildout recipe that writes Red Hat style rc (init.d) scripts.

    For each part listed in the ``parts`` option, the part's
    ``run-script`` option (or, failing that, an existing script named
    after the part in the destination directory) is wrapped in a shell
    control script written to the ``dest`` directory.  The scripts may
    optionally be registered with chkconfig and, when
    ``process-management`` is ``true``, started at install time.
    """

    def __init__(self, buildout, name, options):
        self.name, self.options = name, options
        deployment = self.deployment = options.get('deployment')
        if deployment:
            # A deployment section supplies defaults for the script
            # name, the user to su to, and the destination directory.
            options['deployment-name'] = buildout[deployment].get(
                'name', deployment)
            if 'user' not in options:
                options['user'] = buildout[deployment].get('user', '')
            options['dest'] = self.options.get(
                'dest', buildout[deployment]['rc-directory'])
        else:
            options['dest'] = self.options.get('dest', '/etc/init.d')
        # Record each part's run script and environment settings in our
        # own options so buildout reinstalls when any of them change.
        options['scripts'] = '\n'.join(
            [buildout[part].get('run-script', '')
             for part in options['parts'].split()])
        options['envs'] = '\n'.join(
            [buildout[part].get('env', '')
             for part in options['parts'].split()])
        independent = options.get('independent-processes')
        if independent:
            if independent not in ('true', 'false'):
                logger.error(
                    "Invalid value for independent-processes. "
                    " Use 'true' or 'false'")
                raise zc.buildout.UserError(
                    'Invalid value for independent-processes:', independent)
        if options.get('process-management', 'false') not in (
                'true', 'false'):
            raise zc.buildout.UserError(
                'Invalid process-management option: %r'
                % (options['process-management']))

    def install(self):
        """Write the control script(s); return the list of paths created.

        Returns None without creating anything when ``parts`` is empty.
        If anything fails part way through, every file created so far is
        removed before the exception propagates.
        """
        options = self.options
        name = options.get('deployment-name', self.name)
        parts = options['parts'].split()
        if not parts:
            return
        scripts = options['scripts'].split('\n')
        chkconfig = self.options.get('chkconfig')
        user = options.get('user', '')
        if user == 'root':
            user = ''  # no need to su to root
        envs = options['envs'].split('\n')
        created = []
        start = self.options.get('process-management')
        try:
            if len(scripts) == 1:
                # Single part: generate one control script only.
                script = scripts[0]
                if script:
                    if user:
                        script = 'su %s -c \\\n "%s $*"' % (user, script)
                    else:
                        script += ' $*'
                    env = envs[0]
                    if env:
                        script = env + ' \\\n ' + script
                else:
                    # No run-script given; fall back to an existing
                    # script named after the part.
                    script = self.no_script(parts[0])
                if chkconfig:
                    script += ' \\\n </dev/null'
                self.output(chkconfig, script, name, created,
                            start=start)
            else:
                # Multiple parts: write one control script per part that
                # has a run-script, plus a master script that runs all of
                # them.  The master script's "stop" branch uses the
                # reversed order so parts are stopped in the opposite
                # order they are started.
                cooked = []
                for part, env, script in zip(parts, envs, scripts):
                    if script:
                        if user:
                            script = 'su %s -c \\\n "%s $*"' % (
                                user, script)
                        else:
                            script += ' $*'
                        if env:
                            script = env + ' \\\n ' + script
                        self.output('', script, name+'-'+part, created)
                    else:
                        script = self.no_script(part)
                    cooked.append(script)
                if chkconfig:
                    cooked = [s + ' \\\n </dev/null'
                              for s in cooked]
                script = '\n\n '.join(cooked)
                cooked.reverse()
                rscript = '\n\n '.join(cooked)
                self.output(
                    chkconfig, script, name, created, rscript,
                    independent=options.get(
                        'independent-processes') == 'true',
                    start=start,
                    )
            return created
        except:
            # Bare except on purpose: clean up partially-created files
            # for *any* failure (including KeyboardInterrupt), then
            # re-raise.
            for f in created:
                os.remove(f)
            raise

    def no_script(self, part):
        """Return a fallback invocation for a part without a run-script.

        The part must already have a control script in the destination
        directory (named ``<deployment-name>-<part>`` when a deployment
        is used, else just ``<part>``); otherwise a UserError is raised.
        """
        options = self.options
        name = options.get('deployment-name', self.name)
        if self.deployment:
            path = os.path.join(options['dest'], name+'-'+part)
            script = 'echo %s:\n%s' % (name+'-'+part, path)
        else:
            path = os.path.join(options['dest'], part)
            script = 'echo %s:\n%s' % (part, path)
        if not os.path.exists(path):
            logger.error("Part %s doesn't define run-script "
                         "and %s doesn't exist."
                         % (part, path))
            raise zc.buildout.UserError("No script for", part)
        return script + ' "$@"'

    def output(self, chkconfig, script, ctl, created,
               rscript=None, independent=False, start=False):
        """Render a control script, write it to ``dest`` and register it.

        chkconfig
            chkconfig priority line, or a false value to skip
            registration.
        script
            shell command(s) to run for "start" (and any other argument).
        ctl
            base name of the control script to write.
        created
            list the new path is appended to (for cleanup on failure).
        rscript
            reverse-ordered command list used for "stop"; defaults to
            ``script``.
        independent
            if true, use the template without start/stop/restart
            dispatch.
        start
            if true, run the new script with "start" and raise
            RuntimeError when it exits non-zero.
        """
        if independent:
            rc = independent_template % dict(
                rootcheck=self.options.get('user') and rootcheck or '',
                CHKCONFIG=(chkconfig
                           and (chkconfig_template % chkconfig)
                           or non_chkconfig_template),
                CTL_SCRIPT=script,
                )
        else:
            rc = rc_template % dict(
                rootcheck=self.options.get('user') and rootcheck or '',
                CHKCONFIG=(chkconfig
                           and (chkconfig_template % chkconfig)
                           or non_chkconfig_template),
                CTL_SCRIPT=script,
                CTL_SCRIPT_R=rscript or script,
                )
        dest = self.options.get('dest', '/etc/init.d')
        ctlpath = os.path.join(dest, ctl)
        # Close the file explicitly: the previous open(...).write(rc)
        # relied on the garbage collector and could leak the handle.
        f = open(ctlpath, 'w')
        try:
            f.write(rc)
        finally:
            f.close()
        created.append(ctlpath)
        # Make the script executable by owner and group.
        os.chmod(ctlpath,
                 os.stat(ctlpath).st_mode | stat.S_IEXEC | stat.S_IXGRP)
        if chkconfig:
            chkconfigcommand = self.options.get('chkconfigcommand',
                                                '/sbin/chkconfig')
            os.system(chkconfigcommand+' --add '+ctl)
        if start and subprocess.call([ctlpath, 'start']):
            raise RuntimeError("%s start failed" % ctlpath)

    # Updating is the same as installing.
    update = install
def uninstall(name, options):
    """Buildout uninstall hook for the rhrc recipe.

    Stops the managed process via its control script when
    ``process-management`` is ``true``, and deregisters the script from
    chkconfig when the ``chkconfig`` option was used.
    """
    name = options.get('deployment-name', name)
    managed = options.get('process-management') == 'true'
    if managed:
        dest = options.get('dest', '/etc/init.d')
        subprocess.call([os.path.join(dest, name), 'stop'])
    if options.get('chkconfig'):
        command = options.get('chkconfigcommand', '/sbin/chkconfig')
        os.system(' '.join([command, '--del', name]))
# Header inserted at the top of a generated script when the
# ``chkconfig`` option is used; %s is filled with the option value
# (the chkconfig start/stop priority spec).
chkconfig_template = '''\
# the next line is for chkconfig
# chkconfig: %s
# description: please, please work
'''

# Header used instead when the script is NOT registered with chkconfig.
# (The template text is emitted verbatim into generated scripts, so its
# wording -- including the existing spelling -- is left untouched here.)
non_chkconfig_template = '''\
# This script is for adminstrator convenience. It should
# NOT be installed as a system startup script!
'''

# Shell fragment that aborts unless running as root; only included when
# a ``user`` option is set (see Recipe.output).
rootcheck = """
if [ $(whoami) != "root" ]; then
  echo "You must be root."
  exit 1
fi
"""

# Main control-script template: dispatches on the first argument, with a
# separate (reverse-ordered) command list for "stop" and a
# stop/sleep/start sequence for "restart".
rc_template = """#!/bin/sh
%(CHKCONFIG)s
%(rootcheck)s
case $1 in
stop)
  %(CTL_SCRIPT_R)s
  ;;
restart)
  ${0} stop
  sleep 1
  ${0} start
  ;;
*)
  %(CTL_SCRIPT)s
  ;;
esac
"""

# Template used with ``independent-processes = true``: no start/stop
# dispatch, the wrapped script receives the arguments directly.
independent_template = """#!/bin/sh
%(CHKCONFIG)s
%(rootcheck)s
%(CTL_SCRIPT)s
"""
===========================
Deployment-specific scripts
===========================
Many deployments provide scripts that tie the configurations into the
software. This is often done to make it easier to work with specific
deployments of the software.
The conventional Unix file hierarchy doesn't really provide a good
shared place for such scripts; the zc.recipe.deployment:script recipe
generates these scripts in the deployment's bin-directory, but we'd
rather have the resulting scripts associated with the deployment itself.
The options for the recipe are the same as those for the
zc.recipe.egg:script recipe, with the addition of a required deployment
setting. The etc-directory from the deployment is used instead of the
buildout's bin-directory. This allows deployment-specific information
to be embedded in the script via the initialization setting.
Let's take a look at a simple case. We'll need a package with a
console_script entry point:
>>> write('setup.py', '''\
... from setuptools import setup
... setup(
... name="testpkg",
... package_dir={"": "src"},
... py_modules=["testmodule"],
... zip_safe=False,
... entry_points={
... "console_scripts": [
... "myscript=testmodule:main",
... ],
... },
... )
... ''')
>>> mkdir('src')
>>> write('src', 'testmodule.py', '''\
... some_setting = "42"
... def main():
... print some_setting
... ''')
>>> write('buildout.cfg',
... '''
... [buildout]
... develop = .
... parts = somescript
...
... [mydep]
... recipe = zc.recipe.deployment
... prefix = %s
... user = %s
... etc-user = %s
...
... [somescript]
... recipe = zc.recipe.script
... deployment = mydep
... eggs = testpkg
... scripts = myscript
... initialization =
... import testmodule
... testmodule.some_setting = "24"
... ''' % (sample_buildout, user, user))
>>> print system(join('bin', 'buildout')), # doctest: +NORMALIZE_WHITESPACE
Develop: 'PREFIX/.'
Installing mydep.
zc.recipe.deployment:
Creating 'PREFIX/etc/mydep',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/cache/mydep',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/lib/mydep',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/log/mydep',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/var/run/mydep',
mode 750, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/cron.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/init.d',
mode 755, user 'USER', group 'GROUP'
zc.recipe.deployment:
Creating 'PREFIX/etc/logrotate.d',
mode 755, user 'USER', group 'GROUP'
Installing somescript.
Generated script 'PREFIX/etc/mydep/myscript'.
>>> print ls("etc/mydep")
drwxr-xr-x USER GROUP etc/mydep
>>> cat("etc/mydep/myscript") # doctest: +NORMALIZE_WHITESPACE
#!/usr/bin/python
<BLANKLINE>
import sys
sys.path[0:0] = [
'PREFIX/src',
]
<BLANKLINE>
import testmodule
testmodule.some_setting = "24"
<BLANKLINE>
import testmodule
<BLANKLINE>
if __name__ == '__main__':
sys.exit(testmodule.main())
| zc.recipe.script | /zc.recipe.script-1.0.2.tar.gz/zc.recipe.script-1.0.2/src/zc/recipe/script/README.txt | README.txt |
Test-Runner Recipe
==================
The test-runner recipe, zc.recipe.testrunner, creates a test runner
for a project.
The test-runner recipe has several options:
eggs
The eggs option specifies a list of eggs to test, given as one or
more setuptools requirement strings. Each string must be given on
a separate line.
script
The script option gives the name of the script to generate, in the
buildout bin directory. If the option isn't used, the part name
will be used.
extra-paths
One or more extra paths to include in the generated test script.
defaults
The defaults option lets you specify testrunner default
options. These are specified as Python source for an expression
yielding a list, typically a list literal.
working-directory
The working-directory option lets you specify a directory where the
tests will run. The testrunner will change to this directory when
run. If the working directory is the empty string or not specified
at all, the recipe will create a working directory among the parts.
environment
A set of environment variables that should be exported before
starting the tests.
initialization
Provide initialization code to run before running tests.
relative-paths
Use egg, test, and working-directory paths relative to the test script.
(Note that, at this time, due to limitations in the Zope test runner, the
distributions cannot be zip files. TODO: Fix the test runner!)
To illustrate this, we'll create a pair of projects in our sample
buildout:
>>> mkdir(sample_buildout, 'demo')
>>> mkdir(sample_buildout, 'demo', 'demo')
>>> write(sample_buildout, 'demo', 'demo', '__init__.py', '')
>>> write(sample_buildout, 'demo', 'demo', 'tests.py',
... '''
... import unittest
...
... class TestDemo(unittest.TestCase):
... def test(self):
... pass
...
... def test_suite():
... loader = unittest.TestLoader()
... return loader.loadTestsFromTestCase(TestDemo)
... ''')
>>> write(sample_buildout, 'demo', 'setup.py',
... """
... from setuptools import setup
...
... setup(name = "demo")
... """)
>>> write(sample_buildout, 'demo', 'README.txt', '')
>>> mkdir(sample_buildout, 'demo2')
>>> mkdir(sample_buildout, 'demo2', 'demo2')
>>> write(sample_buildout, 'demo2', 'demo2', '__init__.py', '')
>>> write(sample_buildout, 'demo2', 'demo2', 'tests.py',
... '''
... import unittest
...
... class Demo2Tests(unittest.TestCase):
... def test2(self):
... pass
...
... def test_suite():
... loader = unittest.TestLoader()
... return loader.loadTestsFromTestCase(Demo2Tests)
... ''')
>>> write(sample_buildout, 'demo2', 'setup.py',
... """
... from setuptools import setup
...
... setup(name = "demo2", install_requires= ['demoneeded'])
... """)
>>> write(sample_buildout, 'demo2', 'README.txt', '')
Demo 2 depends on demoneeded:
>>> mkdir(sample_buildout, 'demoneeded')
>>> mkdir(sample_buildout, 'demoneeded', 'demoneeded')
>>> write(sample_buildout, 'demoneeded', 'demoneeded', '__init__.py', '')
>>> write(sample_buildout, 'demoneeded', 'demoneeded', 'tests.py',
... '''
... import unittest
...
... class TestNeeded(unittest.TestCase):
... def test_needed(self):
... pass
...
... def test_suite():
... loader = unittest.TestLoader()
... return loader.loadTestsFromTestCase(TestNeeded)
... ''')
>>> write(sample_buildout, 'demoneeded', 'setup.py',
... """
... from setuptools import setup
...
... setup(name = "demoneeded")
... """)
>>> write(sample_buildout, 'demoneeded', 'README.txt', '')
We'll update our buildout to install the demo project as a
develop egg and to create the test script:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo demoneeded demo2
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs =
... demo
... demo2
... script = test
... """)
Note that we specified both demo and demo2 in the eggs
option and that we put them on separate lines.
We also specified the offline option to run the buildout in offline mode.
Now when we run the buildout:
>>> import os
>>> os.chdir(sample_buildout)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
We get a test script installed in our bin directory:
>>> ls(sample_buildout, 'bin')
- buildout
- test
We also get a part directory for the tests to run in:
>>> ls(sample_buildout, 'parts')
d testdemo
And updating leaves its contents intact:
>>> _ = system(os.path.join(sample_buildout, 'bin', 'test') +
... ' -q --coverage=coverage')
>>> ls(sample_buildout, 'parts', 'testdemo')
d coverage
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> ls(sample_buildout, 'parts', 'testdemo')
d coverage
We can run the test script to run our demo test:
>>> print_(system(os.path.join(sample_buildout, 'bin', 'test') + ' -vv'),
... end='')
Running tests at level 1
Running zope.testrunner.layer.UnitTests tests:
Set up zope.testrunner.layer.UnitTests in 0.001 seconds.
Running:
test (demo.tests.TestDemo...)
test2 (demo2.tests.Demo2Tests...)
Ran 2 tests with 0 failures, 0 errors and 0 skipped in 0.001 seconds.
Tearing down left over layers:
Tear down zope.testrunner.layer.UnitTests in 0.001 seconds.
Note that we didn't run the demoneeded tests. Tests are only run for
the eggs listed, not for their dependencies.
If we leave the script option out of the configuration, then the test
script will get its name from the part:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> ls(sample_buildout, 'bin')
- buildout
- testdemo
We can run the test script to run our demo test:
>>> print_(system(os.path.join(sample_buildout, 'bin', 'testdemo') + ' -q'),
... end='')
Running zope.testrunner.layer.UnitTests tests:
Set up zope.testrunner.layer.UnitTests in 0.001 seconds.
Ran 1 tests with 0 failures, 0 errors and 0 skipped in 0.001 seconds.
Tearing down left over layers:
Tear down zope.testrunner.layer.UnitTests in 0.001 seconds.
If we need to include other paths in our test script, we can use the
extra-paths option to specify them:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/sample-buildout/parts/testdemo')
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run([
'--test-path', '/sample-buildout/demo',
]))
We can use the working-directory option to specify a working
directory:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... working-directory = /foo/bar
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/foo/bar')
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run([
'--test-path', '/sample-buildout/demo',
]))
Now that our tests use a specified working directory, their designated
part directory is gone:
>>> ls(sample_buildout, 'parts')
If we need to specify default options, we can use the defaults
option. For example, Zope 3 applications typically define test suites
in modules named ftests or tests. The default test runner behaviour
is to look in modules named tests. To specify that we want to look in
tests and ftests module, we'd supply a default for the --tests-pattern
option. If we like dots, we could also request more verbose output
using the -v option:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... defaults = ['--tests-pattern', '^f?tests$',
... '-v'
... ]
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/sample-buildout/parts/testdemo')
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run((['--tests-pattern', '^f?tests$',
'-v'
]) + [
'--test-path', '/sample-buildout/demo',
]))
Some things to note from this example:
- Parentheses are placed around the given expression.
- Leading whitespace is removed.
To demonstrate the ``environment`` option, we first update the tests to
include a check for an environment variable:
>>> write(sample_buildout, 'demo', 'demo', 'tests.py',
... '''
... import unittest
... import os
...
... class DemoTests(unittest.TestCase):
... def test(self):
... self.assertEqual('42', os.environ.get('zc.recipe.testrunner', '23'))
...
... def test_suite():
... loader = unittest.TestLoader()
... return loader.loadTestsFromTestCase(DemoTests)
... ''')
Running them with the current buildout will produce a failure:
>>> print_(system(os.path.join(sample_buildout, 'bin', 'testdemo')
... + ' -vv'),
... end='') # doctest: +ELLIPSIS
Running tests at level 1
Running zope.testrunner.layer.UnitTests tests:
Set up zope.testrunner.layer.UnitTests in 0.001 seconds.
Running:
test (demo.tests.DemoTests...) (... s)
<BLANKLINE>
<BLANKLINE>
Failure in test test (demo.tests.DemoTests...)
Traceback (most recent call last):
...
AssertionError: '42' != '23'
...
Ran 1 tests with 1 failures, 0 errors and 0 skipped in 0.001 seconds.
Tearing down left over layers:
Tear down zope.testrunner.layer.UnitTests in 0.001 seconds.
<BLANKLINE>
Tests with failures:
test (demo.tests.DemoTests...)
Let's update the buildout to specify the environment variable for the test
runner:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... environment = testenv
...
... [testenv]
... zc.recipe.testrunner = 42
... """)
We run buildout and see that the test runner script now includes setting up
the environment variable. Also, the tests pass again:
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/sample-buildout/parts/testdemo')
os.environ['zc.recipe.testrunner'] = '42'
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run([
'--test-path', '/sample-buildout/demo',
]))
>>> print_(system(os.path.join(sample_buildout, 'bin', 'testdemo')+' -vv'),
... end='')
Running tests at level 1
Running zope.testrunner.layer.UnitTests tests:
Set up zope.testrunner.layer.UnitTests in 0.001 seconds.
Running:
test (demo.tests.DemoTests...)
Ran 1 tests with 0 failures, 0 errors and 0 skipped in 0.001 seconds.
Tearing down left over layers:
Tear down zope.testrunner.layer.UnitTests in 0.001 seconds.
One can add initialization steps in the buildout. These will be added to the
end of the script:
>>> write(sample_buildout, 'buildout.cfg',
... r"""
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... defaults = ['--tests-pattern', '^f?tests$',
... '-v'
... ]
... initialization = sys.stdout.write('Hello all you egg-laying pythons!\n')
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/sample-buildout/parts/testdemo')
sys.stdout.write('Hello all you egg-laying pythons!\n')
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run((['--tests-pattern', '^f?tests$',
'-v'
]) + [
'--test-path', '/sample-buildout/demo',
]))
This will also work with a multi-line initialization section:
>>> write(sample_buildout, 'buildout.cfg',
... r"""
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... defaults = ['--tests-pattern', '^f?tests$',
... '-v'
... ]
... initialization = sys.stdout.write('Hello all you egg-laying pythons!\n')
... sys.stdout.write('I thought pythons were live bearers?\n')
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import sys
sys.path[0:0] = [
...
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir('/sample-buildout/parts/testdemo')
sys.stdout.write('Hello all you egg-laying pythons!\n')
sys.stdout.write('I thought pythons were live bearers?\n')
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run((['--tests-pattern', '^f?tests$',
'-v'
]) + [
'--test-path', '/sample-buildout/demo',
]))
If the relative-paths option is used, egg (and extra) paths are
generated relative to the test script.
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... ${buildout:directory}/sources
... relative-paths = true
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import os
<BLANKLINE>
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
base = os.path.dirname(base)
<BLANKLINE>
import sys
sys.path[0:0] = [
join(base, 'demo'),
...
'/usr/local/zope/lib/python',
join(base, 'sources'),
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir(join(base, 'parts/testdemo'))
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run([
'--test-path', join(base, 'demo'),
]))
The relative-paths option can be specified at the buildout level:
>>> write(sample_buildout, 'buildout.cfg',
... """
... [buildout]
... develop = demo
... parts = testdemo
... offline = true
... relative-paths = true
...
... [testdemo]
... recipe = zc.recipe.testrunner
... eggs = demo
... extra-paths = /usr/local/zope/lib/python
... ${buildout:directory}/sources
... """)
>>> print_(system(os.path.join(sample_buildout, 'bin', 'buildout') + ' -q'),
... end='')
>>> cat(sample_buildout, 'bin', 'testdemo') # doctest: +ELLIPSIS
#!/usr/local/bin/python2.4
<BLANKLINE>
import os
<BLANKLINE>
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
base = os.path.dirname(base)
<BLANKLINE>
import sys
sys.path[0:0] = [
join(base, 'demo'),
...
'/usr/local/zope/lib/python',
join(base, 'sources'),
]
<BLANKLINE>
import os
sys.argv[0] = os.path.abspath(sys.argv[0])
os.chdir(join(base, 'parts/testdemo'))
<BLANKLINE>
<BLANKLINE>
import zope.testrunner
<BLANKLINE>
if __name__ == '__main__':
sys.exit(zope.testrunner.run([
'--test-path', join(base, 'demo'),
]))
| zc.recipe.testrunner | /zc.recipe.testrunner-3.0-py3-none-any.whl/zc/recipe/testrunner/README.rst | README.rst |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.