rem (string, lengths 1-322k) | add (string, lengths 0-2.05M) | context (string, lengths 4-228k) | meta (string, lengths 156-215) |
---|---|---|---|
if cal_data.name.lower() == 'organizer': | if cal_data.name.lower() == 'organizer': | def parse_ics(self, cr, uid, child, cal_children=None, context=None): """ parse calendaring and scheduling information @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
for data in datas: | for data in datas: | def create_ics(self, cr, uid, datas, name, ical, context=None): """ create calendaring and scheduling information @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
_name = 'basic.calendar' | _name = 'basic.calendar' | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
'name': fields.char("Name", size=64), | 'name': fields.char("Name", size=64), | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
'line_ids': fields.one2many('basic.calendar.lines', 'calendar_id', 'Calendar Lines'), | 'line_ids': fields.one2many('basic.calendar.lines', 'calendar_id', 'Calendar Lines'), | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) | ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) | def get_calendar_objects(self, cr, uid, ids, parent=None, domain=None, context=None): if not context: context = {} if not domain: domain = [] res = [] ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue line_domain = eval(line.domain) line_domain += domain if ctx_res_id: line_domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, line_domain, context=context) for data in mod_obj.browse(cr, uid, data_ids, context): ctx = parent and parent.context or None node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) res.append(node) return res | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
if ctx_res_id: | if ctx_res_id: | def get_calendar_objects(self, cr, uid, ids, parent=None, domain=None, context=None): if not context: context = {} if not domain: domain = [] res = [] ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue line_domain = eval(line.domain) line_domain += domain if ctx_res_id: line_domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, line_domain, context=context) for data in mod_obj.browse(cr, uid, data_ids, context): ctx = parent and parent.context or None node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) res.append(node) return res | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
data_ids = mod_obj.search(cr, uid, line_domain, context=context) | data_ids = mod_obj.search(cr, uid, line_domain, context=context) | def get_calendar_objects(self, cr, uid, ids, parent=None, domain=None, context=None): if not context: context = {} if not domain: domain = [] res = [] ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue line_domain = eval(line.domain) line_domain += domain if ctx_res_id: line_domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, line_domain, context=context) for data in mod_obj.browse(cr, uid, data_ids, context): ctx = parent and parent.context or None node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) res.append(node) return res | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) | node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) | def get_calendar_objects(self, cr, uid, ids, parent=None, domain=None, context=None): if not context: context = {} if not domain: domain = [] res = [] ctx_res_id = context.get('res_id', None) ctx_model = context.get('model', None) for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue line_domain = eval(line.domain) line_domain += domain if ctx_res_id: line_domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, line_domain, context=context) for data in mod_obj.browse(cr, uid, data_ids, context): ctx = parent and parent.context or None node = res_node_calendar('%s' %data.id, parent, ctx, data, line.object_id.model, data.id) res.append(node) return res | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
ctx_model = context.get('model', None) ctx_res_id = context.get('res_id', None) | ctx_model = context.get('model', None) ctx_res_id = context.get('res_id', None) | def export_cal(self, cr, uid, ids, vobj='vevent', context=None): """ Export Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of calendar’s IDs @param context: A standard dictionary for contextual values """ if not context: context = {} ctx_model = context.get('model', None) ctx_res_id = context.get('res_id', None) ical = vobject.iCalendar() for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue domain = eval(line.domain) if ctx_res_id: domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, domain, context=context) datas = mod_obj.read(cr, uid, data_ids, context=context) context.update({'model': line.object_id.model, 'calendar_id': cal.id }) self.__attribute__ = get_attribute_mapping(cr, uid, line.name, context) self.create_ics(cr, uid, datas, line.name, ical, context=context) return ical.serialize() | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
if ctx_res_id: | if ctx_res_id: | def export_cal(self, cr, uid, ids, vobj='vevent', context=None): """ Export Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of calendar’s IDs @param context: A standard dictionary for contextual values """ if not context: context = {} ctx_model = context.get('model', None) ctx_res_id = context.get('res_id', None) ical = vobject.iCalendar() for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue domain = eval(line.domain) if ctx_res_id: domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, domain, context=context) datas = mod_obj.read(cr, uid, data_ids, context=context) context.update({'model': line.object_id.model, 'calendar_id': cal.id }) self.__attribute__ = get_attribute_mapping(cr, uid, line.name, context) self.create_ics(cr, uid, datas, line.name, ical, context=context) return ical.serialize() | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
self.__attribute__ = get_attribute_mapping(cr, uid, line.name, context) | self.__attribute__ = get_attribute_mapping(cr, uid, line.name, context) | def export_cal(self, cr, uid, ids, vobj='vevent', context=None): """ Export Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of calendar’s IDs @param context: A standard dictionary for contextual values """ if not context: context = {} ctx_model = context.get('model', None) ctx_res_id = context.get('res_id', None) ical = vobject.iCalendar() for cal in self.browse(cr, uid, ids): for line in cal.line_ids: if ctx_model and ctx_model != line.object_id.model: continue if line.name in ('valarm', 'attendee'): continue domain = eval(line.domain) if ctx_res_id: domain += [('id','=',ctx_res_id)] mod_obj = self.pool.get(line.object_id.model) data_ids = mod_obj.search(cr, uid, domain, context=context) datas = mod_obj.read(cr, uid, data_ids, context=context) context.update({'model': line.object_id.model, 'calendar_id': cal.id }) self.__attribute__ = get_attribute_mapping(cr, uid, line.name, context) self.create_ics(cr, uid, datas, line.name, ical, context=context) return ical.serialize() | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
vals = [] | vals = [] | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
data_id = self.search(cr, uid, [])[0] | data_id = self.search(cr, uid, [])[0] | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
obj = self.pool.get(cal_children[child.name.lower()]) if hasattr(obj, 'check_import'): obj.check_import(cr, uid, vals, context=context) else: | objs.append(cal_children[child.name.lower()]) for obj_name in list(set(objs)): obj = self.pool.get(obj_name) if hasattr(obj, 'check_import'): obj.check_import(cr, uid, vals, context=context) checked = True if not checked: | def import_cal(self, cr, uid, content, data_id=None, context=None): """ Import Calendar @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param data_id: Get Data’s ID or False @param context: A standard dictionary for contextual values """ | 1263dd94aec18dc93f2c1301bed8d5db4810ef61 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1263dd94aec18dc93f2c1301bed8d5db4810ef61/calendar.py |
progress = dict(map(lambda x: (x[0], (x[1], x[2], x[3])), cr.fetchall())) | progress = dict(map(lambda x: (x[0], (x[1] or 0.0, x[2] or 0.0, x[3] or 0.0)), cr.fetchall())) | def _get_all_child_projects(ids): """Recursively get child project ids""" child_ids = flatten([project_hierarchy.get(idn, []) for idn in ids]) if child_ids: child_ids = _get_all_child_projects(child_ids) return ids + child_ids | 567f02338b5c7e30d51fc9f0ec5d630d3cee7449 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/567f02338b5c7e30d51fc9f0ec5d630d3cee7449/project.py |
| | def _get_all_child_projects(ids): """Recursively get child project ids""" child_ids = flatten([project_hierarchy.get(idn, []) for idn in ids]) if child_ids: child_ids = _get_all_child_projects(child_ids) return ids + child_ids | 567f02338b5c7e30d51fc9f0ec5d630d3cee7449 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/567f02338b5c7e30d51fc9f0ec5d630d3cee7449/project.py |
'planned_hours': fields.function(_progress_rate, multi="progress", method=True, string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.", store=True), 'effective_hours': fields.function(_progress_rate, multi="progress", method=True, string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.", store=True), 'total_hours': fields.function(_progress_rate, multi="progress", method=True, string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.", store=True), 'progress_rate': fields.function(_progress_rate, multi="progress", method=True, string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.", store=True), | 'planned_hours': fields.function(_progress_rate, multi="progress", method=True, string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.", store = { 'project.project': (lambda self, cr, uid, ids, c={}: ids, ['tasks'], 10), 'project.task': (_get_project_task, ['planned_hours', 'effective_hours', 'remaining_hours', 'total_hours', 'progress', 'delay_hours'], 10), 'project.task.work': (_get_project_work, ['hours'], 10), }), 'effective_hours': fields.function(_progress_rate, multi="progress", method=True, string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.", store = { 'project.project': (lambda self, cr, uid, ids, c={}: ids, ['tasks'], 10), 'project.task': (_get_project_task, ['planned_hours', 'effective_hours', 'remaining_hours', 'total_hours', 'progress', 'delay_hours'], 10), 'project.task.work': (_get_project_work, ['hours'], 10), }), 'total_hours': fields.function(_progress_rate, multi="progress", method=True, string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.", store = { 'project.project': (lambda self, cr, uid, ids, c={}: ids, ['tasks'], 10), 'project.task': (_get_project_task, ['planned_hours', 'effective_hours', 'remaining_hours', 'total_hours', 'progress', 'delay_hours'], 10), 'project.task.work': (_get_project_work, ['hours'], 10), }), 'progress_rate': fields.function(_progress_rate, multi="progress", method=True, string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.", store = { 'project.project': (lambda self, cr, uid, ids, c={}: ids, ['tasks'], 10), 'project.task': (_get_project_task, ['planned_hours', 'effective_hours', 'remaining_hours', 'total_hours', 'progress', 'delay_hours'], 10), 'project.task.work': (_get_project_work, ['hours'], 10), }), | def unlink(self, cr, uid, ids, *args, **kwargs): for proj in self.browse(cr, uid, ids): if proj.tasks: raise osv.except_osv(_('Operation Not Permitted !'), _('You can not delete a project with tasks. I suggest you to deactivate it.')) return super(project, self).unlink(cr, uid, ids, *args, **kwargs) | 567f02338b5c7e30d51fc9f0ec5d630d3cee7449 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/567f02338b5c7e30d51fc9f0ec5d630d3cee7449/project.py |
'effective_hours': fields.function(_hours_get, method=True, string='Hours Spent', multi='hours', store=True, help="Computed using the sum of the task work done."), | 'effective_hours': fields.function(_hours_get, method=True, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.", store = { 'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids'], 10), 'project.task.work': (_get_task, ['hours'], 10), }), | def _is_template(self, cr, uid, ids, field_name, arg, context=None): res = {} for task in self.browse(cr, uid, ids, context=context): res[task.id] = True if task.project_id: if task.project_id.active == False or task.project_id.state == 'template': res[task.id] = False return res | 567f02338b5c7e30d51fc9f0ec5d630d3cee7449 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/567f02338b5c7e30d51fc9f0ec5d630d3cee7449/project.py |
'total_hours': fields.function(_hours_get, method=True, string='Total Hours', multi='hours', store=True, help="Computed as: Time Spent + Remaining Time."), 'progress': fields.function(_hours_get, method=True, string='Progress (%)', multi='hours', group_operator="avg", store=True, help="Computed as: Time Spent / Total Time."), 'delay_hours': fields.function(_hours_get, method=True, string='Delay Hours', multi='hours', store=True, help="Computed as difference of the time estimated by the project manager and the real time to close the task."), | 'total_hours': fields.function(_hours_get, method=True, string='Total Hours', multi='hours', help="Computed as: Time Spent + Remaining Time.", store = { 'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids'], 10), 'project.task.work': (_get_task, ['hours'], 10), }), 'progress': fields.function(_hours_get, method=True, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.", store = { 'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids'], 10), 'project.task.work': (_get_task, ['hours'], 10), }), 'delay_hours': fields.function(_hours_get, method=True, string='Delay Hours', multi='hours', help="Computed as difference of the time estimated by the project manager and the real time to close the task.", store = { 'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids'], 10), 'project.task.work': (_get_task, ['hours'], 10), }), | def _is_template(self, cr, uid, ids, field_name, arg, context=None): res = {} for task in self.browse(cr, uid, ids, context=context): res[task.id] = True if task.project_id: if task.project_id.active == False or task.project_id.state == 'template': res[task.id] = False return res | 567f02338b5c7e30d51fc9f0ec5d630d3cee7449 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/567f02338b5c7e30d51fc9f0ec5d630d3cee7449/project.py |
dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S') dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S') | dt_start = datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S') dt_end = datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S') | def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None): """ Compute the vacation from the working calendar of the resource. @param calendar_id : working calendar of the project @param resource_id : resource working on phase/task @param resource_calendar : working calendar of the resource """ if context is None: context = {} resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves') leave_list = [] if resource_id: leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id), ('calendar_id', '=', resource_calendar), ('resource_id', '=', resource_id) ], context=context) else: leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id), ('resource_id', '=', False) ], context=context) leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context) for i in range(len(leaves)): dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S') dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S') no = dt_end - dt_start [leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))] leave_list.sort() return leave_list | 1bd291fc95789677a55f80352223e871416c7df9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1bd291fc95789677a55f80352223e871416c7df9/resource.py |
[leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))] | [leave_list.append((dt_start + timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))] | def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None): """ Compute the vacation from the working calendar of the resource. @param calendar_id : working calendar of the project @param resource_id : resource working on phase/task @param resource_calendar : working calendar of the resource """ if context is None: context = {} resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves') leave_list = [] if resource_id: leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id), ('calendar_id', '=', resource_calendar), ('resource_id', '=', resource_id) ], context=context) else: leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id), ('resource_id', '=', False) ], context=context) leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context) for i in range(len(leaves)): dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S') dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S') no = dt_end - dt_start [leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))] leave_list.sort() return leave_list | 1bd291fc95789677a55f80352223e871416c7df9 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/1bd291fc95789677a55f80352223e871416c7df9/resource.py |
return super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) | res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) for field in res['fields']: type = context.get('journal_type', 'sale') if field == 'journal_id': journal_select = self.pool.get('account.journal')._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1) res['fields'][field]['selection'] = journal_select return res | def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): if context.get('active_model','') in ['res.partner']: partner = self.pool.get(context['active_model']).read(cr,uid,context['active_ids'],['supplier','customer'])[0] if not view_type: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name','=','account.invoice.tree')])[0] view_type = 'tree' if view_type == 'form': if partner['supplier'] and not partner['customer']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name','=','account.invoice.supplier.form')])[0] else: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name','=','account.invoice.form')])[0] return super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) | bf7e51864126d4267f02c1c35cfcfa43266ab62b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/bf7e51864126d4267f02c1c35cfcfa43266ab62b/invoice.py |
def read_group(self, cr, uid, domain, *args, **kwargs): todel=[] | def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False): | def read_group(self, cr, uid, domain, *args, **kwargs): todel=[] fiscalyear_obj = self.pool.get('account.fiscalyear') period_obj = self.pool.get('account.period') for arg in domain: if arg[0] == 'period_id' and arg[2] == 'current_period': current_period = period_obj.find(cr, uid)[0] domain.append(['period_id','in',[current_period]]) todel.append(arg) break elif arg[0] == 'period_id' and arg[2] == 'current_year': current_year = fiscalyear_obj.find(cr, uid) ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids'] domain.append(['period_id','in',ids]) todel.append(arg) for a in [['period_id','in','current_year'], ['period_id','in','current_period']]: if a in domain: domain.remove(a) return super(account_entries_report, self).read_group(cr, uid, domain, *args, **kwargs) | 4363de12dda2bcdd8aec1a2b2b8f7f774568ca01 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/4363de12dda2bcdd8aec1a2b2b8f7f774568ca01/account_entries_report.py |
for arg in domain: if arg[0] == 'period_id' and arg[2] == 'current_period': current_period = period_obj.find(cr, uid)[0] domain.append(['period_id','in',[current_period]]) todel.append(arg) break elif arg[0] == 'period_id' and arg[2] == 'current_year': current_year = fiscalyear_obj.find(cr, uid) ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids'] domain.append(['period_id','in',ids]) todel.append(arg) for a in [['period_id','in','current_year'], ['period_id','in','current_period']]: if a in domain: domain.remove(a) return super(account_entries_report, self).read_group(cr, uid, domain, *args, **kwargs) | if context.get('period', False) == 'current_period': current_period = period_obj.find(cr, uid)[0] domain.append(['period_id','in',[current_period]]) elif context.get('year', False) == 'current_year': current_year = fiscalyear_obj.find(cr, uid) ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids'] domain.append(['period_id','in',ids]) else: domain = domain return super(account_entries_report, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby) | def read_group(self, cr, uid, domain, *args, **kwargs): todel=[] fiscalyear_obj = self.pool.get('account.fiscalyear') period_obj = self.pool.get('account.period') for arg in domain: if arg[0] == 'period_id' and arg[2] == 'current_period': current_period = period_obj.find(cr, uid)[0] domain.append(['period_id','in',[current_period]]) todel.append(arg) break elif arg[0] == 'period_id' and arg[2] == 'current_year': current_year = fiscalyear_obj.find(cr, uid) ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids'] domain.append(['period_id','in',ids]) todel.append(arg) for a in [['period_id','in','current_year'], ['period_id','in','current_period']]: if a in domain: domain.remove(a) return super(account_entries_report, self).read_group(cr, uid, domain, *args, **kwargs) | 4363de12dda2bcdd8aec1a2b2b8f7f774568ca01 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/4363de12dda2bcdd8aec1a2b2b8f7f774568ca01/account_entries_report.py |
'time':time} print rule.domain_force | 'time':time} | def _domain_force_get(self, cr, uid, ids, field_name, arg, context={}): res = {} for rule in self.browse(cr, uid, ids, context): eval_user_data = {'user': self.pool.get('res.users').browse(cr, 1, uid), 'time':time} print rule.domain_force res[rule.id] = eval(rule.domain_force, eval_user_data) return res | 432f72d2808fa910acff244271c2303689036d43 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/432f72d2808fa910acff244271c2303689036d43/ir_rule.py |
print 'Domain', dom, model_name | | def domain_get(self, cr, uid, model_name, mode='read', context={}): dom = self._compute_domain(cr, uid, model_name, mode=mode) if dom: print 'Domain', dom, model_name query = self.pool.get(model_name)._where_calc(cr, uid, dom, active_test=False) return query.where_clause, query.where_clause_params, query.tables return [], [], ['"'+self.pool.get(model_name)._table+'"'] | 432f72d2808fa910acff244271c2303689036d43 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/432f72d2808fa910acff244271c2303689036d43/ir_rule.py |
self.localcontext = localcontext.copy() | self.localcontext = (localcontext or {}).copy() | def __init__(self, node, localcontext, styles, self2): self.localcontext = localcontext.copy() self.node = node self.styles = styles self.width = utils.unit_get(node.get('width')) self.height = utils.unit_get(node.get('height')) self.self2 = self2 | b04e16db9c0a235722cfb699b7fecd839139d2f3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/b04e16db9c0a235722cfb699b7fecd839139d2f3/trml2pdf.py |
""" | """ | def make_invoices(self, cr, uid, ids, context): """ To make invoices. @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: the ID or list of IDs @param context: A standard dictionary @return: A dictionary which of fields with values. """ | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
| | def make_invoices(self, cr, uid, ids, context): """ To make invoices. @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: the ID or list of IDs @param context: A standard dictionary @return: A dictionary which of fields with values. """ | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
@param ids: the ID or list of IDs @param context: A standard dictionary @return: A dictionary which of fields with values. """ | @param ids: the ID or list of IDs @param context: A standard dictionary @return: A dictionary which of fields with values. """ | def make_invoices(self, cr, uid, ids, context): """ To make invoices. @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: the ID or list of IDs @param context: A standard dictionary @return: A dictionary which of fields with values. """ | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
""" | """ | def make_invoice(order, lines): """ To make invoices. @param order: @param lines: @return: """ a = order.partner_id.property_account_receivable.id if order.partner_id and order.partner_id.property_payment_term.id: pay_term = order.partner_id.property_payment_term.id else: pay_term = False inv = { 'name': order.name, 'origin': order.name, 'type': 'out_invoice', 'reference': "P%dSO%d" % (order.partner_id.id, order.id), 'account_id': a, 'partner_id': order.partner_id.id, 'address_invoice_id': order.partner_invoice_id.id, 'address_contact_id': order.partner_invoice_id.id, 'invoice_line': [(6, 0, lines)], 'currency_id' : order.pricelist_id.currency_id.id, 'comment': order.note, 'payment_term': pay_term, 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id, } inv_id = self.pool.get('account.invoice').create(cr, uid, inv) return inv_id | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
@param order: @param lines: @return: """ | @param order: @param lines: @return: """ | def make_invoice(order, lines): """ To make invoices. @param order: @param lines: @return: """ a = order.partner_id.property_account_receivable.id if order.partner_id and order.partner_id.property_payment_term.id: pay_term = order.partner_id.property_payment_term.id else: pay_term = False inv = { 'name': order.name, 'origin': order.name, 'type': 'out_invoice', 'reference': "P%dSO%d" % (order.partner_id.id, order.id), 'account_id': a, 'partner_id': order.partner_id.id, 'address_invoice_id': order.partner_invoice_id.id, 'address_contact_id': order.partner_invoice_id.id, 'invoice_line': [(6, 0, lines)], 'currency_id' : order.pricelist_id.currency_id.id, 'comment': order.note, 'payment_term': pay_term, 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id, } inv_id = self.pool.get('account.invoice').create(cr, uid, inv) return inv_id | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
for result in invoices.values(): order = result[0][0].order_id il = map(lambda x: x[1], result) res = make_invoice(order, il) cr.execute('INSERT INTO sale_order_invoice_rel \ (order_id,invoice_id) values (%s,%s)', (order.id, res)) | for result in invoices.values(): order = result[0][0].order_id il = map(lambda x: x[1], result) res = make_invoice(order, il) cr.execute('INSERT INTO sale_order_invoice_rel \ (order_id,invoice_id) values (%s,%s)', (order.id, res)) | def make_invoice(order, lines): """ To make invoices. @param order: @param lines: @return: """ a = order.partner_id.property_account_receivable.id if order.partner_id and order.partner_id.property_payment_term.id: pay_term = order.partner_id.property_payment_term.id else: pay_term = False inv = { 'name': order.name, 'origin': order.name, 'type': 'out_invoice', 'reference': "P%dSO%d" % (order.partner_id.id, order.id), 'account_id': a, 'partner_id': order.partner_id.id, 'address_invoice_id': order.partner_invoice_id.id, 'address_contact_id': order.partner_invoice_id.id, 'invoice_line': [(6, 0, lines)], 'currency_id' : order.pricelist_id.currency_id.id, 'comment': order.note, 'payment_term': pay_term, 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id, } inv_id = self.pool.get('account.invoice').create(cr, uid, inv) return inv_id | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
| | def make_invoice(order, lines): """ To make invoices. @param order: @param lines: @return: """ a = order.partner_id.property_account_receivable.id if order.partner_id and order.partner_id.property_payment_term.id: pay_term = order.partner_id.property_payment_term.id else: pay_term = False inv = { 'name': order.name, 'origin': order.name, 'type': 'out_invoice', 'reference': "P%dSO%d" % (order.partner_id.id, order.id), 'account_id': a, 'partner_id': order.partner_id.id, 'address_invoice_id': order.partner_invoice_id.id, 'address_contact_id': order.partner_invoice_id.id, 'invoice_line': [(6, 0, lines)], 'currency_id' : order.pricelist_id.currency_id.id, 'comment': order.note, 'payment_term': pay_term, 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id, } inv_id = self.pool.get('account.invoice').create(cr, uid, inv) return inv_id | 06ae11efd0f151a77ff6935b78493ba7a9d05913 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/06ae11efd0f151a77ff6935b78493ba7a9d05913/sale_line_invoice.py |
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])[0] | view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')]) | def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): journal_obj = self.pool.get('account.journal') if context is None: context = {} if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']: partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0] if not view_type: view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])[0] view_type = 'tree' if view_type == 'form': if partner['supplier'] and not partner['customer']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])[0] else: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])[0] res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) type = context.get('journal_type', 'sale') for field in res['fields']: if field == 'journal_id': journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1) res['fields'][field]['selection'] = journal_select | 45ba775c1a23e5c298f6831fd04bca40ac1e7f50 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/45ba775c1a23e5c298f6831fd04bca40ac1e7f50/invoice.py |
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])[0] | view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')]) | def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): journal_obj = self.pool.get('account.journal') if context is None: context = {} if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']: partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0] if not view_type: view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])[0] view_type = 'tree' if view_type == 'form': if partner['supplier'] and not partner['customer']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])[0] else: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])[0] res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) type = context.get('journal_type', 'sale') for field in res['fields']: if field == 'journal_id': journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1) res['fields'][field]['selection'] = journal_select | 45ba775c1a23e5c298f6831fd04bca40ac1e7f50 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/45ba775c1a23e5c298f6831fd04bca40ac1e7f50/invoice.py |
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])[0] | view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')]) if view_id and isinstance(view_id, (list, tuple)): view_id = view_id[0] | def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): journal_obj = self.pool.get('account.journal') if context is None: context = {} if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']: partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0] if not view_type: view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])[0] view_type = 'tree' if view_type == 'form': if partner['supplier'] and not partner['customer']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])[0] else: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])[0] res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) type = context.get('journal_type', 'sale') for field in res['fields']: if field == 'journal_id': journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1) res['fields'][field]['selection'] = journal_select | 45ba775c1a23e5c298f6831fd04bca40ac1e7f50 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/45ba775c1a23e5c298f6831fd04bca40ac1e7f50/invoice.py |
if not context: | if context is None: | def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True): if not context: context={} if vals.get('account_tax_id', False): raise osv.except_osv(_('Unable to change tax !'), _('You can not change the tax, you should remove and recreate lines !')) | 99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590/account_move_line.py |
cr.execute('select id, state, name from account_move where journal_id=%s and period_id=%s order by id limit 1', (context['journal_id'],context['period_id'])) res = cr.fetchone() | res = self._check_moves(cr, uid, context) | def create(self, cr, uid, vals, context=None, check=True): if not context: context={} account_obj = self.pool.get('account.account') tax_obj=self.pool.get('account.tax') if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']: raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!')) if 'journal_id' in vals and 'journal_id' not in context: context['journal_id'] = vals['journal_id'] if 'period_id' in vals and 'period_id' not in context: context['period_id'] = vals['period_id'] if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']: m = self.pool.get('account.move').browse(cr, uid, vals['move_id']) context['journal_id'] = m.journal_id.id context['period_id'] = m.period_id.id | 99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590/account_move_line.py |
if res[1] != 'draft': raise osv.except_osv(_('UserError'), _('The Ledger Posting (%s) for centralisation ' \ 'has been confirmed!') % res[2]) | | def create(self, cr, uid, vals, context=None, check=True): if not context: context={} account_obj = self.pool.get('account.account') tax_obj=self.pool.get('account.tax') if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']: raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!')) if 'journal_id' in vals and 'journal_id' not in context: context['journal_id'] = vals['journal_id'] if 'period_id' in vals and 'period_id' not in context: context['period_id'] = vals['period_id'] if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']: m = self.pool.get('account.move').browse(cr, uid, vals['move_id']) context['journal_id'] = m.journal_id.id context['period_id'] = m.period_id.id | 99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/99d8cd6e3cf40d4aa3daacffdfd574f1c9cf5590/account_move_line.py |
journal_list = journal_pool.name_search(cr, uid, '', [], context=context) | type_search = { 'bank':[('type','in',['bank','cash'])], 'cash':[('type','in',['bank','cash'])], 'sale':[('type','in',['sale','purchase_refund'])], 'purchase':[('type','in',['purchase','sale_refund'])], 'expense':[('type','in',['purchase'])], 'sale_refund':[('type','in',['sale','purchase_refund'])], 'purchase_refund':[('type','in',['purchase','sale_refund'])] } domain = type_search.get(context.get('journal_type')) journal_list = journal_pool.name_search(cr, uid, '', domain) | def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): """ Returns views and fields for current model where view will depend on {view_type}. @param view_id: list of fields, which required to read signatures @param view_type: defines a view type. it can be one of (form, tree, graph, calender, gantt, search, mdx) @param context: context arguments, like lang, time zone @param toolbar: contains a list of reports, wizards, and links related to current model @return: Returns a dict that contains definition for fields, views, and toolbars """ data_pool = self.pool.get('ir.model.data') journal_pool = self.pool.get('account.journal') voucher_type = { 'sale':'view_sale_receipt_form', 'purchase':'view_purchase_receipt_form', 'payment':'view_vendor_payment_form', 'receipt':'view_vendor_receipt_form' } if view_type == 'form': tview = voucher_type.get(context.get('type')) tview = tview or 'view_voucher_form' result = data_pool._get_id(cr, uid, 'account_voucher', tview) view_id = data_pool.browse(cr, uid, result, context=context).res_id res = super(account_voucher, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu) #Restrict the list of journal view in search view if view_type == 'search': journal_list = journal_pool.name_search(cr, uid, '', [], context=context) res['fields']['journal_id']['selection'] = journal_list return res | 0c89d09d4b03c0053ea6076312a22e79de022cc8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/0c89d09d4b03c0053ea6076312a22e79de022cc8/voucher.py |
| | def _pricelist_type_get(self, cr, uid, context={}): pricelist_type_obj = self.pool.get('product.pricelist.type') pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name') pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context) res = [] for type in pricelist_types: res.append((type['key'],type['name'])) return res | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
| | def name_get(self, cr, uid, ids, context={}): result= [] if not all(ids): return result for pl in self.browse(cr, uid, ids, context): name = pl.name + ' ('+ pl.currency_id.name + ')' result.append((pl.id,name)) return result | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
where = [('name', '=', partner) ] | where = [('name', '=', partner) ] | def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None): ''' context = { 'uom': Unit of Measure (int), 'partner': Partner ID (int), 'date': Date of the pricelist (%Y-%m-%d), } ''' context = context or {} currency_obj = self.pool.get('res.currency') product_obj = self.pool.get('product.product') supplierinfo_obj = self.pool.get('product.supplierinfo') price_type_obj = self.pool.get('product.price.type') | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
price_type.field)[prod_id], round=False) | price_type.field, context=context)[prod_id], round=False, context=context) | def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None): ''' context = { 'uom': Unit of Measure (int), 'partner': Partner ID (int), 'date': Date of the pricelist (%Y-%m-%d), } ''' context = context or {} currency_obj = self.pool.get('res.currency') product_obj = self.pool.get('product.product') supplierinfo_obj = self.pool.get('product.supplierinfo') price_type_obj = self.pool.get('product.price.type') | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
result[id] = price | result[id] = price | def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None): ''' context = { 'uom': Unit of Measure (int), 'partner': Partner ID (int), 'date': Date of the pricelist (%Y-%m-%d), } ''' context = context or {} currency_obj = self.pool.get('res.currency') product_obj = self.pool.get('product.product') supplierinfo_obj = self.pool.get('product.supplierinfo') price_type_obj = self.pool.get('product.price.type') | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
uid, uom.id, result[id], context['uom']) | uid, uom.id, result[id], context['uom']) | def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None): ''' context = { 'uom': Unit of Measure (int), 'partner': Partner ID (int), 'date': Date of the pricelist (%Y-%m-%d), } ''' context = context or {} currency_obj = self.pool.get('res.currency') product_obj = self.pool.get('product.product') supplierinfo_obj = self.pool.get('product.supplierinfo') price_type_obj = self.pool.get('product.price.type') | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
| | def _price_field_get(self, cr, uid, context={}): pt = self.pool.get('product.price.type') ids = pt.search(cr, uid, [], context=context) result = [] for line in pt.browse(cr, uid, ids, context=context): result.append((line.id, line.name)) | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
return False | return False | def _check_recursion(self, cr, uid, ids): for obj_list in self.browse(cr, uid, ids): if obj_list.base == -1: main_pricelist = obj_list.price_version_id.pricelist_id.id other_pricelist = obj_list.base_pricelist_id.id if main_pricelist == other_pricelist: return False return True | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
| | def _check_recursion(self, cr, uid, ids): for obj_list in self.browse(cr, uid, ids): if obj_list.base == -1: main_pricelist = obj_list.price_version_id.pricelist_id.id other_pricelist = obj_list.base_pricelist_id.id if main_pricelist == other_pricelist: return False return True | 57cc12f1e87f05028d38149bf0ffc10fd154f806 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/57cc12f1e87f05028d38149bf0ffc10fd154f806/pricelist.py |
'bank_account': fields.char('Bank Account', size=64), 'partner_id': fields.related('company_id', 'partner_id', type='many2one', relation='res.partner', readonly=True), | | def job_open(self, cr, uid, ids, *args): self.write(cr, uid, ids, {'state': 'open'}) return True | 5d7b8578290c4bc7b3f915d6b68dd31b7e4cd61c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/5d7b8578290c4bc7b3f915d6b68dd31b7e4cd61c/hr.py |
res = self.onchange_chart_id(cr, uid, chart_id, context=context) | res = self.onchange_chart_id(cr, uid, [], chart_id, context=context) | def _get_def_reserve_account(self, cr, uid, context=None): chart_id = self._get_account(cr, uid, context=context) # Reuse the onchange function, for symmetry res = self.onchange_chart_id(cr, uid, chart_id, context=context) if not res: return False return res['value']['reserve_account_id'] | 2210c4cd4ecec951c7287a3b2ab87bda7c748855 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/2210c4cd4ecec951c7287a3b2ab87bda7c748855/account_report_balance_sheet.py |
def onchange_chart_id(self, cr, uid, chart_id, context=None): | def onchange_chart_id(self, cr, uid, ids, chart_id, context=None): | def onchange_chart_id(self, cr, uid, chart_id, context=None): if not chart_id: return False account = self.pool.get('account.account').browse(cr, uid, chart_id , context=context) if not account.company_id.property_reserve_and_surplus_account: return False # We cannot raise an exception, because that's before the wizard return { 'value': {'reserve_account_id': account.company_id.property_reserve_and_surplus_account.id}} | 2210c4cd4ecec951c7287a3b2ab87bda7c748855 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/2210c4cd4ecec951c7287a3b2ab87bda7c748855/account_report_balance_sheet.py |
return False | return {} | def onchange_chart_id(self, cr, uid, chart_id, context=None): if not chart_id: return False account = self.pool.get('account.account').browse(cr, uid, chart_id , context=context) if not account.company_id.property_reserve_and_surplus_account: return False # We cannot raise an exception, because that's before the wizard return { 'value': {'reserve_account_id': account.company_id.property_reserve_and_surplus_account.id}} | 2210c4cd4ecec951c7287a3b2ab87bda7c748855 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/2210c4cd4ecec951c7287a3b2ab87bda7c748855/account_report_balance_sheet.py |
return False | return { 'value': {'reserve_account_id': False}} | def onchange_chart_id(self, cr, uid, chart_id, context=None): if not chart_id: return False account = self.pool.get('account.account').browse(cr, uid, chart_id , context=context) if not account.company_id.property_reserve_and_surplus_account: return False # We cannot raise an exception, because that's before the wizard return { 'value': {'reserve_account_id': account.company_id.property_reserve_and_surplus_account.id}} | 2210c4cd4ecec951c7287a3b2ab87bda7c748855 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/2210c4cd4ecec951c7287a3b2ab87bda7c748855/account_report_balance_sheet.py |
def _print_report(self, cr, uid, ids, data, context=None): if context is None: context = {} data['form'].update(self.read(cr, uid, ids, ['display_type','reserve_account_id'])[0]) if not data['form']['reserve_account_id']: # only in < v6.1, where orm_memory does not honor required fields raise osv.except_osv(_('Warning'),_('Please define the Reserve and Profit/Loss account for current user company !')) data = self.pre_print_report(cr, uid, ids, data, context=context) if data['form']['display_type']: return { 'type': 'ir.actions.report.xml', 'report_name': 'account.balancesheet.horizontal', 'datas': data, } else: return { 'type': 'ir.actions.report.xml', 'report_name': 'account.balancesheet', 'datas': data, } | 2210c4cd4ecec951c7287a3b2ab87bda7c748855 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/2210c4cd4ecec951c7287a3b2ab87bda7c748855/account_report_balance_sheet.py |
||
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, "%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE) WITH OIDS' % (f._rel, f._id1, self._table, f._id2, ref)) | cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, "%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, UNIQUE("%s","%s")) WITH OIDS' % (f._rel, f._id1, self._table, f._id2, ref, f._id1, f._id2)) | def _auto_init(self, cr, context=None): if context is None: context = {} store_compute = False create = False todo_end = [] self._field_create(cr, context=context) if getattr(self, '_auto', True): cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,)) if not cr.rowcount: cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,)) cr.execute("COMMENT ON TABLE \"%s\" IS '%s'" % (self._table, self._description.replace("'", "''"))) create = True self.__schema.debug("Table '%s': created", self._table) | 456579b198cd0ec544c90c36226308d298ea93fa /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/456579b198cd0ec544c90c36226308d298ea93fa/orm.py |
progress = len(update_docs)/len(proc_change.process_document_ids) | if proc_change.process_document_ids: progress = len(update_docs)/len(proc_change.process_document_ids) | def _get_progress(self, cr, uid, ids, field_name, arg, context={}): result = {} update_docs = [] for proc_change in self.browse(cr, uid, ids): for doc in proc_change.process_document_ids: if doc.state in ('to_update', 'change_propose'): update_docs.append(doc) progress = len(update_docs)/len(proc_change.process_document_ids) result[proc_change.id] = progress return result | 267cd84032e2ebe45d3c64a8cdac5fcff88dd6ba /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/267cd84032e2ebe45d3c64a8cdac5fcff88dd6ba/document_change.py |
phase_type_obj = self.pool.get('document.change.process.phase.type') document_type_obj = self.pool.get('document.change.type') | def generate_phases(self, cr, uid, ids, *args): phase_obj = self.pool.get('document.change.process.phase') phase_type_obj = self.pool.get('document.change.process.phase.type') document_type_obj = self.pool.get('document.change.type') directory_obj = self.pool.get('document.directory') document_obj = self.pool.get('ir.attachment') new_doc_ids = [] for process in self.browse(cr, uid, ids): if process.process_model_id: directory_ids = directory_obj.search(cr, uid, [('parent_id','child_of',process.structure_id and process.structure_id.id)]) for phase_type_id in process.process_model_id.phase_type_ids: phase_value = { 'name' : '%s-%s' %(phase_type_id.name, process.name), 'phase_type_id': phase_type_id.id, 'process_id': process.id } phase_id = phase_obj.create(cr, uid, phase_value) cr.execute('select document_type_id from document_type_phase_type_rel where phase_type_id = %s' % phase_type_id.id) document_type_ids = map(lambda x: x[0], cr.fetchall()) document_ids = document_obj.search(cr, uid, [ ('parent_id','in',directory_ids), ('change_type_id','in',document_type_ids)]) for document_id in document_ids: vals = {'process_phase_id': phase_id} if process.pending_directory_id: vals.update({'parent_id':process.pending_directory_id.id}) new_doc_ids.append(document_obj.copy(cr, uid, document_id, vals)) phase_obj.write(cr, uid, [phase_id], {'phase_document_ids': [(6,0,document_ids)]}) self.write(cr, uid, [process.id],{'process_document_ids': [(6,0,new_doc_ids)]}) | 267cd84032e2ebe45d3c64a8cdac5fcff88dd6ba /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7397/267cd84032e2ebe45d3c64a8cdac5fcff88dd6ba/document_change.py |
|
event.add_callback(self.on_playback_start, 'playback_player_start') event.add_callback(self.on_playback_end, 'playback_player_end') | event.add_callback(self.on_playback_start, 'playback_track_start') event.add_callback(self.on_playback_end, 'playback_track_end') | def _bind_events(self): event.add_callback(self.on_playback_start, 'playback_player_start') event.add_callback(self.on_playback_end, 'playback_player_end') event.add_callback(self.on_playback_toggle_pause, 'playback_toggle_pause') | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
@dbus.service.method(dbus.PROPERTIES_IFACE, out_signature='a{sv}') | def Rate(self): pass | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
|
return True | track = self.exaile.player.current playlist = self.exaile.queue.current_playlist return not ((len(playlist)-1) == playlist.index(track)) | def CanGoNext(self): return True | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
return True | return not (playlist.index(track) == 0) | def CanGoPrevious(self): return True | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
return True | return not self.exaile.player.is_playing() | def CanPlay(self): return True | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
return True | return self.exaile.player.is_playing() | def CanPause(self): return True | e3ae35e70b0900f4e988ae66583b29655a154705 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/e3ae35e70b0900f4e988ae66583b29655a154705/mpris2.py |
props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() | def on_playback_start(self, evt, exaile, data): props = {} props['PlaybackStatus'] = self.PlaybackStatus() props['Metadata'] = self.Metadata() props['CanGoNext'] = self.CanGoNext() props['CanGoPrevious'] = self.CanGoPrevious() props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() self.PropertiesChanged(ORG_MPRIS_MEDIAPLAYER2_PLAYER, props, []) | 195bf164ce62bbe50cb63bbc2502e2be180118d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/195bf164ce62bbe50cb63bbc2502e2be180118d8/mpris2.py |
|
props['Metadata'] = self.Metadata() props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() | def on_playback_end(self, evt, exaile, data): props = {} props['Metadata'] = self.Metadata() props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() props['PlaybackStatus'] = self.PlaybackStatus() self.PropertiesChanged(ORG_MPRIS_MEDIAPLAYER2_PLAYER, props, []) | 195bf164ce62bbe50cb63bbc2502e2be180118d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/195bf164ce62bbe50cb63bbc2502e2be180118d8/mpris2.py |
|
props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() | def on_playback_toggle_pause(self, evt, exaile, data): props = {} props['PlaybackStatus'] = self.PlaybackStatus() props['CanPause'] = self.CanPause() props['CanPlay'] = self.CanPlay() self.PropertiesChanged(ORG_MPRIS_MEDIAPLAYER2_PLAYER, props, []) | 195bf164ce62bbe50cb63bbc2502e2be180118d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/195bf164ce62bbe50cb63bbc2502e2be180118d8/mpris2.py |
|
meta['mpris:length'] = dbus.types.Int64(int(track.get_tag_raw('__length'))*1000) | meta['mpris:length'] = dbus.types.Int64(int(track.get_tag_raw('__length') or 0)*1000) | def _get_metadata(self, track): ## mpris2.0 meta map, defined at http://xmms2.org/wiki/MPRIS_Metadata meta = {} meta['xesam:title'] = unicode(track.get_tag_raw('title')[0]) meta['xesam:album'] = unicode(track.get_tag_raw('album')[0]) meta['xesam:artist'] = dbus.types.Array([unicode(track.get_tag_raw('artist')[0])], signature='s') meta['mpris:length'] = dbus.types.Int64(int(track.get_tag_raw('__length'))*1000) | 195bf164ce62bbe50cb63bbc2502e2be180118d8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13807/195bf164ce62bbe50cb63bbc2502e2be180118d8/mpris2.py |
def load(self): stream = file('config', 'rU') d = yaml.load(stream) for key in d.keys(): setattr(self, key, d[key]) | def load(self): try: stream = file('config', 'rU') except: pass else: d = yaml.load(stream) for key in d.keys(): setattr(self, key, d[key]) | def load(self): stream = file('config', 'rU') d = yaml.load(stream) for key in d.keys(): setattr(self, key, d[key]) | f3cfc15e8d12898114520383eba2682a940f12d3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11761/f3cfc15e8d12898114520383eba2682a940f12d3/main.py |
setattr(self.loadobj, field, self.fields[field].get()) | fld = self.fields[field].get() try: fld = eval(fld) except: pass setattr(self.loadobj, field, fld) | def saveFile(self, *args): print os.path.join(self.config.path, self.loaded) for field in self.fields: if hasattr(self.loadobj, field): setattr(self.loadobj, field, self.fields[field].get()) fileObj = open(os.path.join(self.config.path, self.loaded),"w") fileObj.write(self.loadobj.dump()) fileObj.close() | f3cfc15e8d12898114520383eba2682a940f12d3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11761/f3cfc15e8d12898114520383eba2682a940f12d3/main.py |
elif y > 1.0 - prob_fix(p, N, t): | elif y > 1.0 - prob_fix_leg(leg_r, N, t): | def sample_freq_CDF(p, N, t): """ Takes an allele frequency p, a population size N, and a time period t. Samples from the CDF derived from Kimura to get a new allele frequency. N.B.: The current version fails sometimes (on some N, t pairs), presumably due to errors in freq_CDF_leg. These need to be fixed. """ import scipy.optimize #, random y = random.random() leg_r = legendre(1.0-2*p) extinction = prob_fix(1.0-p, N, t) # probability of allele extinction if y < extinction: return 0.0 # sample an extinction event elif y > 1.0 - prob_fix(p, N, t): return 1.0 # sample a fixation event else: def f(T): return freq_CDF_legs_noends(leg_r, legendre(1.0-2*T), N, t) \ - y + extinction # trims extinction probability, assures brentq works return scipy.optimize.brentq(f, 0.0, 1.0, disp=False) | 0db82e4177d48ff2c1f56d530cbce00e84d4bdea /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/0db82e4177d48ff2c1f56d530cbce00e84d4bdea/coal.py |
def prob_fix(p, n, t, k=100, esp=0.000001): | def prob_fix(p, n, t, k=50, esp=0.000001): | def prob_fix(p, n, t, k=100, esp=0.000001): """Probability of fixation""" r = 1 - 2*p leg = legendre(r) prob = p for i in xrange(1, k+1): term = (.5 * (-1)**i * (leg(i-1) - leg(i+1)) * exp(-t * i * (i+1) / (4 * n))) if term != 0.0 and abs(term) < esp: return prob + term prob += term return prob | 0db82e4177d48ff2c1f56d530cbce00e84d4bdea /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/0db82e4177d48ff2c1f56d530cbce00e84d4bdea/coal.py |
tree2.writeNewick(filename) | tree2.write(filename) | def write_in_tree(filename, tree, labels): tree2 = tree.copy() rename_tree_with_ids(tree2, labels) for node in tree2.nodes.values(): node.dist = 0 tree2.writeNewick(filename) | fbcef0514ac80732a481ca1ada1150cfe23ed533 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/fbcef0514ac80732a481ca1ada1150cfe23ed533/phylip.py |
proposal = self.proposer.next_proposal() | def recon(self, nsearch=1000): """Perform reconciliation""" self.init_search() for i in xrange(nsearch): print "search", i proposal = self.proposer.next_proposal() p = self.eval_proposal(proposal) self.eval_search(p, proposal) # rename locus tree nodes rename_nodes(self.maxrecon["locus_tree"], self.name_internal) return self.maxp, self.maxrecon | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
|
return dl_prob + d_prob + util.safelog(prob / nsamples, -util.INF) | return dl_prob + d_prob + util.safelog(prob / nsamples) | def prob_dlcoal_recon_topology(coal_tree, coal_recon, locus_tree, locus_recon, locus_events, daughters, stree, n, duprate, lossrate, pretime=None, premean=None, maxdoom=20, nsamples=100, add_spec=True): """ Probability of a reconcile gene tree in the DLCoal model. coal_tree -- coalescent tree coal_recon -- reconciliation of coalescent tree to locus tree locus_tree -- locus tree (has dup-loss) locus_recon -- reconciliation of locus tree to species tree locus_events -- events dict for locus tree stree -- species tree n -- population sizes in species tree duprate -- duplication rate lossrate -- loss rate You must also specify one of the following pretime -- starting time before species tree premean -- mean starting time before species tree Note: locus tree must have implied speciation nodes present """ dups = phylo.count_dup(locus_tree, locus_events) # ensure implicit speciations are present if add_spec: phylo.add_implied_spec_nodes(locus_tree, stree, locus_recon, locus_events) # init popsizes for locus tree stree_popsizes = coal.init_popsizes(stree, n) popsizes = {} for node in locus_tree: popsizes[node.name] = stree_popsizes[locus_recon[node].name] # duploss probability util.tic("top") dl_prob = spidir.calc_birth_death_prior(locus_tree, stree, locus_recon, duprate, lossrate, maxdoom=maxdoom) util.toc() # daughters probability d_prob = dups * log(.5) # integrate over duplication times using sampling prob = 0.0 #util.tic("int") for i in xrange(nsamples): # sample duplication times locus_times = spidir.topology_prior.sample_dup_times( locus_tree, stree, locus_recon, duprate, lossrate, pretime, premean, events=locus_events) assert len(locus_times) == len(locus_tree.nodes), ( len(locus_times), len(locus_tree.nodes)) birthdeath.set_dists_from_timestamps(locus_tree, locus_times) # coal topology probability coal_prob = prob_coal_recon_topology(coal_tree, coal_recon, locus_tree, popsizes, daughters) prob += exp(coal_prob) print coal_prob #util.toc() return dl_prob + d_prob + util.safelog(prob / nsamples, -util.INF) | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
popsizes[snode.name]), -util.INF) | popsizes[snode.name])) | def prob_coal_recon_topology(tree, recon, locus_tree, n, daughters): """ Returns the log probability of a reconciled gene tree ('tree', 'recon') from the coalescent model given a locus_tree 'locus_tree', population sizes 'n', and daughters set 'daughters' """ # init population sizes popsizes = coal.init_popsizes(locus_tree, n) # log probability lnp = 0.0 nodes = set(tree.postorder()) # init reverse reconciliation rev_recon = {} for node, snode in recon.iteritems(): if node not in nodes: raise Exception("node '%s' not in tree" % node.name) rev_recon.setdefault(snode, []).append(node) # init lineage counts lineages = {} for snode in locus_tree: if snode.is_leaf(): lineages[snode] = len([x for x in rev_recon[snode] if x.is_leaf()]) else: lineages[snode] = 0 # iterate through species tree branches for snode in locus_tree.postorder(): if snode.parent: # non root branch u = lineages[snode] # subtract number of coals in branch v = u - len([x for x in rev_recon.get(snode, []) if not x.is_leaf()]) lineages[snode.parent] += v if snode not in daughters: try: lnp += util.safelog( coal.prob_coal_counts(u, v, snode.dist, popsizes[snode.name]), -util.INF) except: print u, v, snode.dist, popsizes[snode.name] raise else: assert v == 1 lnp -= util.safelog(coal.num_labeled_histories(u, v), -util.INF) else: # normal coalesent u = lineages[snode] lnp -= util.safelog(coal.num_labeled_histories(u, 1), -util.INF) # correct for topologies H(T) # find connected subtrees that are in the same species branch subtrees = [] subtree_root = {} for node in tree.preorder(): if node.parent and recon[node] == recon[node.parent]: subtree_root[node] = subtree_root[node.parent] else: subtrees.append(node) subtree_root[node] = node # find leaves through recursion def walk(node, subtree, leaves): if node.is_leaf(): leaves.append(node) elif (subtree_root[node.children[0]] != subtree and subtree_root[node.children[1]] != subtree): leaves.append(node) else: for child in node.children: walk(child, subtree, leaves) # apply correction for each subtree for subtree in subtrees: leaves = [] for child in subtree.children: walk(subtree, subtree, leaves) if len(leaves) > 2: lnp += util.safelog( birthdeath.num_topology_histories(subtree, leaves), -util.INF) return lnp | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
lnp -= util.safelog(coal.num_labeled_histories(u, v), -util.INF) | lnp -= util.safelog(coal.num_labeled_histories(u, v)) | def prob_coal_recon_topology(tree, recon, locus_tree, n, daughters): """ Returns the log probability of a reconciled gene tree ('tree', 'recon') from the coalescent model given a locus_tree 'locus_tree', population sizes 'n', and daughters set 'daughters' """ # init population sizes popsizes = coal.init_popsizes(locus_tree, n) # log probability lnp = 0.0 nodes = set(tree.postorder()) # init reverse reconciliation rev_recon = {} for node, snode in recon.iteritems(): if node not in nodes: raise Exception("node '%s' not in tree" % node.name) rev_recon.setdefault(snode, []).append(node) # init lineage counts lineages = {} for snode in locus_tree: if snode.is_leaf(): lineages[snode] = len([x for x in rev_recon[snode] if x.is_leaf()]) else: lineages[snode] = 0 # iterate through species tree branches for snode in locus_tree.postorder(): if snode.parent: # non root branch u = lineages[snode] # subtract number of coals in branch v = u - len([x for x in rev_recon.get(snode, []) if not x.is_leaf()]) lineages[snode.parent] += v if snode not in daughters: try: lnp += util.safelog( coal.prob_coal_counts(u, v, snode.dist, popsizes[snode.name]), -util.INF) except: print u, v, snode.dist, popsizes[snode.name] raise else: assert v == 1 lnp -= util.safelog(coal.num_labeled_histories(u, v), -util.INF) else: # normal coalesent u = lineages[snode] lnp -= util.safelog(coal.num_labeled_histories(u, 1), -util.INF) # correct for topologies H(T) # find connected subtrees that are in the same species branch subtrees = [] subtree_root = {} for node in tree.preorder(): if node.parent and recon[node] == recon[node.parent]: subtree_root[node] = subtree_root[node.parent] else: subtrees.append(node) subtree_root[node] = node # find leaves through recursion def walk(node, subtree, leaves): if node.is_leaf(): leaves.append(node) elif (subtree_root[node.children[0]] != subtree and subtree_root[node.children[1]] != subtree): leaves.append(node) else: for child in node.children: walk(child, subtree, leaves) # apply correction for each subtree for subtree in subtrees: leaves = [] for child in subtree.children: walk(subtree, subtree, leaves) if len(leaves) > 2: lnp += util.safelog( birthdeath.num_topology_histories(subtree, leaves), -util.INF) return lnp | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
lnp -= util.safelog(coal.num_labeled_histories(u, 1), -util.INF) | lnp -= util.safelog(coal.num_labeled_histories(u, 1)) | def prob_coal_recon_topology(tree, recon, locus_tree, n, daughters): """ Returns the log probability of a reconciled gene tree ('tree', 'recon') from the coalescent model given a locus_tree 'locus_tree', population sizes 'n', and daughters set 'daughters' """ # init population sizes popsizes = coal.init_popsizes(locus_tree, n) # log probability lnp = 0.0 nodes = set(tree.postorder()) # init reverse reconciliation rev_recon = {} for node, snode in recon.iteritems(): if node not in nodes: raise Exception("node '%s' not in tree" % node.name) rev_recon.setdefault(snode, []).append(node) # init lineage counts lineages = {} for snode in locus_tree: if snode.is_leaf(): lineages[snode] = len([x for x in rev_recon[snode] if x.is_leaf()]) else: lineages[snode] = 0 # iterate through species tree branches for snode in locus_tree.postorder(): if snode.parent: # non root branch u = lineages[snode] # subtract number of coals in branch v = u - len([x for x in rev_recon.get(snode, []) if not x.is_leaf()]) lineages[snode.parent] += v if snode not in daughters: try: lnp += util.safelog( coal.prob_coal_counts(u, v, snode.dist, popsizes[snode.name]), -util.INF) except: print u, v, snode.dist, popsizes[snode.name] raise else: assert v == 1 lnp -= util.safelog(coal.num_labeled_histories(u, v), -util.INF) else: # normal coalesent u = lineages[snode] lnp -= util.safelog(coal.num_labeled_histories(u, 1), -util.INF) # correct for topologies H(T) # find connected subtrees that are in the same species branch subtrees = [] subtree_root = {} for node in tree.preorder(): if node.parent and recon[node] == recon[node.parent]: subtree_root[node] = subtree_root[node.parent] else: subtrees.append(node) subtree_root[node] = node # find leaves through recursion def walk(node, subtree, leaves): if node.is_leaf(): leaves.append(node) elif (subtree_root[node.children[0]] != subtree and subtree_root[node.children[1]] != subtree): leaves.append(node) else: for child in node.children: walk(child, subtree, leaves) # apply correction for each subtree for subtree in subtrees: leaves = [] for child in subtree.children: walk(subtree, subtree, leaves) if len(leaves) > 2: lnp += util.safelog( birthdeath.num_topology_histories(subtree, leaves), -util.INF) return lnp | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
birthdeath.num_topology_histories(subtree, leaves), -util.INF) | birthdeath.num_topology_histories(subtree, leaves)) | def walk(node, subtree, leaves): if node.is_leaf(): leaves.append(node) elif (subtree_root[node.children[0]] != subtree and subtree_root[node.children[1]] != subtree): leaves.append(node) else: for child in node.children: walk(child, subtree, leaves) | f97e996acfdbe702a5ae09cef4cb25391e05ee64 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8482/f97e996acfdbe702a5ae09cef4cb25391e05ee64/dlcoal.py |
optarg_data = cmd_dict[cmd] handler = commands.CommandHandler(argv[2:], optarg_data) | run_cmd(cmd, argv[2:]) | def start_cmd(argv): cmd_arg = '' if len(argv) > 1: cmd_arg = argv[1] # change common help args to help command if cmd_arg in ('--help', '-h', '--usage', '-u', '/?'): cmd_arg = 'usage' completions = complete_command(cmd_arg) if cmd_arg and len(completions) > 0: if len(completions) == 1: # get the only completion (since in this case we have 1) cmd = completions[0] # build up the first part of the map (for illustrative purposes) cmd_map = list() if cmd_arg != cmd: cmd_map.append(cmd_arg) cmd_map.append(cmd) # map an alias to the command, and build up the map if cmd in cmd_alias_dict.keys(): alias = cmd if cmd_arg == cmd: cmd_map.append(alias) cmd = cmd_alias_dict[cmd] cmd_map.append(cmd) # show command map to avoid confusion if len(cmd_map) != 0: print 'Mapping command: %s' % ' -> '.join(cmd_map) # pass args and optarg data to command handler, which figures out # how to handle the arguments optarg_data = cmd_dict[cmd] handler = commands.CommandHandler(argv[2:], optarg_data) # use reflection to get the function pointer cmd_func = getattr(handler, cmd) cmd_func() return 0 else: print ( 'Command `%s` too ambiguous, ' 'could mean any of: %s' ) % (cmd_arg, ', '.join(completions)) else: if len(argv) == 1: print 'No command specified, showing usage.\n' else: print 'Command not recognised: %s\n' % cmd_arg commands.usage(argv[2:]) # generic error code if not returned sooner return 1 | f91beeacf06ee26c7f93ba881ffc708acb9b6bcf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/f91beeacf06ee26c7f93ba881ffc708acb9b6bcf/hm.py |
cmd_func = getattr(handler, cmd) cmd_func() | def start_cmd(argv): cmd_arg = '' if len(argv) > 1: cmd_arg = argv[1] # change common help args to help command if cmd_arg in ('--help', '-h', '--usage', '-u', '/?'): cmd_arg = 'usage' completions = complete_command(cmd_arg) if cmd_arg and len(completions) > 0: if len(completions) == 1: # get the only completion (since in this case we have 1) cmd = completions[0] # build up the first part of the map (for illustrative purposes) cmd_map = list() if cmd_arg != cmd: cmd_map.append(cmd_arg) cmd_map.append(cmd) # map an alias to the command, and build up the map if cmd in cmd_alias_dict.keys(): alias = cmd if cmd_arg == cmd: cmd_map.append(alias) cmd = cmd_alias_dict[cmd] cmd_map.append(cmd) # show command map to avoid confusion if len(cmd_map) != 0: print 'Mapping command: %s' % ' -> '.join(cmd_map) # pass args and optarg data to command handler, which figures out # how to handle the arguments optarg_data = cmd_dict[cmd] handler = commands.CommandHandler(argv[2:], optarg_data) # use reflection to get the function pointer cmd_func = getattr(handler, cmd) cmd_func() return 0 else: print ( 'Command `%s` too ambiguous, ' 'could mean any of: %s' ) % (cmd_arg, ', '.join(completions)) else: if len(argv) == 1: print 'No command specified, showing usage.\n' else: print 'Command not recognised: %s\n' % cmd_arg commands.usage(argv[2:]) # generic error code if not returned sooner return 1 | f91beeacf06ee26c7f93ba881ffc708acb9b6bcf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/f91beeacf06ee26c7f93ba881ffc708acb9b6bcf/hm.py |
|
commands.usage(argv[2:]) | run_cmd('usage') | def start_cmd(argv): cmd_arg = '' if len(argv) > 1: cmd_arg = argv[1] # change common help args to help command if cmd_arg in ('--help', '-h', '--usage', '-u', '/?'): cmd_arg = 'usage' completions = complete_command(cmd_arg) if cmd_arg and len(completions) > 0: if len(completions) == 1: # get the only completion (since in this case we have 1) cmd = completions[0] # build up the first part of the map (for illustrative purposes) cmd_map = list() if cmd_arg != cmd: cmd_map.append(cmd_arg) cmd_map.append(cmd) # map an alias to the command, and build up the map if cmd in cmd_alias_dict.keys(): alias = cmd if cmd_arg == cmd: cmd_map.append(alias) cmd = cmd_alias_dict[cmd] cmd_map.append(cmd) # show command map to avoid confusion if len(cmd_map) != 0: print 'Mapping command: %s' % ' -> '.join(cmd_map) # pass args and optarg data to command handler, which figures out # how to handle the arguments optarg_data = cmd_dict[cmd] handler = commands.CommandHandler(argv[2:], optarg_data) # use reflection to get the function pointer cmd_func = getattr(handler, cmd) cmd_func() return 0 else: print ( 'Command `%s` too ambiguous, ' 'could mean any of: %s' ) % (cmd_arg, ', '.join(completions)) else: if len(argv) == 1: print 'No command specified, showing usage.\n' else: print 'Command not recognised: %s\n' % cmd_arg commands.usage(argv[2:]) # generic error code if not returned sooner return 1 | f91beeacf06ee26c7f93ba881ffc708acb9b6bcf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/f91beeacf06ee26c7f93ba881ffc708acb9b6bcf/hm.py |
if self.generator_index: | if self.generator_id: | def get_generator_from_config(self): if self.generator_index: generators = self.get_generators() return generators[self.generator_index] else: config = ConfigParser.RawConfigParser() config.read(self.config_filepath()) return config.get('cmake', 'generator') | 7b11bc6318534730d9215befa843133d0fd6b9e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/7b11bc6318534730d9215befa843133d0fd6b9e2/commands.py |
return generators[self.generator_index] | return generators[self.generator_id] | def get_generator_from_config(self): if self.generator_index: generators = self.get_generators() return generators[self.generator_index] else: config = ConfigParser.RawConfigParser() config.read(self.config_filepath()) return config.get('cmake', 'generator') | 7b11bc6318534730d9215befa843133d0fd6b9e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/7b11bc6318534730d9215befa843133d0fd6b9e2/commands.py |
if self.generator_index: return generators[self.generator_index] | if self.generator_id: return generators[self.generator_id] | def get_generator_from_prompt(self): generators = self.get_generators() # if user has specified a generator as an argument if self.generator_index: return generators[self.generator_index] # if we can accept user input elif not self.no_prompts: generator_options = '' generators_sorted = sorted(generators.iteritems(), key=lambda t: int(t[0])) for id, generator in generators_sorted: generator_options += '\n ' + id + ': ' + generator | 7b11bc6318534730d9215befa843133d0fd6b9e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/7b11bc6318534730d9215befa843133d0fd6b9e2/commands.py |
self.ic.generator_index = a | self.ic.generator_id = a | def __init__(self, argv, opts, args): self.opts = opts self.args = args for o, a in self.opts: if o == '--no-prompts': self.ic.no_prompts = True elif o in ('-g', '--generator'): self.ic.generator_index = a | 7b11bc6318534730d9215befa843133d0fd6b9e2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/7b11bc6318534730d9215befa843133d0fd6b9e2/commands.py |
if o == ('-d', '--debug'): | if o in ('-d', '--debug'): | def get_build_mode(self): mode = None for o, a in self.opts: if o == ('-d', '--debug'): mode = 'debug' elif o == ('-r', '--release'): mode = 'release' return mode | 8aa4cbe02caedd5743911de2cb0580bec4230c16 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/8aa4cbe02caedd5743911de2cb0580bec4230c16/commands.py |
elif o == ('-r', '--release'): | elif o in ('-r', '--release'): | def get_build_mode(self): mode = None for o, a in self.opts: if o == ('-d', '--debug'): mode = 'debug' elif o == ('-r', '--release'): mode = 'release' return mode | 8aa4cbe02caedd5743911de2cb0580bec4230c16 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/8aa4cbe02caedd5743911de2cb0580bec4230c16/commands.py |
self.open_internal(xcodeproj_filepath(), 'open') | self.open_internal(self.xcodeproj_filepath(), 'open') | def open(self): generator = self.get_generator_from_config() if generator.startswith('Visual Studio'): print 'Opening with %s...' % generator self.open_internal(self.sln_filepath()) elif generator.startswith('Xcode'): print 'Opening with %s...' % generator self.open_internal(xcodeproj_filepath(), 'open') else: raise Exception('Not supported with generator: ' + generator) | e8f891858622cf800de358f3521ef791c4d8bdaf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/e8f891858622cf800de358f3521ef791c4d8bdaf/commands.py |
err = os.system(xcodebuild_cmd) | err = os.system(self.xcodebuild_cmd) | def build(self, mode = None): | af9fd1e78436c970f261c3855633caa5ec8e2911 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/af9fd1e78436c970f261c3855633caa5ec8e2911/commands.py |
version = '2.8.0' | version = '>= 2.8.0' | def persist_cmake(self): if sys.platform == 'win32': version = '2.8.0' found_cmd = '' for test_cmd in (self.cmake_cmd, r'tool\cmake\bin\%s' % self.cmake_cmd): print 'Testing for CMake version %s by running `%s`...' % (version, test_cmd) p = subprocess.Popen([test_cmd, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = p.communicate() if p.returncode == 0 and stdout == 'cmake version %s\r\n' % version: # found one that works, hurrah! print 'Found valid CMake version' found_cmd = test_cmd # HACK: gotta go out so just hacking this for now if found_cmd == r'tool\cmake\bin\%s' % self.cmake_cmd: found_cmd = r'..\tool\cmake\bin\%s' % self.cmake_cmd break if not found_cmd: found_cmake = False # if prompting allowed if not self.no_prompts: msg = 'CMake 2.8.0 not installed. Auto download now? [Y/n]' print msg, yn = raw_input() # if response was anyting but no if yn not in ['n', 'N']: if not os.path.exists('tool'): os.mkdir('tool') err = os.system(r'svn checkout https://synergy-plus.googlecode.com/svn/tools/win/cmake tool\cmake') if err != 0: raise Exception('Unable to get cmake from repository with error code code: ' + str(err)) found_cmd = r'..\tool\cmake\bin\%s' % self.cmake_cmd found_cmake = True # if cmake was not found if not found_cmake: raise Exception('Cannot continue without CMake, exiting.') return found_cmd else: return self.cmake_cmd | 49438a67204437f2f3e41dccb68551ea7debbd7f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/49438a67204437f2f3e41dccb68551ea7debbd7f/commands.py |
if p.returncode == 0 and stdout == 'cmake version %s\r\n' % version: | m = re.search('cmake version (2\.8\.\d+)', stdout) if p.returncode == 0 and m: | def persist_cmake(self): if sys.platform == 'win32': version = '2.8.0' found_cmd = '' for test_cmd in (self.cmake_cmd, r'tool\cmake\bin\%s' % self.cmake_cmd): print 'Testing for CMake version %s by running `%s`...' % (version, test_cmd) p = subprocess.Popen([test_cmd, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = p.communicate() if p.returncode == 0 and stdout == 'cmake version %s\r\n' % version: # found one that works, hurrah! print 'Found valid CMake version' found_cmd = test_cmd # HACK: gotta go out so just hacking this for now if found_cmd == r'tool\cmake\bin\%s' % self.cmake_cmd: found_cmd = r'..\tool\cmake\bin\%s' % self.cmake_cmd break if not found_cmd: found_cmake = False # if prompting allowed if not self.no_prompts: msg = 'CMake 2.8.0 not installed. Auto download now? [Y/n]' print msg, yn = raw_input() # if response was anyting but no if yn not in ['n', 'N']: if not os.path.exists('tool'): os.mkdir('tool') err = os.system(r'svn checkout https://synergy-plus.googlecode.com/svn/tools/win/cmake tool\cmake') if err != 0: raise Exception('Unable to get cmake from repository with error code code: ' + str(err)) found_cmd = r'..\tool\cmake\bin\%s' % self.cmake_cmd found_cmake = True # if cmake was not found if not found_cmake: raise Exception('Cannot continue without CMake, exiting.') return found_cmd else: return self.cmake_cmd | 49438a67204437f2f3e41dccb68551ea7debbd7f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10355/49438a67204437f2f3e41dccb68551ea7debbd7f/commands.py |